I have been at this all day. How do we remove the small black noise inside the red circle? I would need it to work on other sample pictures like this.
The idea I used is to run findContours and then mask out all the small black noise blobs whose area is below a certain threshold (found by trial and error).
Removing noise in red ellipse
import cv2

image = cv2.imread("11_Image_after_noise_removal.png")
# copy image
img = image.copy()
imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(imgray, 127, 255, cv2.THRESH_BINARY)
thresh = 255 - thresh
# Use cv2.RETR_CCOMP for a two-level hierarchy
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_CCOMP,
                                       cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(img, contours, -1, (0, 255, 0), 1)
cv2.imshow("First detection", img)
# loop through the contours
for i, cnt in enumerate(contours):
    # if the contour has a parent, i.e. it is a hole inside another contour
    if hierarchy[0][i][3] != -1:
        # if the size of the contour is less than a threshold (noise)
        if cv2.contourArea(cnt) < 70:
            # Fill the holes in the original image
            cv2.drawContours(img, [cnt], 0, (0, 0, 0), -1)
# display the result after noise removal
cv2.imshow("Image after noise removal", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
You might check the contour area using area = cv2.contourArea(cnt) and, if it is below some threshold, ignore that contour.
Here is the OpenCV documentation:
https://docs.opencv.org/4.3.0/dd/d49/tutorial_py_contour_features.html
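A minimal sketch of that filtering, assuming contours has already been produced by the findContours call above and reusing the question's trial-and-error area threshold of 70 (a value to tune per image):
min_area = 70  # trial-and-error threshold from the question; tune per image
# Keep only contours whose area passes the threshold; smaller ones are treated as noise and ignored
kept = [cnt for cnt in contours if cv2.contourArea(cnt) >= min_area]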
I created a convex hull of a hand from an X-ray by first creating contours along the edges of the hand. I want to blacken the region outside the convex hull using OpenCV in Python. How do I approach this?
The code below creates the convex hull after doing contours:
import cv2
import numpy as np

img_path = 'sample_image.png'
# Getting the threshold of the image:
image = cv2.imread(img_path)
original = image.copy()
blank = np.zeros(image.shape[:2], dtype = np.uint8)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (5,5), 0)
thresh = cv2.threshold(blur, 140, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
# Drawing the contours along the edges of the hand in X-ray:
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE)
contours = max(contours, key = lambda x: cv2.contourArea(x))
cv2.drawContours(image, [contours], -1, (255,255,0), 2)
# Drawing the hull along the digits along the edges:
hull = cv2.convexHull(contours)
cv2.drawContours(image, [hull], -1, (0, 255, 255), 2)
Input image: Sample_image.png
Result image: Output from the code
Update
The answer given by @AKX below proposed drawing the hull as a white filled polygon onto a new blank mask and then using bitwise_and. The resulting code:
# Dilate so the hand region merges into a single contour
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5,5))
dilate = cv2.dilate(thresh, kernel, iterations = 2)
contours, hierarchy = cv2.findContours(dilate, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = max(contours, key = lambda x: cv2.contourArea(x))
cv2.drawContours(image, [contours], -1, (255,255,0), 2)
plt.imshow(image)
# Created a new mask and used bitwise_and to select for contours:
hull = cv2.convexHull(contours)
mask = np.zeros_like(image)
cv2.drawContours(mask, [hull], -1, (255, 255, 255), -1)
masked_image = cv2.bitwise_and(image, mask)
plt.imshow(masked_image)
Masking
Instead of drawing the hull contour as a line, draw it as a filled polygon onto a blank image with white, then bitwise_and it onto the original so everything outside the polygon is blanked out.
mask = np.zeros_like(image)
cv2.drawContours(mask, [hull], -1, (255, 255, 255), -1)
masked_image = cv2.bitwise_and(image, mask)
cv2.imwrite(r'dog_masked.jpg', masked_image)
input image
(no idea why I called it dog.jpg, it doesn't look like a dog)
result
(the line below the dog remains since it's inside the hull created by the dog's feet)
Cropping
To crop the image to only include the hull's area, use cv2.boundingRect and slice the image array:
x, y, w, h = cv2.boundingRect(hull)
cropped = image[y:y + h, x:x + w]
cv2.imwrite(r'dog_crop.jpg', cropped)
Result
I am trying to remove objects from the image using the following method; however, as can be seen in the outcome image, there is a residual fine colored line around each object. Although I have dilated my image, the lines are still present.
Is there a way to remove those lines?
import cv2
import numpy as np
img = cv2.imread('TRY.jpg')
image_edges = cv2.GaussianBlur(img,(3,3),1) #Helps in defining the edges
image_edges=cv2.Canny(image_edges,100,200)
image_edges=cv2.dilate(image_edges,(3,3),iterations=3)
contours_draw, hierarchy = cv2.findContours(image_edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
mask = np.zeros(img.shape, np.uint8)
mask.fill(255)
for c in contours_draw:
    cv2.drawContours(mask, [c], -1, (0, 0, 0), -1)
mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
res = img.copy()
res[mask == 0] = 255
cv2.imshow('img', res)
cv2.waitKey(0)
Original Image
--
Outcome Image
The outlines are still there because the threshold is too high.
Try this:
imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(imgray, 20, 255, 0)
contours_draw, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
The threshold value you pass to cv2.threshold determines how inclusive or exclusive the binary image handed to findContours will be: a lower value captures more of each object, including the faint outline around it, while a higher value captures less.
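A minimal sketch of how that lower threshold could be plugged into the question's masking code (the value 20 comes from the snippet above; the rest mirrors the question's approach):
import cv2
import numpy as np

img = cv2.imread('TRY.jpg')
# Threshold the grayscale image with a low value so the faint outlines around each object fall inside the mask as well
imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(imgray, 20, 255, cv2.THRESH_BINARY)
contours_draw, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
# Same masking step as in the question: paint each contour, filled, in black on a white mask, then white-out those pixels in the original image
mask = np.full(img.shape[:2], 255, np.uint8)
for c in contours_draw:
    cv2.drawContours(mask, [c], -1, 0, -1)
res = img.copy()
res[mask == 0] = 255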
I am trying to detect the black tape on the floor, but whenever I try to increase the threshold to stop it from detecting the ground, all it does is stop detecting the tape.
import cv2
import numpy as np
img = cv2.imread('tape4.jpeg', cv2.IMREAD_UNCHANGED)
#convert img to grey
img_grey = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
#set a thresh
thresh = 100
#get threshold image
ret,thresh_img = cv2.threshold(img_grey, thresh, 255, cv2.THRESH_BINARY)
#find contours
contours, hierarchy = cv2.findContours(thresh_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#create an empty image for contours
img_contours = np.zeros(img.shape)
# draw the contours on the empty image
cv2.drawContours(img_contours, contours, -1, (0,255,0), 3)
#save image
cv2.imwrite('contours.png',img_contours)
You may use cv2.THRESH_BINARY_INV instead of cv2.THRESH_BINARY to find pixels below thresh instead of pixels above thresh.
The reason you are detecting the floor is that cv2.threshold marks (with the value 255) the pixels above thresh.
You want to mark the dark pixels below thresh.
You can compute: thresh_img = 255 - thresh_img, or use cv2.THRESH_BINARY_INV (result is the same).
I also recommend applying a morphological closing operation to remove clutter.
Code for finding the tape:
import cv2
import numpy as np
img = cv2.imread('tape4.jpeg', cv2.IMREAD_UNCHANGED)
#convert img to grey
img_grey = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
#set a thresh
thresh = 20 # Set thresh to very low value (the tape is almost black).
#get threshold image
# Use cv2.THRESH_BINARY_INV instead of cv2.THRESH_BINARY for finding pixels below thresh instead of pixels above thresh
ret,thresh_img = cv2.threshold(img_grey, thresh, 255, cv2.THRESH_BINARY_INV)
# Apply closing morphological operation
thresh_img = cv2.morphologyEx(thresh_img, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (21,21)))
#find contours
contours, hierarchy = cv2.findContours(thresh_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#create an empty image for contours
img_contours = np.zeros(img.shape)
# draw the contours on the empty image
cv2.drawContours(img_contours, contours, -1, (0,255,0), 3)
#save image
cv2.imwrite('contours.png',img_contours)
Result:
For finding the tape, you can look for the largest contour within contours:
# Get contour with maximum area
c = max(contours, key=cv2.contourArea)
img_contours = np.zeros(img.shape)
cv2.drawContours(img_contours, [c], -1, (0,255,0), 3)
Result:
I am having a problem with cv2.drawContours() using Python.
Problem: I can't show a single contour. I would like to get just the track.
Here is the Code:
original_image = np.array(ImageGrab.grab(bbox))
crop_img = original_image[200:307, :, :]
# Convert BGR to HSV
hsv = cv2.cvtColor(crop_img, cv2.COLOR_BGR2HSV)
# define range of track (grey) color in HSV
lower_grey = np.array([0, 0, 0])
upper_grey = np.array([255, 40, 150])
# Threshold the HSV image to get only gery colors
grey_mask = cv2.inRange(hsv, lower_grey, upper_grey)
grey_mask2 = grey_mask.copy()
_, contours, heir = cv2.findContours(grey_mask2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(grey_mask2, contours, 0, (0, 255, 0), 3)
cv2.imshow('Orig Image', crop_img)
cv2.imshow('Grey Mask', grey_mask2)
if cv2.waitKey(25) & 0xFF == ord('q'):
    cv2.destroyAllWindows()
    break
Original Image
When drawContours() is set to 0
but it seems to draw a few contours if I set the contour index to -1 (all of them)
When drawContours() is set to -1
I've tried my best to fix this one; any advice would be highly appreciated.
cv2.drawContours(image, contours, contourIdx, color, thickness)
To draw all the contours in the image: contourIdx = -1
To draw a specific contour, say the 3rd contour in the list, set contourIdx = 2.
So if you want the contour that contains the race track, find its index and draw it. Otherwise, assuming the race track's contour is the largest by area, you can simply do the following:
_, contours, heir = cv2.findContours(grey_mask2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
c = max(contours, key = cv2.contourArea)
cv2.drawContours(grey_mask2,[c],0,(0, 255, 0),3)
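As a small illustration of the contourIdx parameter described above, drawing only one specific contour could look like this (the index 2 is a hypothetical example, not necessarily the track's actual index):
vis = crop_img.copy()  # color image from the question's code
track_idx = 2          # hypothetical index of the desired contour, found by inspection
if track_idx < len(contours):
    cv2.drawContours(vis, contours, track_idx, (0, 255, 0), 3)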
I'm trying to remove the square boxes (vertical and horizontal lines) using the Hough transform in OpenCV (Python). The problem is that none of the vertical lines are being detected. I've tried looking through the contours and hierarchy, but there are too many contours in this image and I'm confused about how to use them.
After looking through related posts, I've played with the threshold and rho parameters, but that didn't help.
I've attached the code for more details. Why does the Hough transform not find the vertical lines in the image? Any suggestions for solving this task are welcome. Thanks.
Input Image :
Hough transformed Image:
Drawing contours:
import cv2
import numpy as np
import pdb
img = cv2.imread('/home/user/Downloads/cropped/robust_blaze_cpp-300-0000046A-02-HW.jpg')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray, 140, 255, 0)
im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(img, contours, -1, (0,0,255), 2)
edges = cv2.Canny(gray,50,150,apertureSize = 3)
minLineLength = 5
maxLineGap = 100
lines = cv2.HoughLinesP(edges,rho=1,theta=np.pi/180,threshold=100,minLineLength=minLineLength,maxLineGap=maxLineGap)
for x1,y1,x2,y2 in lines[0]:
    cv2.line(img,(x1,y1),(x2,y2),(0,255,0),2)
cv2.imwrite('probHough.jpg',img)
To be honest, rather than looking for the lines, I'd instead look for the white boxes.
Preparation
import cv2
import numpy as np
Load the image
img = cv2.imread("digitbox.jpg", 0)
Binarize it so that both the boxes and the digits are black and the rest is white.
_, thresh = cv2.threshold(img, 200, 255, cv2.THRESH_BINARY)
cv2.imwrite('digitbox_step1.png', thresh)
Find contours. In this example image, it's fine to just look for external contours.
_, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
Process the contours, filtering out any with too small an area. Find the convex hull of each remaining contour and draw it filled (in black) onto a white mask, so the mask stays white everywhere outside the hulls. Also store the bounding box of each kept contour, sorted by x coordinate.
mask = np.ones_like(img) * 255
boxes = []
for contour in contours:
    if cv2.contourArea(contour) > 100:
        hull = cv2.convexHull(contour)
        cv2.drawContours(mask, [hull], -1, 0, -1)
        x,y,w,h = cv2.boundingRect(contour)
        boxes.append((x,y,w,h))
boxes = sorted(boxes, key=lambda box: box[0])
cv2.imwrite('digitbox_step2.png', mask)
Dilate the mask (to shrink the black parts) to clip off any remains of the gray frames.
mask = cv2.dilate(mask, np.ones((5,5),np.uint8))
cv2.imwrite('digitbox_step3.png', mask)
Fill all the masked pixels with white, to erase the frames.
img[mask != 0] = 255
cv2.imwrite('digitbox_step4.png', img)
Process the digits as you desire; I'll just draw the bounding boxes.
result = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
for n,box in enumerate(boxes):
    x,y,w,h = box
    cv2.rectangle(result,(x,y),(x+w,y+h),(255,0,0),2)
    cv2.putText(result, str(n),(x+5,y+17), cv2.FONT_HERSHEY_SIMPLEX, 0.6,(255,0,0),2,cv2.LINE_AA)
cv2.imwrite('digitbox_step5.png', result)
The whole script in one piece:
import cv2
import numpy as np
img = cv2.imread("digitbox.jpg", 0)
_, thresh = cv2.threshold(img, 200, 255, cv2.THRESH_BINARY)
_, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
mask = np.ones_like(img) * 255
boxes = []
for contour in contours:
    if cv2.contourArea(contour) > 100:
        hull = cv2.convexHull(contour)
        cv2.drawContours(mask, [hull], -1, 0, -1)
        x,y,w,h = cv2.boundingRect(contour)
        boxes.append((x,y,w,h))
boxes = sorted(boxes, key=lambda box: box[0])
mask = cv2.dilate(mask, np.ones((5,5),np.uint8))
img[mask != 0] = 255
result = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
for n,box in enumerate(boxes):
    x,y,w,h = box
    cv2.rectangle(result,(x,y),(x+w,y+h),(255,0,0),2)
    cv2.putText(result, str(n),(x+5,y+17), cv2.FONT_HERSHEY_SIMPLEX, 0.6,(255,0,0),2,cv2.LINE_AA)
cv2.imwrite('digitbox_result.png', result)