I am trying to draw a contour around the leaf to calculate its area using OpenCV, but cv2.findContours() does not find the contour around the leaf itself, only contours around other regions.
My Code
import cv2
import numpy as np
from google.colab.patches import cv2_imshow  # cv2_imshow replaces cv2.imshow in Colab (the /content/ path below suggests Colab)

def Contours_Detection(image_path):
    img = cv2.imread(image_path)
    image = cv2.resize(img, (500, 500), interpolation=cv2.INTER_AREA)
    lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
    # store the a-channel
    a_channel = lab[:, :, 1]
    # Automate the threshold using Otsu's method (the 127 is ignored when THRESH_OTSU is set)
    th = cv2.threshold(a_channel, 127, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
    # Mask the result with the original image
    masked = cv2.bitwise_and(image, image, mask=th)
    blur = cv2.medianBlur(masked, 7)
    gray = cv2.cvtColor(blur, cv2.COLOR_BGR2GRAY)
    binary = cv2.threshold(gray, 125, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    MORPH_close = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, kernel, iterations=2)
    result = 255 - MORPH_close
    contours, hierarchy = cv2.findContours(result, mode=cv2.RETR_LIST, method=cv2.CHAIN_APPROX_NONE)
    Num_of_contour = len(contours)
    image_copy = image.copy()
    spot_on_org_img = cv2.drawContours(image_copy, contours, -1, (0, 255, 0), thickness=2)
    # use uint8 so the drawn contours display correctly
    black_img = np.zeros(image.shape, dtype=np.uint8)
    # draw the contours on the black image
    spot_on_black_img = cv2.drawContours(black_img, contours, -1, (0, 255, 0), 1)
    return spot_on_black_img
image_path = '/content/IMG_3407.jpg'
spot_on_black_img = Contours_Detection(image_path)
cv2_imshow(spot_on_black_img)
(Images omitted: the input image, the current output, and the desired output.)
How can I do this using OpenCV or any other method?
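To be explicit about the goal, a minimal sketch of the area computation I'm after (my own illustration; it assumes the leaf ends up as the largest contour returned inside Contours_Detection):

# Sketch of the goal: once the leaf contour is found, its area would be
leaf = max(contours, key=cv2.contourArea)  # assumes the leaf is the largest contour
leaf_area_px = cv2.contourArea(leaf)       # area in pixels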
I tried this to detect each line of text in an image:
img = cv2.imread('test2.JPG')
imgray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(imgray, 150, 255, 0)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
print("Number of contours = " + str(len(contours)))
print(contours[0])
# cv2.drawContours(img, contours, -1, (0, 255, 0), 1)
# cv2.drawContours(imgray, contours, -1, (0, 255, 0), 3)
for cnt in contours:
    area = cv2.contourArea(cnt)
    if area > 20:
        peri = cv2.arcLength(cnt, True)
        approx = cv2.approxPolyDP(cnt, 0.02 * peri, True)
        x, y, w, h = cv2.boundingRect(approx)
        cv2.rectangle(img, (x, y - 3), (x + w, y + h - 3), (255, 0, 0), 1)
You have found the contours and drawn only those above a certain area. So far so good.
To capture each line as an individual entity, you need to find a way to connect the text within each line. Since the lines in the image are straight, a simple approach is to use a horizontal kernel (e.g. [1, 1, 1, 1, 1]) of a certain length and perform morphological dilation.
Code:
img = cv2.imread('text.jpg',1)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
th = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)[1]
We use a horizontal kernel 8 pixels long. This is the parameter you would need to change when trying other images with different font sizes and text lengths.
hor_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (8, 1))
# array([[1, 1, 1, 1, 1, 1, 1, 1]], dtype=uint8)
dilated = cv2.dilate(th, hor_kernel, iterations=1)
Looking at the image above, you can see what dilation with a horizontal kernel does: it smears the characters of each line into one connected blob. From here on, we find the outermost contours above a certain area.
contours, hierarchy = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
img2 = img.copy()
for i, c in enumerate(contours):
    area = cv2.contourArea(c)
    if area > 100:
        x, y, w, h = cv2.boundingRect(c)
        img2 = cv2.rectangle(img2, (x, y), (x + w, y + h), (0, 255, 0), 1)
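If you also want each detected line as a separate image, a minimal sketch (my own extension, not part of the answer above; it reuses the contours found on the dilated image):

# Hypothetical extension: crop each detected text line out of the original image.
line_images = []
for c in contours:
    if cv2.contourArea(c) > 100:
        x, y, w, h = cv2.boundingRect(c)
        line_images.append(img[y:y + h, x:x + w])  # ROI containing one line of text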
I'm trying to get the watermelon from the image. I have tried HSV segmentation and GrabCut, but neither gives the output I want. How can I get the watermelon only? Any method will do except neural networks, since I'm new to image processing.
import cv2 as cv
import numpy as np

image = cv.imread('watermelon.png')  # filename assumed
hsv = cv.cvtColor(image, cv.COLOR_BGR2HSV)

# hsv segmentation
lb = (20, 50, 0)
ub = (100, 255, 255)
mask_hsv = cv.inRange(hsv, lb, ub)
image_copy = image.copy()
morph = cv.erode(mask_hsv, np.ones((3, 3), np.uint8), iterations=4)  # erode expects a kernel array, not a tuple
output = cv.bitwise_and(image_copy, image_copy, mask=morph)
After the HSV segmentation, I find the largest contour in the image.
draw = image.copy()
contours, h = cv.findContours(morph, cv.RETR_LIST, cv.CHAIN_APPROX_NONE)
max_area = 0
cnt = None
if len(contours) != 0:
    for contour in contours:
        area = cv.contourArea(contour)
        if max_area < area:
            max_area = area
            cnt = contour
else:
    print('No contours found!')
Then I find the bounding rectangle of the biggest contour and feed it to the grabCut function:
x, y, w, h = cv.boundingRect(cnt)  # boundingRect returns (x, y, width, height)
rect = (x, y, w, h)
output_rect = image.copy()
mask = np.ones(image.shape[:2], dtype=np.uint8) * cv.GC_PR_BGD
bgdModel = np.zeros((1, 65), dtype=np.float64)
fgdModel = np.zeros((1, 65), dtype=np.float64)
# perform grabCut
cv.grabCut(output_rect, mask, rect, bgdModel, fgdModel, 100, cv.GC_INIT_WITH_RECT)
mask = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
mask *= 255
# apply the generated mask to the image
output_image = cv.bitwise_and(output_rect, output_rect, mask=mask)
# change black pixels to white
black_pixels = np.where(
    (output_image[:, :, 0] == 0) & (output_image[:, :, 1] == 0) & (output_image[:, :, 2] == 0)
)
output_image[black_pixels] = [255, 255, 255]
Original Image
Biggest contour found
Output after grabcut
I got the correct output by drawing your detected contours on a blank image, then applying erosion, and finally running HoughCircles. Feel free to change the hyperparameters to obtain better results.
Code:
import cv2
import numpy as np
image = cv2.imread("watermelon.png")
image_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
image_copy = image.copy()
lower_boundary = (20,50, 0)
upper_boundary = (100,255,255)
mask_hsv = cv2.inRange(image_hsv, lower_boundary, upper_boundary)
blank_im = np.zeros((image.shape[0], image.shape[1], 1), dtype=np.uint8)
morph = cv2.erode(mask_hsv, np.ones((3, 3), np.uint8), iterations=4)  # use an actual 3x3 kernel, not a tuple
output = cv2.bitwise_and(image, image, mask=morph)
# Finding Contours
contours, _ = cv2.findContours(morph, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
if len(contours) != 0:
    # Rather than a long for loop, this is how you find the max value with a key function
    contour = max(contours, key=cv2.contourArea)
else:
    print("No contours found!")
# Draw Contours on Blank Image (np.zeros() with same shape as original image)
cv2.drawContours(blank_im, [contour], -1, (255, 255, 255), 10)  # wrap the single contour in a list
# Eroding the Blank Image with Drawn Contours
blank_im = cv2.erode(blank_im, np.ones((3, 3), np.uint8), iterations=4)
# Find The Possible Circles, (Feel Free to Change Hyperparameters)
circles = cv2.HoughCircles(blank_im, cv2.HOUGH_GRADIENT, 1, 20, param1=50, param2=30, minRadius=0, maxRadius=0)
if circles is not None:  # HoughCircles returns None when nothing is found
    circles = np.uint16(np.around(circles))
    # Draw each circle in green and its center in red
    for i in circles[0, :]:
        cv2.circle(image, (i[0], i[1]), i[2], (0, 255, 0), 2)
        cv2.circle(image, (i[0], i[1]), 2, (0, 0, 255), 3)
# Display Images
cv2.imshow('Original Image', image_copy)
cv2.imshow('Drawn Contours On Blank Image', blank_im)
cv2.imshow('Detected Circle', image)
cv2.waitKey(0)
Output:
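If you need the watermelon region itself rather than the drawn circle, a minimal sketch (my own follow-up, not part of the answer above; it assumes circles is not None and uses the first detected circle):

# Hypothetical follow-up: cut out the watermelon with the first detected circle.
circle_mask = np.zeros(image.shape[:2], dtype=np.uint8)
cx, cy, r = circles[0, 0]  # center (cx, cy) and radius r of the first circle
cv2.circle(circle_mask, (int(cx), int(cy)), int(r), 255, -1)  # filled circle as mask
watermelon_only = cv2.bitwise_and(image_copy, image_copy, mask=circle_mask)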
I applied the floodFill function in OpenCV to extract the foreground from the background, but some of the objects in the image were not recognized by the algorithm, so I would like to know how I can improve my detections and what modifications are necessary.
import cv2
import numpy as np
from skimage.filters import threshold_local  # threshold_local comes from scikit-image

image = cv2.imread(args["image"])
image = cv2.resize(image, (800, 800))
h, w, chn = image.shape
ratio = image.shape[0] / 800.0
orig = image.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (5, 5), 0)
edged = cv2.Canny(gray, 75, 200)
# show the original image and the edge detected image
print("STEP 1: Edge Detection")
cv2.imshow("Image", image)
cv2.imshow("Edged", edged)
warped1 = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
T = threshold_local(warped1, 11, offset = 10, method = "gaussian")
warped1 = (warped1 > T).astype("uint8") * 255
print("STEP 3: Apply perspective transform")
seed = (10, 10)
foreground, birdEye = floodFillCustom(image, seed)
cv2.circle(birdEye, seed, 50, (0, 255, 0), -1)
cv2.imshow("originalImg", birdEye)
cv2.circle(birdEye, seed, 100, (0, 255, 0), -1)
cv2.imshow("foreground", foreground)
cv2.imshow("birdEye", birdEye)
gray = cv2.cvtColor(foreground, cv2.COLOR_BGR2GRAY)
cv2.imshow("gray", gray)
cv2.imwrite("gray.jpg", gray)
threshImg = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY)[1]
h_threshold,w_threshold = threshImg.shape
area = h_threshold*w_threshold
cv2.imshow("threshImg", threshImg)[![enter image description here][1]][1]
The floodFillCustom function is as follows -
def floodFillCustom(originalImage, seed):
    # lift all channels to at least 10 so the black fill value (0, 0, 0) stays distinguishable
    originalImage = np.maximum(originalImage, 10)
    foreground = originalImage.copy()
    cv2.floodFill(foreground, None, seed, (0, 0, 0),
                  loDiff=(10, 10, 10), upDiff=(10, 10, 10))
    return [foreground, originalImage]
A little bit late, but here's an alternative solution for segmenting the tools. It involves converting the image to the CMYK color space and extracting the K (Key) component. This component can be thresholded to get a nice binary mask of the tools; the procedure is very straightforward:
Convert the image to the CMYK color space
Extract the K (Key) component
Threshold the image via Otsu's thresholding
Apply some morphology (a closing) to clean up the mask
(Optional) Get bounding rectangles of all the tools
Let's see the code:
# Imports
import cv2
import numpy as np
# Read image
imagePath = "C://opencvImages//"
inputImage = cv2.imread(imagePath+"DAxhk.jpg")
# Create deep copy for results:
inputImageCopy = inputImage.copy()
# Convert to float and divide by 255:
imgFloat = inputImage.astype(np.float64) / 255.  # np.float is deprecated; use np.float64
# Calculate channel K:
kChannel = 1 - np.max(imgFloat, axis=2)
# Convert back to uint 8:
kChannel = (255*kChannel).astype(np.uint8)
The first step is to convert the BGR image to CMYK. There's no direct conversion in OpenCV for this, so I applied the conversion formula directly. We can get every color-space component from that formula, but we are only interested in the K channel. The conversion is easy, but we need to be careful with the data types, because we must operate on float arrays. After getting the K channel, we convert the image back to an unsigned 8-bit array; this is the resulting image:
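For reference, the other three components follow from the same formula. A minimal sketch (not needed for this answer; the epsilon guard against division by zero on pure-black pixels, where K = 1, is my own assumption):

# Sketch: full CMYK decomposition from the BGR float image (imgFloat).
b, g, r = imgFloat[..., 0], imgFloat[..., 1], imgFloat[..., 2]
k = 1 - np.max(imgFloat, axis=2)
denom = np.clip(1 - k, 1e-6, None)  # avoid dividing by zero where K == 1
c = (1 - r - k) / denom
m = (1 - g - k) / denom
y = (1 - b - k) / denom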
Let's threshold this image using Otsu's thresholding method:
# Threshold via Otsu:
_, binaryImage = cv2.threshold(kChannel, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
This yields the following binary image:
Looks very nice! Additionally, we can clean it up a little bit (joining the little gaps) using a morphological closing. Let's apply a rectangular structuring element of size 5 x 5 and use 2 iterations:
# Use a little bit of morphology to clean the mask:
# Set kernel (structuring element) size:
kernelSize = 5
# Set morph operation iterations:
opIterations = 2
# Get the structuring element:
morphKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernelSize, kernelSize))
# Perform closing:
binaryImage = cv2.morphologyEx(binaryImage, cv2.MORPH_CLOSE, morphKernel, None, None, opIterations, cv2.BORDER_REFLECT101)
Which results in this:
Very cool. What follows is optional. We can get the bounding rectangles for every tool by looking for the outer (external) contours:
# Find the contours on the binary image:
contours, hierarchy = cv2.findContours(binaryImage, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# Look for the outer bounding boxes (no children):
for _, c in enumerate(contours):
    # Get the contour's bounding rectangle:
    boundRect = cv2.boundingRect(c)
    # Get the dimensions of the bounding rectangle:
    rectX = boundRect[0]
    rectY = boundRect[1]
    rectWidth = boundRect[2]
    rectHeight = boundRect[3]
    # Draw the bounding rectangle:
    color = (0, 0, 255)
    cv2.rectangle(inputImageCopy, (int(rectX), int(rectY)),
                  (int(rectX + rectWidth), int(rectY + rectHeight)), color, 5)
cv2.imshow("Bounding Rectangles", inputImageCopy)
cv2.waitKey(0)
Which produces the final image:
I have many images of specimens with uncontrollable background colors. Some of them have black backgrounds, some have white backgrounds, some have green backgrounds, etc.
I would like to remove the background color of a given image, where the object in the image is only a single specimen. I tried this code, but it does not work as I expect.
import cv2

def get_holes(image, thresh):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    im_bw = cv2.threshold(gray, thresh, 255, cv2.THRESH_BINARY)[1]
    im_bw_inv = cv2.bitwise_not(im_bw)
    # OpenCV 4 returns (contours, hierarchy); the original three-value unpacking is the OpenCV 3 API
    contour, _ = cv2.findContours(im_bw_inv, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contour:
        cv2.drawContours(im_bw_inv, [cnt], 0, 255, -1)
    nt = cv2.bitwise_not(im_bw)
    im_bw_inv = cv2.bitwise_or(im_bw_inv, nt)
    return im_bw_inv
def remove_background(image, thresh, scale_factor=.25, kernel_range=range(1, 15), border=None):
    border = border or kernel_range[-1]
    holes = get_holes(image, thresh)
    small = cv2.resize(holes, None, fx=scale_factor, fy=scale_factor)
    bordered = cv2.copyMakeBorder(small, border, border, border, border, cv2.BORDER_CONSTANT)
    for i in kernel_range:
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2 * i + 1, 2 * i + 1))
        bordered = cv2.morphologyEx(bordered, cv2.MORPH_CLOSE, kernel)
    unbordered = bordered[border: -border, border: -border]
    mask = cv2.resize(unbordered, (image.shape[1], image.shape[0]))
    fg = cv2.bitwise_and(image, image, mask=mask)
    return fg
file = your_file_location
img = cv2.imread(file)
nb_img = remove_background(img, 255)
These are some example images
May I have your suggestions?
Here's a simple approach with the assumption that there is only one specimen per image.
Kmeans color quantization. We load the image then perform Kmeans color quantization to segment the image into a specified cluster of colors. For instance with clusters=4, the image will be labeled into four colors.
Obtain binary image. Convert to grayscale, Gaussian blur, adaptive threshold.
Draw largest enclosing circle onto mask. Find contours, sort for largest contour using contour area filtering then draw the largest enclosing circle onto a mask using cv2.minEnclosingCircle.
Bitwise-and. Since we have isolated the desired sections to extract, we simply bitwise-and the mask and the input image.
Input image -> Kmeans -> Binary image
Detected largest enclosing circle -> Mask -> Result
Here's the output for the second image
Input image -> Kmeans -> Binary image
Detected largest enclosing circle -> Mask -> Result
Code
import cv2
import numpy as np
# Kmeans color segmentation
def kmeans_color_quantization(image, clusters=8, rounds=1):
    h, w = image.shape[:2]
    samples = np.zeros([h * w, 3], dtype=np.float32)
    count = 0
    for x in range(h):
        for y in range(w):
            samples[count] = image[x][y]
            count += 1

    compactness, labels, centers = cv2.kmeans(samples,
        clusters,
        None,
        (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10000, 0.0001),
        rounds,
        cv2.KMEANS_RANDOM_CENTERS)

    centers = np.uint8(centers)
    res = centers[labels.flatten()]
    return res.reshape((image.shape))
# Load image and perform kmeans
image = cv2.imread('2.jpg')
original = image.copy()
kmeans = kmeans_color_quantization(image, clusters=4)
# Convert to grayscale, Gaussian blur, adaptive threshold
gray = cv2.cvtColor(kmeans, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (3,3), 0)
thresh = cv2.adaptiveThreshold(blur,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV,21,2)
# Draw largest enclosing circle onto a mask
mask = np.zeros(original.shape[:2], dtype=np.uint8)
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
for c in cnts:
    ((x, y), r) = cv2.minEnclosingCircle(c)
    cv2.circle(image, (int(x), int(y)), int(r), (36, 255, 12), 2)
    cv2.circle(mask, (int(x), int(y)), int(r), 255, -1)
    break
# Bitwise-and for result
result = cv2.bitwise_and(original, original, mask=mask)
result[mask==0] = (255,255,255)
cv2.imshow('thresh', thresh)
cv2.imshow('result', result)
cv2.imshow('mask', mask)
cv2.imshow('kmeans', kmeans)
cv2.imshow('image', image)
cv2.waitKey()
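As a side note (my own suggestion, not part of the answer above), the pixel-copying double loop in kmeans_color_quantization can be replaced by a single vectorized reshape, which builds the same samples array much faster:

# Sketch: vectorized replacement for the sample-building loop.
samples = image.reshape(-1, 3).astype(np.float32)  # one row per pixel, three color channels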