Hi, I have code that finds and draws contours around yellow objects.
Here is the code:
import cv2
import numpy as np
from PIL import ImageGrab
lower_yellow = np.array([20, 100, 100])
upper_yellow = np.array([30, 255, 255])
def test():
    while True:
        imgDef = ImageGrab.grab()
        image = np.array(imgDef)
        rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        hsv = cv2.cvtColor(rgb, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
        kernel = np.ones((5, 5), np.uint8)
        mask = cv2.dilate(mask, kernel, iterations=1)
        mask = cv2.erode(mask, kernel, iterations=1)
        contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cv2.drawContours(image, contours, -1, (0, 255, 0), 3)
        cv2.imshow('test', image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()

if __name__ == "__main__":
    test()
Right now the output is as follows:
I wish to group contours that are in close proximity to one another and draw a bounding box around them like so:
How can I achieve this? Am I right to be looking into scikit-learn's KMeans to group them?
You can use cv2.kmeans for that. I suggest changing cv2.RETR_EXTERNAL to cv2.RETR_TREE. I'm sharing a possible solution here; the only problem is that you need to know the number of clusters before using cv2.kmeans.
import cv2
import numpy as np
from PIL import ImageGrab
import random
lower_yellow = np.array([20, 100, 100])
upper_yellow = np.array([30, 255, 255])
def get_contours_by_zones(contours, number_of_zones):
    # use the mean point of each contour as its center
    center_points = []
    for cnt in contours:
        cx = cnt[:, 0, 0].mean()
        cy = cnt[:, 0, 1].mean()
        center_points.append((cx, cy))
    center_points = np.array(center_points).astype(np.float32)
    # cluster the contour centers
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    _, _, cluster_centers = cv2.kmeans(center_points, number_of_zones, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
    # assign each contour to the nearest cluster center
    contour_zones = [[] for _ in range(number_of_zones)]
    for i, (cnt_x, cnt_y) in enumerate(center_points):
        tmp = cluster_centers - (cnt_x, cnt_y)
        distances = np.sqrt((tmp**2).sum(axis=1))
        cluster_index = np.argmin(distances)
        contour_zones[cluster_index].append(contours[i])
    return contour_zones

def test():
    while True:
        imgDef = ImageGrab.grab()
        image = np.array(imgDef)
        rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        hsv = cv2.cvtColor(rgb, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
        kernel = np.ones((5, 5), np.uint8)
        mask = cv2.dilate(mask, kernel, iterations=1)
        mask = cv2.erode(mask, kernel, iterations=1)
        contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        # ignore outer contour
        contour_per_zone = get_contours_by_zones(contours=contours[1:], number_of_zones=3)
        canvas = np.empty_like(image)
        canvas[...] = 255
        for zone in contour_per_zone:
            # add the corresponding code to get xmin, xmax, ymin, ymax from zone,
            # so you can draw the corresponding rectangle
            # draw each zone with a different color
            cv2.drawContours(canvas, zone, -1, (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)), 3)
        cv2.imshow('test', canvas)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()

if __name__ == "__main__":
    test()
The get_contours_by_zones() function uses cv2.kmeans to assign each contour to a group.
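For example, one way to fill in the TODO inside the zone loop (a minimal sketch; the helper name draw_zone_box is arbitrary) is to stack all the points of a zone's contours and take a single bounding box with cv2.boundingRect:

def draw_zone_box(canvas, zone, color=(0, 0, 255)):
    # `zone` is the list of contours that k-means assigned to one cluster
    if not zone:
        return
    points = np.vstack(zone)                # concatenate every contour's points
    x, y, w, h = cv2.boundingRect(points)   # one box enclosing the whole group
    cv2.rectangle(canvas, (x, y), (x + w, y + h), color, 2)

Calling draw_zone_box(canvas, zone) inside the for zone in contour_per_zone: loop draws one rectangle around each group of nearby contours.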
How do I run a loop to get the contour and pixels for 8 objects in an image, rather than just finding the max contour and pixels of one object?
import cv2
import numpy as np
img = cv2.imread('C:\\Users\\marnes\\Downloads\\25%.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# get shape of largest contour and count pixels
edges = cv2.Canny(image=img, threshold1=100, threshold2=200)
contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
max_contour = max(contours, key=lambda c: cv2.contourArea(c))
mask = np.zeros_like(gray)
cv2.drawContours(mask, [max_contour], 0, 255, -1)
pct_100 = cv2.countNonZero(mask)
cv2.imshow("mask", mask)
# get dark area and count pixels
ret, thresh = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY_INV)
thresh = cv2.bitwise_and(thresh, mask)
pct_dark = cv2.countNonZero(thresh)
cv2.imshow("dark", thresh)
print(f"mask = {pct_100}, dark = {pct_dark}, %dark = {pct_dark / pct_100 * 100}")
first_operator = 100
second_operator = pct_dark / pct_100 * 100
output1 = first_operator - second_operator
parameter = 'starch breakdown'
print([parameter] + [output1])
cv2.waitKey(0)
cv2.destroyAllWindows()
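For reference, a minimal sketch of what that loop could look like, assuming the same Canny/threshold setup as above and simply taking the 8 largest contours (the file name and the fixed count of 8 are assumptions):

import cv2
import numpy as np

img = cv2.imread('objects.png')  # placeholder path
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(image=img, threshold1=100, threshold2=200)
contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

# threshold the dark regions once, then reuse the result per object
_, dark_thresh = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY_INV)

# take the 8 largest contours instead of only the maximum one
largest = sorted(contours, key=cv2.contourArea, reverse=True)[:8]
for i, cnt in enumerate(largest):
    obj_mask = np.zeros_like(gray)
    cv2.drawContours(obj_mask, [cnt], 0, 255, -1)  # fill this object only
    total = cv2.countNonZero(obj_mask)
    dark = cv2.countNonZero(cv2.bitwise_and(dark_thresh, obj_mask))
    print(f"object {i}: pixels = {total}, dark = {dark}, %dark = {dark / total * 100:.1f}")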
I have some images which are in the form of a grid. I have code that finds the largest rectangle in the grid. However, it works on some images and completely fails on others; I need help fine-tuning the code to work in all cases. Ideally I'd like the contours exactly on the border.
The code:
import cv2
import numpy as np
img = cv2.imread('3.jpg')
frame = cv2.resize(img,(1000,500))
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
lower_red = np.array([0, 10, 120])
upper_red = np.array([15, 255, 255])
mask = cv2.inRange (hsv, lower_red, upper_red)
cv2.imshow("a",mask)
cv2.waitKey(0)
contours, _ = cv2.findContours(mask.copy(),
                               cv2.RETR_TREE,
                               cv2.CHAIN_APPROX_SIMPLE)
if len(contours) > 0:
    red_area = max(contours, key=cv2.contourArea)
    x, y, w, h = cv2.boundingRect(red_area)
    cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)
cv2.imshow('frame', frame)
cv2.imshow('mask', mask)
cv2.waitKey(0)
Image in which it works correctly:
Image in which the code does not work:
Your problem of finding the max contour after thresholding can be solved simply by identifying the corners of the masked (thresholded) region:
import cv2
import numpy as np
img = cv2.imread('2.png')
frame = cv2.resize(img,(1000,500))
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
lower_red = np.array([0, 10, 120])
upper_red = np.array([15, 255, 255])
mask = cv2.inRange (hsv, lower_red, upper_red)
y0 = np.min(np.where(mask>0)[0])
x0 = np.min(np.where(mask>0)[1])
y1 = np.max(np.where(mask>0)[0])
x1 = np.max(np.where(mask>0)[1])
cv2.rectangle(frame,(x0, y0),(x1, y1),(0, 0, 255), 2)
cv2.imshow('frame', frame)
cv2.imshow('mask', mask)
cv2.waitKey(0)
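As a side note, the same box can also be computed with cv2.boundingRect on the nonzero mask points (a sketch that reuses mask and frame from the script above):

# sketch only: reuses `mask` and `frame` from the script above
ys, xs = np.where(mask > 0)
points = np.column_stack((xs, ys)).astype(np.int32)
x, y, w, h = cv2.boundingRect(points)  # same box as the min/max corners
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)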
I'm trying to remove the background and the white text from a photo (1 below), but I can only get results like images 2 and 3: they still have white text inside the circle.
I've used the following code.
Any help is greatly appreciated.
import cv2
import numpy as np
import matplotlib.pyplot as plt

img = cv2.imread('sample.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# Crop image
croped_img = img[51:403, 102:454]
# plt.imshow(croped_img)
radius = 176
cx, cy = radius, radius  # the center of the circle
x, y = np.ogrid[-radius: radius, -radius: radius]
index = x**2 + y**2 > radius**2
# black out everything outside the circle
croped_img[cy-radius:cy+radius, cx-radius:cx+radius][index] = 0
plt.imshow(croped_img)
croped_img = cv2.cvtColor(croped_img, cv2.COLOR_BGR2RGB)
cv2.imwrite('croped_circle_2.jpg', croped_img)
One approach is to create a mask of the text and use that to do inpainting. In Python/OpenCV, there are two forms of inpainting: Telea and Navier-Stokes. Both produce about the same results.
Input:
import cv2
import numpy as np
# read input
img = cv2.imread('circle_text.png')
# convert to gray
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# threshold and invert
thresh = cv2.threshold(gray, 155, 255, cv2.THRESH_BINARY)[1]
# apply morphology close
kernel = np.ones((3,3), np.uint8)
thresh = cv2.morphologyEx(thresh, cv2.MORPH_DILATE, kernel)
# get contours and filter to keep only small regions
mask = np.zeros_like(gray, dtype=np.uint8)
cntrs = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cntrs = cntrs[0] if len(cntrs) == 2 else cntrs[1]
for c in cntrs:
area = cv2.contourArea(c)
if area < 1000:
cv2.drawContours(mask,[c],0,255,-1)
# do inpainting
result1 = cv2.inpaint(img,mask,3,cv2.INPAINT_TELEA)
result2 = cv2.inpaint(img,mask,3,cv2.INPAINT_NS)
# save results
cv2.imwrite('circle_text_threshold.png', thresh)
cv2.imwrite('circle_text_mask.png', mask)
cv2.imwrite('circle_text_inpainted_telea.png', result1)
cv2.imwrite('circle_text_inpainted_ns.png', result2)
# show results
cv2.imshow('thresh',thresh)
cv2.imshow('mask',mask)
cv2.imshow('result1',result1)
cv2.imshow('result2',result2)
cv2.waitKey(0)
cv2.destroyAllWindows()
Threshold image:
Mask image:
Telea Inpainting:
Navier-Stokes Inpainting:
I have my code like this:
import numpy as np
import cv2
im = cv2.imread('snorlax.jpg')
imgray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(imgray, 127, 255, 0)
im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
print(contours)
cv2.drawContours(im, contours, -1, (0, 255, 0), 3)
cv2.imshow("imagen", im)
input()
The print shows a list of arrays with numbers in each one, and I don't know whether those are the (x, y) points of the contours. Also, cv2.imshow only shows me a grey screen and doesn't show the contours of the image.
import numpy as np
import cv2
img = cv2.imread("snorlax.jpg", cv2.IMREAD_GRAYSCALE)
canny = cv2.Canny(img, 100, 150)
cv2.imshow("Image", img)
cv2.imshow("Canny", canny)
indices = np.where(canny != [0])
coordinates = zip(indices[0], indices[1])
coordinates_list = ""
for coordinate in coordinates:
    x = "'('{}, {}')', ".format(coordinate[1] / 100, -coordinate[0] / 100)
    coordinates_list += x
coordinates_list = "'('{}')'".format(coordinates_list)
coordinates_list = coordinates_list.replace("'('", "{")
coordinates_list = coordinates_list.replace("')'", "}")
print(coordinates_list)
cv2.waitKey(0)
cv2.destroyAllWindows()
I used Canny to solve the problem; then, with NumPy's where function, I get all the white points and zip them into a variable. The last part of the code puts the points into a specific format so they can be used in another language.
I have code here that detects laser light, but I'm experiencing problems in different lighting conditions. So I think I might solve it if I added a check that the detected light is a circle.
The problem is I don't know how to apply it here. Here is what the laser light looks like in the mask.
I'm hoping you can help me with my code.
Here's my code:
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)  # convert from BGR to HSV color space
    lower = np.array([0, 0, 255])  # range of laser light
    upper = np.array([255, 255, 255])
    mask = cv2.inRange(hsv, lower, upper)
    maskcopy = mask.copy()
    circles = cv2.HoughCircles(maskcopy, cv2.HOUGH_GRADIENT, 1, 500,
                               param1=20, param2=10,
                               minRadius=1, maxRadius=3)
    _, cont, _ = cv2.findContours(maskcopy, cv2.RETR_LIST,
                                  cv2.CHAIN_APPROX_SIMPLE)
    if circles is not None:
        circles = np.round(circles[0, :]).astype('int')
        for (x, y, r) in circles:
            cv2.circle(frame, (x, y), r, (0, 255, 0), 4)
    cv2.imshow('mask', mask)
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
Screenshot:
I tried something similar once, and the best solution for me was the following (I saved your image to my hard disk and wrote sample code):
import cv2
import math
img = cv2.imread('laser.jpg')
gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret,thresh = cv2.threshold(gray_image,100,255,cv2.THRESH_BINARY)
im2, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
area = sorted(contours, key=cv2.contourArea, reverse=True)
contour = area[0]
(x,y),radius = cv2.minEnclosingCircle(contour)
radius = int(radius)
area = cv2.contourArea(contour)
circ = 4*area/(math.pi*(radius*2)**2)
cv2.drawContours(img, [contour], 0, (0,255,0), 2)
cv2.imshow('img', img)
print(circ)
So the idea is to find your contour with cv2.findContours (the laser point) and fit an enclosing circle to it so you can get the radius; then get the area of your contour with cv2.contourArea and check its circularity with the formula circ = 4*area/(math.pi*(radius*2)**2). A perfect circle would return a result of 1; the closer it gets to 0, the less "circular" your contour is (see pictures below). Hope it helps!
So your code should be something like this, and it will not raise an error (I tried it and it works):
import cv2
import numpy as np
import math

cap = cv2.VideoCapture(0)
while True:
    try:
        ret, frame = cap.read()
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)  # convert from BGR to HSV color space
        lower = np.array([0, 0, 255])  # range of laser light
        upper = np.array([255, 255, 255])
        mask = cv2.inRange(hsv, lower, upper)
        im2, contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
        area = sorted(contours, key=cv2.contourArea, reverse=True)
        contour = area[0]
        (x, y), radius = cv2.minEnclosingCircle(contour)
        radius = int(radius)
        area = cv2.contourArea(contour)
        circ = 4 * area / (math.pi * (radius * 2) ** 2)
        print(circ)
    except:
        pass
    cv2.imshow('mask', mask)
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
I came up with a solution using a different approach.
My idea was to create a circle centered at the center of the white region of the mask, with radius equal to half the width of that region, and then check how similar this circle is to the mask.
Here is the code:
white = np.where(mask>250) # you can also make it == 255
white = np.asarray(white)
minx = min(white[0])
maxx = max(white[0])
miny = min(white[1])
maxy = max(white[1])
radius = int((maxx-minx)/2)
cx = minx + radius
cy = miny + radius
black = mask.copy()
black[:,:]=0
cv2.circle(black, (cy,cx), radius, (255,255,255),-1)
diff = cv2.bitwise_xor(black, mask)
diffPercentage = np.count_nonzero(diff) / diff.size  # fraction of pixels where the circle and the mask differ
print (diffPercentage)
Then you have to come up with what percentage threshold is "similar" enough for you.
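For example, a check like the following could be used (the 0.05 threshold is only an assumption to illustrate; tune it for your own lighting):

# assumed threshold purely for illustration
if diffPercentage < 0.05:
    print('blob is round enough, probably the laser dot')
else:
    print('blob is not circular enough')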
The code above was tested reading a mask from disk, but a video is just a sequence of images. Without your webcam input I cannot test the code with video, but it should work like this:
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower = np.array([0, 0, 255])  # range of laser light
    upper = np.array([255, 255, 255])
    mask = cv2.inRange(hsv, lower, upper)
    white = np.where(mask > 250)  # you can also make it == 255
    white = np.asarray(white)
    minx = min(white[0])
    maxx = max(white[0])
    miny = min(white[1])
    maxy = max(white[1])
    radius = int((maxx - minx) / 2)
    cx = minx + radius
    cy = miny + radius
    black = mask.copy()
    black[:, :] = 0
    cv2.circle(black, (cy, cx), radius, (255, 255, 255), -1)
    diff = cv2.bitwise_xor(black, mask)
    diffPercentage = np.count_nonzero(diff) / diff.size
    print(diffPercentage)
    cv2.imshow('mask', mask)
    cv2.imshow('diff', diff)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()