I have some images which are in the form of a grid. I have code that finds the largest rectangle in the grid; however, it works on some images and completely fails on others. I need help fine-tuning the code so it works in all cases. Ideally I'd like the contours to lie exactly on the border.
The code:
import cv2
import numpy as np
img = cv2.imread('3.jpg')
frame = cv2.resize(img,(1000,500))
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
lower_red = np.array([0, 10, 120])
upper_red = np.array([15, 255, 255])
mask = cv2.inRange(hsv, lower_red, upper_red)
cv2.imshow("a", mask)
cv2.waitKey(0)
contours, _ = cv2.findContours(mask.copy(),
                               cv2.RETR_TREE,
                               cv2.CHAIN_APPROX_SIMPLE)
if len(contours) > 0:
    red_area = max(contours, key=cv2.contourArea)
    x, y, w, h = cv2.boundingRect(red_area)
    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
cv2.imshow('frame', frame)
cv2.imshow('mask', mask)
cv2.waitKey(0)
Image in which it works correctly:
Image in which the code does not work:
Your problem of finding the max contour after thresholding can be solved simply by identifying the corners of the masked region:
import cv2
import numpy as np
img = cv2.imread('2.png')
frame = cv2.resize(img,(1000,500))
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
lower_red = np.array([0, 10, 120])
upper_red = np.array([15, 255, 255])
mask = cv2.inRange(hsv, lower_red, upper_red)
# corner coordinates of the masked (red) region
ys, xs = np.where(mask > 0)
y0, x0 = ys.min(), xs.min()
y1, x1 = ys.max(), xs.max()
cv2.rectangle(frame, (x0, y0), (x1, y1), (0, 0, 255), 2)
cv2.imshow('frame', frame)
cv2.imshow('mask', mask)
cv2.waitKey(0)
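Equivalently, the same box can be obtained without indexing np.where by hand, by passing the non-zero mask pixels to cv2.boundingRect. A minimal sketch, assuming the mask and frame from the code above:
pts = cv2.findNonZero(mask)  # Nx1x2 array of (x, y) coordinates of the red pixels
x, y, w, h = cv2.boundingRect(pts)  # tight axis-aligned box around those pixels
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)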
Hi, I have code that finds and draws contours around objects that are yellow.
Here is the code:
import cv2
import numpy as np
from PIL import ImageGrab
lower_yellow = np.array([20, 100, 100])
upper_yellow = np.array([30, 255, 255])
def test():
    while True:
        imgDef = ImageGrab.grab()
        image = np.array(imgDef)
        rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        hsv = cv2.cvtColor(rgb, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
        kernel = np.ones((5, 5), np.uint8)
        mask = cv2.dilate(mask, kernel, iterations=1)
        mask = cv2.erode(mask, kernel, iterations=1)
        contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cv2.drawContours(image, contours, -1, (0, 255, 0), 3)
        cv2.imshow('test', image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()

if __name__ == "__main__":
    test()
Right now the output is as follows:
I wish to group contours that are in close proximity to one another and draw a bounding box around them like so:
How can I achieve this? Am I right to be looking into scikit-learn's KMeans function to group them?
You can use cv2.kmeans for that. I suggest changing cv2.RETR_EXTERNAL to cv2.RETR_TREE. Here is a possible solution; the only catch is that you need to know the number of clusters before calling cv2.kmeans.
import cv2
import numpy as np
from PIL import ImageGrab
import random
lower_yellow = np.array([20, 100, 100])
upper_yellow = np.array([30, 255, 255])
def get_contours_by_zones(contours, number_of_zones):
    center_points = []
    for cnt in contours:
        cx = cnt[:, 0, 0].mean()
        cy = cnt[:, 0, 1].mean()
        center_points.append((cx, cy))
    center_points = np.array(center_points).astype(np.float32)
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    _, _, cluster_centers = cv2.kmeans(center_points, number_of_zones, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
    contour_zones = [[] for _ in range(number_of_zones)]
    for i, (cnt_x, cnt_y) in enumerate(center_points):
        tmp = cluster_centers - (cnt_x, cnt_y)
        distances = np.sqrt((tmp**2).sum(axis=1))
        cluster_index = np.argmin(distances)
        contour_zones[cluster_index].append(contours[i])
    return contour_zones
def test():
    while True:
        imgDef = ImageGrab.grab()
        image = np.array(imgDef)
        rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        hsv = cv2.cvtColor(rgb, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
        kernel = np.ones((5, 5), np.uint8)
        mask = cv2.dilate(mask, kernel, iterations=1)
        mask = cv2.erode(mask, kernel, iterations=1)
        contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        # ignore the outer contour
        contour_per_zone = get_contours_by_zones(contours=contours[1:], number_of_zones=3)
        canvas = np.empty_like(image)
        canvas[...] = 255
        for zone in contour_per_zone:
            # add the corresponding code to get xmin, xmax, ymin, ymax from zone,
            # so you can draw the corresponding rectangle
            # draw each zone with a different color
            cv2.drawContours(canvas, zone, -1, (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)), 3)
        cv2.imshow('test', image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()

if __name__ == "__main__":
    test()
The get_contours_by_zones() function uses cv2.kmeans to assign each contour to a group.
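For the bounding box per zone that the comment inside the loop leaves open, here is a minimal sketch, assuming zone is one of the contour lists returned by get_contours_by_zones (the helper name zone_bounding_rect is just for illustration):
def zone_bounding_rect(zone):
    all_points = np.vstack(zone)  # stack every contour of the zone into one Nx1x2 point array
    return cv2.boundingRect(all_points)  # (x, y, w, h) covering the whole group
Inside the for zone in contour_per_zone loop you could then draw it with:
x, y, w, h = zone_bounding_rect(zone)
cv2.rectangle(canvas, (x, y), (x + w, y + h), (0, 0, 0), 2)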
How do I run a loop to get the contours and pixel counts for 8 objects in an image, rather than just finding the max contour and pixel count of a single object?
import cv2
import numpy as np
img = cv2.imread('C:\\Users\\marnes\\Downloads\\25%.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# get shape of largest contour and count pixels
edges = cv2.Canny(image=img, threshold1=100, threshold2=200)
contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
max_contour = max(contours, key=lambda c: cv2.contourArea(c))
mask = np.zeros_like(gray)
cv2.drawContours(mask, [max_contour], 0, 255, -1)
pct_100 = cv2.countNonZero(mask)
cv2.imshow("mask", mask)
# get dark area and count pixels
ret, thresh = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY_INV)
thresh = cv2.bitwise_and(thresh, mask)
pct_dark = cv2.countNonZero(thresh)
cv2.imshow("dark", thresh)
print(f"mask = {pct_100}, dark = {pct_dark}, %dark = {pct_dark / pct_100 * 100}")
first_operator = 100
second_operator = pct_dark / pct_100 * 100
output1 = first_operator - second_operator
parameter = 'starch breakdown'
print([parameter] + [output1])
cv2.waitKey(0)
cv2.destroyAllWindows()
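A minimal sketch of the loop the question asks for, assuming the gray, edges, and contours variables from the code above and simply taking the 8 largest contours by area:
# take the 8 largest contours instead of only the maximum
largest = sorted(contours, key=cv2.contourArea, reverse=True)[:8]
ret, dark = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY_INV)
for i, cnt in enumerate(largest):
    obj_mask = np.zeros_like(gray)
    cv2.drawContours(obj_mask, [cnt], 0, 255, -1)  # filled mask of this object
    total_px = cv2.countNonZero(obj_mask)
    dark_px = cv2.countNonZero(cv2.bitwise_and(dark, obj_mask))
    print(f"object {i}: mask = {total_px}, dark = {dark_px}, %dark = {dark_px / total_px * 100}")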
I'm a beginner and I'm trying to do some line detection in-game.
This is the photo in which I'm trying to detect lanes
This is the result
The HoughLinesP code:
lines = cv2.HoughLinesP(cropped_image, 2, np.pi / 180, 100, np.array([]), minLineLength=50, maxLineGap=5)
The displaying function:
def displayLines(image, lines):
    line_image = np.zeros_like(image)
    if lines is not None:
        for line in lines:
            x1, x2, y1, y2 = line.reshape(4)
            cv2.line(line_image, (x1, x2), (x2, y2), (0, 255, 0), 10)
    return line_image
Here is the cropping function:
def region(image):
    height = image.shape[0]
    polygons = np.array([[
        (570, 640), (1600, 700), (863, 520)
    ]])
    mask = np.zeros_like(image)
    cv2.fillPoly(mask, polygons, 255)
    masked_image = cv2.bitwise_and(canny, mask)
    return masked_image
As input I'm giving an image with the edges displayed. The function:
def canny(image):
    gray = cv2.cvtColor(lane_image, cv2.COLOR_RGB2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    canny = cv2.Canny(blur, 50, 150)
    return canny
I don't know what the problem is.
I recommend you mask out the noise before attempting to detect lines:
import cv2
import numpy as np
img = cv2.imread("driving_game.jpg")
img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower = np.array([18, 120, 200])
upper = np.array([30, 255, 255])
mask = cv2.inRange(img_hsv, lower, upper)
img_masked = cv2.bitwise_and(img, img, mask=mask)
cv2.imshow("Mask", mask)
cv2.imshow("Masked Image", img_masked)
cv2.waitKey(0)
Output:
Where lower = np.array([18, 120, 200]) and upper = np.array([30, 255, 255]) are the lower and upper bounds for the HSV color mask. With the above mask, you won't even need the cv2.HoughLinesP method; you can simply detect the contours of the non-masked object and approximate the results:
import cv2
import numpy as np
def process(img):
    img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    lower = np.array([18, 120, 200])
    upper = np.array([30, 255, 255])
    mask = cv2.inRange(img_hsv, lower, upper)
    mask_canny = cv2.Canny(mask, 50, 50)
    kernel = np.ones((2, 2))
    img_dilate = cv2.dilate(mask_canny, kernel, iterations=7)
    return cv2.erode(img_dilate, kernel, iterations=7)

def draw_lines(img):
    contours, _ = cv2.findContours(process(img), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        peri = cv2.arcLength(cnt, True)
        approx = cv2.approxPolyDP(cnt, 0.13 * peri, True)
        cv2.drawContours(img, [approx], -1, (0, 0, 255), 5)
img = cv2.imread("driving_game.jpg")
draw_lines(img)
cv2.imshow("Lines Detected", img)
cv2.waitKey(0)
Output:
I have this image with 3 channels RGB (a result of a VARI Index computation) and I would like to draw bounding boxes (rectangles) around the plants, represented in green here. What is the best and easiest way to do it with OpenCV / python?
I guess it's an easy problem for OpenCV experts, but I could not find good tutorials online to do this for multiple objects.
The closest tutorial I found was: determining-object-color-with-opencv
The assumptions for the bounding boxes should/could be:
green is the dominant color.
it should be more than X pixels.
Thanks in advance!
Just answering my own question after stumbling upon this resource: https://docs.opencv.org/3.4/da/d0c/tutorial_bounding_rects_circles.html
May not be the best answer but it somehow solves my problem!
import cv2
import numpy as np
image = cv2.imread('vari3.png')
# https://www.pyimagesearch.com/2016/02/15/determining-object-color-with-opencv/
# https://docs.opencv.org/3.4/da/d0c/tutorial_bounding_rects_circles.html
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
# mask: green is dominant.
thresh = np.array((image.argmax(axis=-1) == 1) * 255, dtype=np.uint8)
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]
contours_poly = [None] * len(cnts)
boundRect = [None] * len(cnts)
for i, c in enumerate(cnts):
    contours_poly[i] = cv2.approxPolyDP(c, 3, True)
    boundRect[i] = cv2.boundingRect(contours_poly[i])
for i in range(len(cnts)):
    # cv2.drawContours(image, contours_poly, i, (0, 255, 0), thickness=2)
    pt1 = (int(boundRect[i][0]), int(boundRect[i][1]))
    pt2 = (int(boundRect[i][0] + boundRect[i][2]), int(boundRect[i][1] + boundRect[i][3]))
    # skip small detections: geometric mean of width and height under 30 px
    if np.sqrt((pt2[1] - pt1[1]) * (pt2[0] - pt1[0])) < 30:
        continue
    cv2.rectangle(image, pt1, pt2, (0, 0, 0), 2)
cv2.imwrite('result.png', image)
cv2.imshow("Image", image)
cv2.waitKey(0)
You need to do HSV filtering:
1. Change the image colors from BGR to HSV (Hue, Saturation, Value).
2. Filter a certain range of hue and saturation that matches green by thresholding.
Refer to this page for code to do the first 2: https://pythonprogramming.net/color-filter-python-opencv-tutorial/
3. Do some morphological operations like erosion, dilation, opening, and closing to remove the small bits of green that don't represent trees and to connect the trees that look broken together.
Refer to https://docs.opencv.org/master/d9/d61/tutorial_py_morphological_ops.html
4. Detect the contours, then draw the rectangles.
import cv2
import numpy as np
img = cv2.imread('8FGo1.jpg',1)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower_red = np.array([45,100,50])
upper_red = np.array([75,255,255])
mask = cv2.inRange(hsv, lower_red, upper_red)
kernel = np.ones((5,5),np.uint8)
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for contour in contours:
    x, y, w, h = cv2.boundingRect(contour)
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.imshow('img', img)
#cv2.imshow('mask', mask)
cv2.waitKey(0)
Output
I have an image such as this
I am trying to detect and remove the arrow from this image so that I end up with an image that just has the text.
I tried the below approach but it isn't working
image_src = cv2.imread("roi.png")
gray = cv2.cvtColor(image_src, cv2.COLOR_BGR2GRAY)
canny=cv2.Canny(gray,50,200,3)
ret, gray = cv2.threshold(canny, 10, 255, 0)
contours, hierarchy = cv2.findContours(gray, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
largest_area = sorted(contours, key=cv2.contourArea)[-1]
mask = np.ones(image_src.shape[:2], dtype="uint8") * 255
cv2.drawContours(mask, [largest_area], -1, 0, -1)
image = cv2.bitwise_and(image_src, image_src, mask=mask)
The above code seems to give me back the same image WITH the arrow.
How can I remove the arrow?
The following will remove the largest contour:
import numpy as np
import cv2
image_src = cv2.imread("roi.png")
gray = cv2.cvtColor(image_src, cv2.COLOR_BGR2GRAY)
ret, gray = cv2.threshold(gray, 250, 255,0)
contours, hierarchy = cv2.findContours(gray, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
mask = np.zeros(image_src.shape, np.uint8)
largest_areas = sorted(contours, key=cv2.contourArea)
cv2.drawContours(mask, [largest_areas[-2]], 0, (255,255,255,255), -1)
removed = cv2.add(image_src, mask)
cv2.imwrite("removed.png", removed)
Note: the largest contour in this case will be the whole image, so the arrow is actually the second-largest contour, hence largest_areas[-2].
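A small variation on the same idea: instead of building a separate mask and calling cv2.add, you could paint the second-largest contour white directly onto the source image, which overwrites the arrow in place:
cv2.drawContours(image_src, [largest_areas[-2]], 0, (255, 255, 255), -1)
cv2.imwrite("removed.png", image_src)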