I have found lots of information about how to draw a rectangle around the biggest blue object in the frame, but I need to draw rectangles around all of the blue objects.
This is my current code:
import numpy as np
import cv2

cap = cv2.VideoCapture(0)

while True:
    _, frame = cap.read()

    # Convert BGR to HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # define range of blue color in HSV
    lower_blue = np.array([100,50,50])
    upper_blue = np.array([130,255,255])

    # Threshold the HSV image to get only blue colors
    mask = cv2.inRange(hsv, lower_blue, upper_blue)

    bluecnts = cv2.findContours(mask.copy(),
                                cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]

    if len(bluecnts) > 0:
        blue_area = max(bluecnts, key=cv2.contourArea)
        print(blue_area)
        (xg, yg, wg, hg) = cv2.boundingRect(blue_area)
        cv2.rectangle(frame, (xg, yg), (xg+wg, yg+hg), (0,255,0), 2)

    result = cv2.bitwise_and(frame, frame, mask=mask)

    cv2.imshow('frame', frame)
    cv2.imshow('mask', mask)
    cv2.imshow('blue', result)

    if cv2.waitKey(1) == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
And this is what it currently does: it draws one rectangle around the biggest blue object, but I need a rectangle around each one.
In your Python/OpenCV code, try replacing
if len(bluecnts) > 0:
    blue_area = max(bluecnts, key=cv2.contourArea)
    print(blue_area)
    (xg, yg, wg, hg) = cv2.boundingRect(blue_area)
    cv2.rectangle(frame, (xg, yg), (xg+wg, yg+hg), (0,255,0), 2)
with
if len(bluecnts) > 0:
    for cnt in bluecnts:
        area = cv2.contourArea(cnt)
        print(area)
        (xg, yg, wg, hg) = cv2.boundingRect(cnt)
        cv2.rectangle(frame, (xg, yg), (xg+wg, yg+hg), (0,255,0), 2)
(Untested)
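If tiny specks of blue end up drawing spurious rectangles, you could additionally filter by contour area inside that loop. This is only a sketch, and the minimum area of 500 is an assumed value you would tune for your camera and object size:

min_area = 500  # assumed threshold; adjust for your scene
for cnt in bluecnts:
    if cv2.contourArea(cnt) < min_area:
        continue  # skip small noise contours
    (xg, yg, wg, hg) = cv2.boundingRect(cnt)
    cv2.rectangle(frame, (xg, yg), (xg + wg, yg + hg), (0, 255, 0), 2)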
Our objective is to classify plants in a field based on their leaves. We have trained our model on segmented images (these images only have a leaf and a black background), but the live feed from the camera will look like this:
So our idea is to find the biggest contour, separate the leaf marked by it and give it a black background.
This is roughly what we are trying to achieve (except for the small leaf popping in):
Our approach was to draw a bounding box around the leaf and form a new separate frame. This is our code:
def nothing(useless=None):
    pass

cv2.namedWindow("Mask")
cap = cv2.VideoCapture(0)

cv2.createTrackbar('R_l','Mask',26,255,nothing)
cv2.createTrackbar('G_l','Mask',46,255,nothing)
cv2.createTrackbar('B_l','Mask',68,255,nothing)
cv2.createTrackbar('R_h','Mask',108,255,nothing)
cv2.createTrackbar('G_h','Mask',138,255,nothing)
cv2.createTrackbar('B_h','Mask',155,255,nothing)

while True:
    R_l = cv2.getTrackbarPos('R_l', 'Mask')
    G_l = cv2.getTrackbarPos('G_l', 'Mask')
    B_l = cv2.getTrackbarPos('B_l', 'Mask')
    R_h = cv2.getTrackbarPos('R_h', 'Mask')
    G_h = cv2.getTrackbarPos('G_h', 'Mask')
    B_h = cv2.getTrackbarPos('B_h', 'Mask')

    _, frame = cap.read()
    blurred_frame = cv2.blur(frame, (5,5), 0)
    hsv_frame = cv2.cvtColor(blurred_frame, cv2.COLOR_BGR2HSV)

    low_green = np.array([R_l, G_l, B_l])
    high_green = np.array([R_h, G_h, B_h])
    green_mask = cv2.inRange(hsv_frame, low_green, high_green)
    green = cv2.bitwise_and(frame, frame, mask=green_mask)

    contours, _ = cv2.findContours(green_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    try:
        sorted_ = sorted(contours, key=cv2.contourArea, reverse=True)
        biggest = sorted_[0]
        cv2.drawContours(frame, biggest, -1, (255,0,0), 1)
    except:
        pass

    #kernel = np.zeros(frame.shape(), np.uint8)
    x, y, w, h = cv2.boundingRect(biggest)
    roi = frame[y:y+h, x:x+w]

    blurred_frame1 = cv2.blur(roi, (5,5), 0)
    hsv_frame1 = cv2.cvtColor(blurred_frame1, cv2.COLOR_BGR2HSV)
    low_green1 = np.array([R_l, G_l, B_l])
    high_green1 = np.array([R_h, G_h, B_h])
    green_mask1 = cv2.inRange(hsv_frame1, low_green, high_green)
    green1 = cv2.bitwise_and(roi, roi, mask=green_mask1)

    cv2.imshow("frame", frame)
    cv2.imshow("Mask", green1)

    key = cv2.waitKey(1)
    if key == 27:
        break

cap.release()
cv2.destroyAllWindows()
How can we prepare the desired image?
You're on the right track. I suggest using HSV color thresholding with a lower/upper threshold to isolate the green leaves. To determine the lower/upper HSV color threshold ranges, I used the HSV color thresholder script from a previous answer. This will give us a binary mask. From here we perform morphological operations to smooth the image and remove noise. Next we find contours and sort using contour area. We extract the largest contour, draw this onto a blank mask, then bitwise-and to get color. From here we find the bounding rectangle coordinates on the mask then crop the ROI from the color image using Numpy slicing. Here's the result
Code
import numpy as np
import cv2
# Read image, create blank masks, color threshold
image = cv2.imread('1.jpg')
blank_mask = np.zeros(image.shape, dtype=np.uint8)
original = image.copy()
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
lower = np.array([0, 18, 0])
upper = np.array([88, 255, 139])
mask = cv2.inRange(hsv, lower, upper)
# Perform morphological operations
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))
opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=1)
close = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel, iterations=1)
# Find contours and filter for largest contour
# Draw largest contour onto a blank mask then bitwise-and
cnts = cv2.findContours(close, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[0]
cv2.fillPoly(blank_mask, [cnts], (255,255,255))
blank_mask = cv2.cvtColor(blank_mask, cv2.COLOR_BGR2GRAY)
result = cv2.bitwise_and(original,original,mask=blank_mask)
# Crop ROI from result
x,y,w,h = cv2.boundingRect(blank_mask)
ROI = result[y:y+h, x:x+w]
cv2.imshow('result', result)
cv2.imshow('ROI', ROI)
cv2.waitKey()
I need to detect black objects in a real-time video. I found code on the internet for detecting blue objects, so I changed the upper and lower HSV values accordingly (I am not clear about how to convert BGR to HSV), but it is not detecting the black object in the video. The code I am using for blue colour detection is:
import cv2
import numpy as np

cap = cv2.VideoCapture(0)

while(1):
    _, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    lower_red = np.array([110,50,50])
    upper_red = np.array([130,255,255])

    mask = cv2.inRange(hsv, lower_red, upper_red)
    res = cv2.bitwise_and(frame, frame, mask=mask)

    cv2.imshow('frame', frame)
    cv2.imshow('mask', mask)
    cv2.imshow('res', res)

    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        break

cv2.destroyAllWindows()
cap.release()
The output for blue color is:
Original image:
The code I'm using for black is:
import cv2
import numpy as np

cap = cv2.VideoCapture(0)

while(1):
    _, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    lower_red = np.array([0,0,0])
    upper_red = np.array([0,0,0])

    mask = cv2.inRange(hsv, lower_red, upper_red)
    res = cv2.bitwise_and(frame, frame, mask=mask)

    cv2.imshow('frame', frame)
    cv2.imshow('mask', mask)
    cv2.imshow('res', res)

    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        break

cv2.destroyAllWindows()
cap.release()
Result:
Nothing is displayed in the black result. I think the problem is in the HSV conversion, but I am not sure. Also, the detected blue image is not accurate at all and contains noise. How can I achieve black detection and reduce the noise?
The easiest way to detect black would be to do a binary threshold in greyscale. Black pixels will always have a very low value, so it is easier to do this on a 1-channel image than on a 3-channel one. I would recommend:
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray, 15, 255, cv2.THRESH_BINARY_INV)
Change the value of 15 until you get reasonable results; a lower value preserves only darker pixels. If you want to extract the locations of those pixels, you can also get the contours, i.e.
image, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
and then draw the contour back onto the original frame with:
frame = cv2.drawContours(frame, contours, -1,(0,0,255),3)
Alternatively, you might find it easier to invert the image first so that you are trying to extract white pixels. This can lead to less confusion between the pixels you want to extract and the mask value (0). You can do this simply with a numpy subtraction, then set your threshold to a very high value, i.e.:
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = 255-gray
ret, thresh = cv2.threshold(gray, 225, 255, cv2.THRESH_BINARY_INV)
image, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
frame = cv2.drawContours(frame, contours, -1,(0,0,255),3)
black= np.array([0, 0, 0], np.uint8)
grayScale= np.array([0, 0, 29], np.uint8)
The value (29) depends on how much "brightness" you want.
This page is where you can test your color ranges.
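As a sketch of how those two arrays would typically be used (the HSV conversion, the input filename and the window name here are assumptions, not part of the original snippet):

import cv2
import numpy as np

black = np.array([0, 0, 0], np.uint8)
grayScale = np.array([0, 0, 29], np.uint8)   # raise 29 to admit brighter pixels

frame = cv2.imread('frame.jpg')              # assumed input image
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, black, grayScale)    # white where the pixel is "black enough"
res = cv2.bitwise_and(frame, frame, mask=mask)

cv2.imshow('black mask', mask)
cv2.waitKey(0)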
I have code here that detects laser light, but I'm experiencing problems in different lighting conditions. I think I might solve it if I added a check for whether that light is a circle.
The problem is I don't know how to apply it here. Here is what the laser light looks like in the mask.
I'm hoping that you can help me with my code.
Here's my code:
import cv2
import numpy as np

cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)  # convert from bgr to hsv color space

    lower = np.array([0,0,255])  # range of laser light
    upper = np.array([255, 255, 255])
    mask = cv2.inRange(hsv, lower, upper)
    maskcopy = mask.copy()

    circles = cv2.HoughCircles(maskcopy, cv2.HOUGH_GRADIENT, 1, 500,
                               param1=20, param2=10,
                               minRadius=1, maxRadius=3)

    _, cont, _ = cv2.findContours(maskcopy, cv2.RETR_LIST,
                                  cv2.CHAIN_APPROX_SIMPLE)

    if circles is not None:
        circles = np.round(circles[0, :]).astype('int')
        for (x, y, r) in circles:
            cv2.circle(frame, (x, y), r, (0,255,0), 4)

    cv2.imshow('mask', mask)
    cv2.imshow('frame', frame)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
Screenshot:
I tried something similar once and the best solution for me was:
(I saved your image to my hard disk and made a sample code)
import cv2
import math
img = cv2.imread('laser.jpg')
gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret,thresh = cv2.threshold(gray_image,100,255,cv2.THRESH_BINARY)
im2, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
area = sorted(contours, key=cv2.contourArea, reverse=True)
contour = area[0]
(x,y),radius = cv2.minEnclosingCircle(contour)
radius = int(radius)
area = cv2.contourArea(contour)
circ = 4*area/(math.pi*(radius*2)**2)
cv2.drawContours(img, [contour], 0, (0,255,0), 2)
cv2.imshow('img', img)
print(circ)
cv2.waitKey(0)  # keep the window open until a key is pressed
So the idea is to find your contour (the laser point) with cv2.findContours and fit an enclosing circle to it so you can get the radius, then get the area of the contour with cv2.contourArea and check its circularity with the formula circ = 4*area/(math.pi*(radius*2)**2). A perfect circle would return a result of 1; the closer the value gets to 0, the less "circular" your contour is (as in the pictures below). Hope it helps!
So your code should be something like this, and it will return no error (I tried it and it works):
import cv2
import numpy as np
import math

cap = cv2.VideoCapture(0)

while True:
    try:
        ret, frame = cap.read()
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)  # convert from bgr to hsv color space

        lower = np.array([0,0,255])  # range of laser light
        upper = np.array([255, 255, 255])
        mask = cv2.inRange(hsv, lower, upper)

        im2, contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
        area = sorted(contours, key=cv2.contourArea, reverse=True)
        contour = area[0]

        (x, y), radius = cv2.minEnclosingCircle(contour)
        radius = int(radius)
        area = cv2.contourArea(contour)
        circ = 4*area/(math.pi*(radius*2)**2)
        print(circ)
    except:
        pass

    cv2.imshow('mask', mask)
    cv2.imshow('frame', frame)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
I came up with a solution with a different approach.
My idea was to create a circle centered at the center of the white region of the mask, with a radius equal to half the width of that white region, and then check how similar this circle is to the mask.
Here is the code:
white = np.where(mask>250) # you can also make it == 255
white = np.asarray(white)
minx = min(white[0])
maxx = max(white[0])
miny = min(white[1])
maxy = max(white[1])
radius = int((maxx-minx)/2)
cx = minx + radius
cy = miny + radius
black = mask.copy()
black[:,:]=0
cv2.circle(black, (cy,cx), radius, (255,255,255),-1)
diff = cv2.bitwise_xor(black, mask)
diffPercentage = np.count_nonzero(diff)/diff.size  # fraction of pixels that differ
print (diffPercentage)
Then you have to come up with what percentage threshold is "similar" enough for you.
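For example, a minimal sketch of turning that percentage into a yes/no decision (the 0.05 cutoff below is purely an assumed starting point to tune):

threshold = 0.05  # assumed cutoff; tune it for your setup
is_circular = diffPercentage < threshold
if is_circular:
    print('laser spot looks circular')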
The code above was tested reading a mask from disk, but a video is just a sequence of images. Without your webcam input I cannot test the code with video, but it should work like this:
import cv2
import numpy as np

cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    lower = np.array([0,0,255])  # range of laser light
    upper = np.array([255, 255, 255])
    mask = cv2.inRange(hsv, lower, upper)

    white = np.where(mask > 250)  # you can also make it == 255
    white = np.asarray(white)
    minx = min(white[0])
    maxx = max(white[0])
    miny = min(white[1])
    maxy = max(white[1])

    radius = int((maxx - minx)/2)
    cx = minx + radius
    cy = miny + radius

    black = mask.copy()
    black[:, :] = 0
    cv2.circle(black, (cy, cx), radius, (255,255,255), -1)

    diff = cv2.bitwise_xor(black, mask)
    diffPercentage = np.count_nonzero(diff)/diff.size  # fraction of pixels that differ
    print(diffPercentage)

    cv2.imshow('mask', mask)
    cv2.imshow('diff', diff)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
I am new to OpenCV and I am looking for some high-level guidance for an application. I am working on a program that can detect red and blue beanbags that are in the frame of the camera. I played around with the example code offered by OpenCV to detect blue colors and modified it slightly to detect red as well.
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg

cap = cv2.VideoCapture(0)

while(1):
    # Take each frame
    _, frame = cap.read()

    # Convert BGR to HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # define range of blue color in HSV
    lower_blue = np.array([110,50,50])
    upper_blue = np.array([130,255,255])
    lower_red = np.array([-20, 100, 100])
    upper_red = np.array([13, 255, 255])

    # Threshold the HSV image to get only blue colors
    mask = cv2.inRange(hsv, lower_red, upper_red)

    # Bitwise-AND mask and original image
    res = cv2.bitwise_and(frame, frame, mask=mask)

    cv2.imshow('frame', frame)
    cv2.imshow('mask', mask)
    cv2.imshow('res', res)

    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        break

cv2.destroyAllWindows()
This is (slightly modified) copied-and-pasted code from the OpenCV site. I am looking for the correct way to analyze the res numpy array of dimensions <460, 640, 3> in order to:
detect that I have red/blue object(s) on my screen;
do something with this information, such as print("1 red and 2 blue squares detected").
Image link:
Input, res and mask image of blue beanbag
You will need to obtain two masks, one for red and the other for blue as:
mask_red = cv2.inRange(hsv, lower_red, upper_red)
mask_blue = cv2.inRange(hsv, lower_blue, upper_blue)
Now let's define a function that detects whether the area covered in a given mask is above a threshold, so we can decide whether a bean bag is present. For that purpose we will use cv2.findContours.
def is_object_present(mask, threshold):
    im, contours, hierarchy = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    # Find the largest contour
    largest_contour = max(contours, key=lambda x: cv2.contourArea(x))
    if cv2.contourArea(largest_contour) > threshold:
        return True
    return False
Now you call this method on both masks to find out individually whether a red bean bag or a blue bean bag is present:
# Adjust this manually as per your needs
bean_bag_area_threshold = 5000
is_red_bean_bag_present = is_object_present(mask_red, bean_bag_area_threshold)
is_blue_bean_bag_present = is_object_present(mask_blue, bean_bag_area_threshold)
if is_red_bean_bag_present and is_blue_bean_bag_present:
    print("Both bean bags are present.")
I want to track 3 colors, but I have a problem with this statement:
(ti,contours,hierarchy)=cv2.findContours(red,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
ValueError: need more than 2 values to unpack
When I try:
(contours,hierarchy)=cv2.findContours(red,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
There is no error but the variables contours and hierarchy are empty and I get the following error for cv2.imshow("Color Tracking",img):
"error in size img"
I used Python 2.7 and OpenCV 2.4.
My code is :
#importing modules
import cv2
import numpy as np
#capturing video through webcam
webcam = cv2.VideoCapture(0)
while(webcam.isOpened()):
ret, img = webcam.read()
if ret:
hsv=cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
#definig the range of red color
red_lower=np.array([136,87,111],np.uint8)
red_upper=np.array([180,255,255],np.uint8)
#defining the Range of Blue color
blue_lower=np.array([99,115,150],np.uint8)
blue_upper=np.array([110,255,255],np.uint8)
#defining the Range of yellow color
yellow_lower=np.array([22,60,200],np.uint8)
yellow_upper=np.array([60,255,255],np.uint8)
#finding the range of red,blue and yellow color in the image
red=cv2.inRange(hsv, red_lower, red_upper)
blue=cv2.inRange(hsv,blue_lower,blue_upper)
yellow=cv2.inRange(hsv,yellow_lower,yellow_upper)
#Morphological transformation, Dilation
kernal = np.ones((5 ,5), "uint8")
red=cv2.dilate(red, kernal)
res=cv2.bitwise_and(img, img, mask = red)
blue=cv2.dilate(blue,kernal)
res1=cv2.bitwise_and(img, img, mask = blue)
yellow=cv2.dilate(yellow,kernal)
res2=cv2.bitwise_and(img, img, mask = yellow)
#Tracking the Red Color
(ti,contours,hierarchy)=cv2.findContours(red,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
#cv2.findContours(red, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE[, contours[, hierarchy[, offset]]])
for pic, contour in enumerate(contours):
area = cv2.contourArea(contour)
if(area>300):
x,y,w,h = cv2.boundingRect(contour)
img = cv2.rectangle(img,(x,y),(x+w,y+h),(0,0,255),2)
cv2.putText(img,"RED color",(x,y),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,255))
#Tracking the Blue Color
(ti,contours,hierarchy)=cv2.findContours(blue,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
for pic, contour in enumerate(contours):
area = cv2.contourArea(contour)
if(area>300):
x,y,w,h = cv2.boundingRect(contour)
img = cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
cv2.putText(img,"Blue color",(x,y),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255,0,0))
#Tracking the yellow Color
(ti,contours,hierarchy)=cv2.findContours(yellow,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
for pic, contour in enumerate(contours):
area = cv2.contourArea(contour)
if(area>300):
x,y,w,h = cv2.boundingRect(contour)
img = cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)
cv2.putText(img,"yellow color",(x,y),cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0,255,0))
#cv2.imshow("Redcolour",red)
cv2.imshow("Color Tracking",img)
#cv2.imshow("red",res)
if cv2.waitKey(10) & 0xFF == ord('q'):
cap.release()
cv2.destroyAllWindows()
break
Hi, I tried to replicate your error. What I have noted is that there is no check in your code for whether the colors you have set are actually present in a frame. This is why you get the "error in size img": the code did not find any matching color and masked an empty array. My advice is to try the code with a single frame and a single color, like this:
import cv2
import numpy as np

img = cv2.imread('frame1.jpg')

#Convert BGR to HSV
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

kernal = np.ones((5, 5), "uint8")

# define range of yellow color in HSV
yellow_lower = np.array([22,60,200], np.uint8)
yellow_upper = np.array([60,255,255], np.uint8)

# Threshold the HSV image to get only yellow colors
yellow = cv2.inRange(hsv, yellow_lower, yellow_upper)
yellow = cv2.dilate(yellow, kernal)
res2 = cv2.bitwise_and(img, img, mask=yellow)

(ti, contours, hierarchy) = cv2.findContours(yellow, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

for pic, contour in enumerate(contours):
    area = cv2.contourArea(contour)
    if(area > 300):
        x, y, w, h = cv2.boundingRect(contour)
        img = cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0), 2)
        cv2.putText(img, "yellow color", (x,y), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0,255,0))

cv2.imshow("Color Tracking", img)
cv2.waitKey(0)
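Note that the number of values returned by cv2.findContours depends on the OpenCV version (2.4 and 4.x return two values, 3.x returns three), which is what caused the original "need more than 2 values to unpack" error. A version-agnostic way to unpack it, sketched under no assumption about which version is installed:

found = cv2.findContours(yellow, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# OpenCV 2.4/4.x return (contours, hierarchy); OpenCV 3.x returns (image, contours, hierarchy)
contours = found[0] if len(found) == 2 else found[1]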