I want to do find contours in camera video and Draw the line with Contours
But I got Something Error on my code :|
File ".\Contour.py", line 28, in <module>
cv2.drawContours(frame, contours[i], 1, (0, 0, 255), 3)
cv2.error: OpenCV(4.5.1) C:\Users\appveyor\AppData\Local\Temp\1\pip-req-build-r2ue8w6k\opencv\modules\imgproc\src\drawing.cpp:2490: error:
(-215:Assertion failed) 0 <= contourIdx && contourIdx < (int)last in function 'cv::drawContours'
What I should do in this situation?
My Full Code
import cv2
import sys
import time
import numpy as np
cap = cv2.VideoCapture(0)
while True:
# if cap.isOpened() == False:
# print("Cant Open The Camera")
# sys.exit(1)
ret, frame = cap.read()
# if ret == False:
# print("Can't load the Camera")
# sys.exit(1)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
thresh, binary = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY)
contours = cv2.findContours(binary, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
print(len(contours))
if len(contours) > 0:
# cnt = contours[len(contours) - 1]
for i in range(len(contours)):
cv2.drawContours(frame, contours[i], 1, (0, 0, 255), 3)
key = cv2.waitKey(1)
cv2.imshow("frame", frame)
# cv2.imshow("gray", gray)
# cv2.imshow("binary", binary)
time.sleep(0.5)
if key == 27:
break
cap.release()
cv2.destroyAllWindows()
My Environment
openCV : '4.5.1'
python : '3.7.0'
Camera : Laptop Built-In Camera
:D
cv2.drawContours is expecting a list and the index in that list you want to plot. In your code, the list has just one item, so 1 is out of range.
You could fix that using the following code:
for i in range(len(contours):
cv2.drawContours(frame, [contours[i]], 0, (0,0,255), 3)
If you want to draw all contours, simply pass all the contours to the function:
cv2.drawContours(frame, contours, -1, (0, 0, 255), 3)
As a side note, since cv2.drawContours overwrites the input image (frame in your code), I would recommend creating a copy before drawing the contours, so you can keep the original image for further processing if needed:
output = frame.copy()
cv2.drawContours(output, contours, -1, (0, 0, 255), 3)
Related
Hi I wrote this code with the objective of detect contours of an object from a live video produced by a camera (in videoCapture I put 0 as input to indicate the camera built-in of computer.
import cv2
import numpy as np
def nothing(x):
pass
cap = cv2.VideoCapture(0)
cv2.namedWindow("Trackbars")
cv2.createTrackbar("L-H", "Trackbars", 0, 180, nothing)
cv2.createTrackbar("L-S", "Trackbars", 68, 255, nothing)
cv2.createTrackbar("L-V", "Trackbars", 154, 255, nothing)
cv2.createTrackbar("U-H", "Trackbars", 180, 180, nothing)
cv2.createTrackbar("U-S", "Trackbars", 255, 255, nothing)
cv2.createTrackbar("U-V", "Trackbars", 243, 255, nothing)
while True:
_, frame = cap.read()
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
l_h = cv2.getTrackbarPos("L-H", "Trackbars")
l_s = cv2.getTrackbarPos("L-S", "Trackbars")
l_v = cv2.getTrackbarPos("L-V", "Trackbars")
u_h = cv2.getTrackbarPos("U-H", "Trackbars")
u_s = cv2.getTrackbarPos("U-S", "Trackbars")
u_v = cv2.getTrackbarPos("U-V", "Trackbars")
lower_red = np.array([l_h, l_s, l_v])
upper_red = np.array([u_h, u_s, u_v])
mask = cv2.inRange(hsv, lower_red, upper_red)
# rilevazione contorni
_, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
print(contours)
for cnt in contours:
cv2.drawContours(frame, [int(cnt)], 0, (0, 0, 0), 5)
cv2.imshow("Frame", frame)
cv2.imshow("Mask", mask)
key = cv2.waitKey(1)
if key == 27:
break
cap.release()
cv2.destroyAllWindows()
when the execution arrives to cv2.findContours I receive this error message
Traceback (most recent call last):
File "c:\Users\gpoli\Desktop (3176)\peachthon\rilevaContorno.py", line 37, in <module>
_, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
ValueError: not enough values to unpack (expected 3, got 2)
findContours has two parameters as output not three- In new version of OpenCV.
contours, hierarchy = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
What are contours?
OpenCV (findContours) Detailed Guide
I'm trying to get my camera to detect both red and green colours.
Now it can detect either one colour successfully, but it can't detect both colours.
ball_color=('red','green')
If I change it to ball_color=('red') or ball_color=('green') it can work, but when i put ball_color=('red','green') there's errors.
I added the error code at the bottom. Thanks alot!
import cv2
import numpy as np
ball_color=('red','green')
color_dist = {'red': {'Lower': np.array([0, 60, 60]), 'Upper': np.array([10, 255, 255])},
'green': {'Lower': np.array([35, 43, 35]), 'Upper': np.array([90, 255, 255])},
}
cap = cv2.VideoCapture(0)
cv2.namedWindow('camera', cv2.WINDOW_AUTOSIZE)
while cap.isOpened():
ret, frame = cap.read()
if ret:
if frame is not None:
gs_frame = cv2.GaussianBlur(frame, (5, 5), 0) # 高斯模糊
hsv = cv2.cvtColor(gs_frame, cv2.COLOR_BGR2HSV) # 转化成HSV图像
erode_hsv = cv2.erode(hsv, None, iterations=2) # 腐蚀 粗的变细
inRange_hsv = cv2.inRange(erode_hsv, color_dist[ball_color]['Lower'], color_dist[ball_color]['Upper'])
cnts = cv2.findContours(inRange_hsv.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
c = max(cnts, key=cv2.contourArea)
rect = cv2.minAreaRect(c)
box = cv2.boxPoints(rect)
cv2.drawContours(frame, [np.int0(box)], -1, (0, 255, 255), 2)
cv2.imshow('camera', frame)
cv2.waitKey(1)
cap.release()
cv2.waitKey(0)
cv2.destroyAllWindows()
Error code are as follows:
Traceback (most recent call last):
File "/Users/fangyaoting/PycharmProjects/pythonProject/顏色辨識.py", line 21, in <module>
inRange_hsv = cv2.inRange(erode_hsv, color_dist[ball_color]['Lower'], color_dist[ball_color]['Upper'])
KeyError: ('red', 'green')
I think it could be the soultion:
import cv2
import numpy as np
ball_color=('red','green')
color_dist = {'red': {'Lower': np.array([-10, 100, 100]), 'Upper': np.array([10, 255, 255])},
'green': {'Lower': np.array([35, 43, 35]), 'Upper': np.array([90, 255, 255])},
}
cap = cv2.VideoCapture(0)
cv2.namedWindow('camera', cv2.WINDOW_AUTOSIZE)
while cap.isOpened():
ret, frame = cap.read()
if ret:
if frame is not None:
gs_frame = cv2.GaussianBlur(frame, (5, 5), 0) # 高斯模糊
hsv = cv2.cvtColor(gs_frame, cv2.COLOR_BGR2HSV) # 转化成HSV图像
erode_hsv = cv2.erode(hsv, None, iterations=2) # 腐蚀 粗的变细
b_shape = hsv.shape[0], hsv.shape[1], 1
inRange_hsv = np.zeros(b_shape, hsv.dtype)
for i in ball_color:
inRange_hsv_tmp = cv2.inRange(erode_hsv, color_dist[i]['Lower'], color_dist[i]['Upper'])
inRange_hsv = cv2.bitwise_or(inRange_hsv, inRange_hsv_tmp)
cnts = cv2.findContours(inRange_hsv.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
if len(cnts) > 0:
c = max(cnts, key=cv2.contourArea)
rect = cv2.minAreaRect(c)
box = cv2.boxPoints(rect)
cv2.drawContours(frame, [np.int0(box)], -1, (0, 255, 255), 2)
cv2.imshow('camera', frame)
cv2.waitKey(1)
else:
print("無畫面")
else:
print("無法讀取鏡頭!")
cap.release()
cv2.waitKey(0)
cv2.destroyAllWindows()
You tried to pass tuple of strings as argument to cv2.inRange and it could work with tuple length 1, but it won't work with more arguments. So you should iterate them and use cv2.inRange for every color separately and then use cv2.bitwise_or for them to 'concat' binary images.
And about cnts: sometimes it could not find any contours so you should use if statement to control it.
Edit: in this line inRange_hsv = cv2.bitwise_or(inRange_hsv, inRange_hsv_tmp)
I am trying to build a college project Sign Language translator and the code for detecting the gestures is below.
This code to create data set for hand signs, whenever i run this code I get this error:
this is my first time asking a question so idk proper way of asking a question.
This the error i am getting:
Traceback (most recent call last):
File "create_gesture_data.py", line 74, in <module>
hand = segment_hand(gray_frame)
File "create_gesture_data.py", line 40, in segment_hand
hand_segment_max_cont = max(contours, key=cv2.contourArea(cv2.UMat()))
cv2.error: OpenCV(4.4.0) C:\Users\appveyor\AppData\Local\Temp\1\pip-req-build-zsozjuva\openc
\modules\imgproc\src\shapedescr.cpp:315: error: (-215:Assertion failed) npoints >= 0 &&
(depth==CV_32F||depth == CV_32S) in function 'cv::contourArea'
[ WARN:0] global C:\Users\appveyor\AppData\Local\Temp\1\pip-req-build-zsozjuva\opencv\module\video
\src\cap_msmf.cpp (435) `anonymous-namespace'::SourceReaderCB::~SourceReaderCB terminating async
callback
import cv2
import numpy as np
background = None
accumulated_weight = 0.5
ROI_top = 100
ROI_bottom = 300
ROI_right = 150
ROI_left = 350
def cal_accum_avg(frame, accumulated_weight):
global background
if background is None:
background = frame.copy().astype("float")
return None
cv2.accumulateWeighted(frame, background, accumulated_weight)
def segment_hand(frame, threshold=25):
global background
diff = cv2.absdiff(background.astype("uint8"), frame)
_ , thresholded = cv2.threshold(diff, threshold, 255, cv2.THRESH_BINARY)
# Grab the external contours for the image
image = cv2.findContours(thresholded.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = cv2.findContours(thresholded.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
hierarchy = cv2.findContours(thresholded.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if len(contours) == 0:
return None
else:
hand_segment_max_cont = max(contours, key=cv2.contourArea(cv2.UMat()))
return (thresholded, hand_segment_max_cont)
cam = cv2.VideoCapture(0)
num_frames = 0
element = 10
num_imgs_taken = 0
while True:
ret, frame = cam.read()
# filpping the frame to prevent inverted image of captured frame...
frame = cv2.flip(frame, 1)
frame_copy = frame.copy()
roi = frame[ROI_top:ROI_bottom, ROI_right:ROI_left]
gray_frame = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
gray_frame = cv2.GaussianBlur(gray_frame, (9, 9), 0)
if num_frames < 60:
cal_accum_avg(gray_frame, accumulated_weight)
if num_frames <= 59:
cv2.putText(frame_copy, "FETCHING BACKGROUND...PLEASE WAIT", (80, 400),
cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0,0,255), 2)
#cv2.imshow("Sign Detection",frame_copy)
#Time to configure the hand specifically into the ROI...
elif num_frames <= 300:
hand = segment_hand(gray_frame)
cv2.putText(frame_copy, "Adjust hand...Gesture for" + str(element), (200, 400),
cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2)
# Checking if hand is actually detected by counting number of contours detected...
if hand is not None:
thresholded, hand_segment = hand
# Draw contours around hand segment
cv2.drawContours(frame_copy, [hand_segment + (ROI_right, ROI_top)], -1, (255, 0, 0),1)
cv2.putText(frame_copy, str(num_frames)+"For" + str(element), (70, 45),
cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2)
# Also display the thresholded image
cv2.imshow("Thresholded Hand Image", thresholded)
else:
# Segmenting the hand region...
hand = segment_hand(gray_frame)
# Checking if we are able to detect the hand...
if hand is not None:
# unpack the thresholded img and the max_contour...
thresholded, hand_segment = hand
# Drawing contours around hand segment
cv2.drawContours(frame_copy, [hand_segment + (ROI_right, ROI_top)], -1, (255, 0, 0),1)
cv2.putText(frame_copy, str(num_frames), (70, 45), cv2.FONT_HERSHEY_SIMPLEX, 1,
(0,0,255), 2)
#cv2.putText(frame_copy, str(num_frames)+"For" + str(element), (70, 45),
cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2)
cv2.putText(frame_copy, str(num_imgs_taken) + 'images' +"For" + str(element), (200,400),
cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2)
# Displaying the thresholded image
cv2.imshow("Thresholded Hand Image", thresholded)
if num_imgs_taken <= 300:
#cv2.imwrite(r"D:\\gesture\\train\\"+str(element)+"\\" + str(num_imgs_taken+300)
+'.jpg', thresholded)
cv2.imwrite(r"D:\\gesture\\x"+"\\" + str(num_imgs_taken) + '.jpg', thresholded)
else:
break
num_imgs_taken +=1
else:
cv2.putText(frame_copy, 'No hand detected...', (200, 400), cv2.FONT_HERSHEY_SIMPLEX, 1,
(0,0,255), 2)
# Drawing ROI on frame copy
cv2.rectangle(frame_copy, (ROI_left, ROI_top), (ROI_right, ROI_bottom), (255,128,0), 3)
cv2.putText(frame_copy, "DataFlair hand sign recognition_ _ _", (10, 20), cv2.FONT_ITALIC, 0.5,
(51,255,51), 1)
# increment the number of frames for tracking
num_frames += 1
# Display the frame with segmented hand
cv2.imshow("Sign Detection", frame_copy)
# Closing windows with Esc key...(any other key with ord can be used too.)
k = cv2.waitKey(30) & 0xFF
if k == 27:
break
# Releasing camera & destroying all the windows...
cv2.destroyAllWindows()
cam.release()
Due to python max doc, it should look as:
hand_segment_max_cont = max(contours, key=cv2.contourArea)
cv2.findContours returns a tuple of list of contours and hierachy, but not the list of contours:
contours,hierachy = cv2.findContours(thresholded.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
I have a problem with my code. I don't know where I could put cv.resize() in order to get requirement resolution. I want to change it because the file which I upload is full HD resolution and I want to get smaller resolution. I will be glad of solution and explanation.
Below I show my code:
import cv2
import numpy as np
cap = cv2.VideoCapture('DJI_0037.MP4')
while cap.isOpened():
ret, frame = cap.read()
if ret == True:
frame_resize = cv2.resize(frame, (640, 480), interpolation=cv2.INTER_CUBIC)
else:
break
ret, frame_resize1 = cap.read(frame_resize)
ret, frame_resize2 = cap.read(frame_resize)
diff = cv2.absdiff(frame_resize1, frame_resize2)
gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (5, 5), 0)
_, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
dilated = cv2.dilate(thresh, None, iterations=3)
contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for contour in contours:
(x, y, w, h) = cv2.boundingRect(contour)
if cv2.contourArea(contour) < 2000:
continue
cv2.rectangle(frame_resize1, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.putText(frame_resize1, "Status: {}".format('Movement'), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255),3)
cv2.imshow("feed", frame_resize1)
frame_resize1 = frame_resize2
ret, frame_resize2 = cap.read()
if cv2.waitKey(40) == 27:
break
cap.release()
cv2.destroyAllWindows()
I was following this tutorial and I tried to save the video to an avi file, but every time I tried the file was corrupted. I was able to save the frames individually using cv2.imwrite(), but stitching together the individual frames was a lot of work, and would lag the entire program. Here is my code:
from imutils.video import VideoStream
import imutils
import time
import cv2
MINIMUM_AREA = 500
# For Video Recording Purposes
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('vibralert_test001.avi', fourcc, 20, (640, 480))
vs = VideoStream(src=0).start()
print("Setting up feed.")
time.sleep(2)
print("Live")
firstFrame = None
while True:
frame = vs.read()
text = "No Movement Detected"
frame = imutils.resize(frame, width=500)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (21, 21), 0)
if firstFrame is None:
firstFrame = gray
continue
delta = cv2.absdiff(firstFrame, gray)
thresh = cv2.threshold(delta, 25, 255, cv2.THRESH_BINARY)[1]
thresh = cv2.dilate(thresh, None, iterations=2)
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
for c in cnts:
if cv2.contourArea(c) < MINIMUM_AREA:
continue
(x, y, w, h) = cv2.boundingRect(c)
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
text = "Movement Detected"
cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
cv2.imshow("VibrAlert v0.1", frame)
out.write(frame)
key = cv2.waitKey(1) & 0xFF
if key == 27:
break
vs.stop()
out.release()
cv2.destroyAllWindows()
print('End Feed')
The size of the frame images need to be the same as the frameSize given to the VideoWriter, (640,480). The frame is being resized to have a width of 500, while the videoWriter is expecting a width of 640.
However, changing the resize to frame = imutils.resize(frame, width=640) probably won't work either. imutils.resize will return images with the same aspect ratio as the original image, so the height won't necessarily be 480. I would suggest replacing that line with the opencv.resize:
frame = cv2.resize(frame, (640,480))