When I try to run my motion capture code, it fails with the following error.
Traceback (most recent call last):
File "G:\machine learning\CV\Video Capture\motion capture with square.py", line 21, in <module>
x,y,w,h=cv2.boundingRect(thresh)
error: ..\..\..\..\opencv\modules\imgproc\src\contours.cpp:1895: error: (-215) points.checkVector(2) >= 0 && (points.depth() == CV_32F || points.depth() == CV_32S) in function cv::boundingRect
At first I thought the data type might be wrong, so I tried converting it to 'float32' and 'int32', but that didn't help, and now I have no idea what to do.
And here is my code:
import cv2
import numpy as np

camera = cv2.VideoCapture(0)
firstframe = None

while True:
    ret, frame = camera.read()
    # cv2.imshow("frame", frame)
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    if firstframe is None:
        firstframe = gray
        continue

    frameDelta = cv2.absdiff(firstframe, gray)
    thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=2)
    # (cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    x, y, w, h = cv2.boundingRect(thresh)
    frame = cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    cv2.imshow("frame", frame)
    cv2.imshow("Thresh", thresh)
    cv2.imshow("frame2", frameDelta)

    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):
        break

camera.release()
cv2.destroyAllWindows()
cv2.boundingRect() expects a set of (x, y) points in a specific format in order to compute a bounding rectangle. Your input is an image, not a set of points; the function is not meant to be applied directly to images. You must first find the contours of the binary mask, then iterate over the contours and call cv2.boundingRect() on each individual contour:
cnts, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

# Iterate over all the contours.
for contour in cnts:
    print(cv2.boundingRect(contour))
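For illustration, a minimal sketch of how that part of your loop could look after the change (this assumes OpenCV 4.x, where findContours returns two values; the 500-pixel area filter is only an optional addition to suppress noise and is not part of the original code):

cnts, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for contour in cnts:
    # Skip tiny contours that are most likely noise (the threshold is arbitrary).
    if cv2.contourArea(contour) < 500:
        continue
    # Bounding rectangle of this individual contour.
    x, y, w, h = cv2.boundingRect(contour)
    frame = cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)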
I found a very interesting article about detection of moving objects; here is the corresponding link: Detection of moving object
and also the corresponding article: Article about object detection
I followed the code and tried to implement it myself; here is my code:
import cv2
import numpy as np
import matplotlib.pyplot as plt
from Background_Image_Creation import get_background

cap = cv2.VideoCapture("video_1.mp4")
# print(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# print(cap.get(cv2.CAP_PROP_FPS))
frame_width = int(cap.get(3))
frame_height = int(cap.get(4))
save_name = "Result.mp4"
# define codec and create VideoWriter object
out = cv2.VideoWriter(save_name, cv2.VideoWriter_fourcc(*'mp4v'), 10, (frame_width, frame_height))
background_frame = get_background("video_1.mp4")
background = cv2.cvtColor(background_frame, cv2.COLOR_BGR2GRAY)
print(background.shape)
frame_count = 0
consecutive_frame = 8
# frame_diff_list = []

while cap.isOpened():
    ret, frame = cap.read()
    print(ret)
    print(frame.shape)
    if ret == True:
        frame_count += 1
        orig_frame = frame.copy()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        if frame_count % consecutive_frame == 0 or frame_count == 1:
            frame_diff_list = []
        frame_diff = cv2.absdiff(gray, background)
        ret, thresh = cv2.threshold(frame_diff, 50, 255, cv2.THRESH_BINARY)
        dilate_frame = cv2.dilate(thresh, None, iterations=2)
        frame_diff_list.append(dilate_frame)
        print(frame_diff_list)
        if len(frame_diff_list) == consecutive_frame:
            # add all the frames in the `frame_diff_list`
            sum_frames = sum(frame_diff_list)
            print(sum_frames)
            # find the contours around the white segmented areas
            contours, hierarchy = cv2.findContours(sum_frames, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            # draw the contours, not strictly necessary
            for i, cnt in enumerate(contours):
                cv2.drawContours(frame, contours, i, (0, 0, 255), 3)
            for contour in contours:
                # continue through the loop if contour area is less than 500...
                # ... helps in removing noise detection
                if cv2.contourArea(contour) < 500:
                    continue
                # get the xmin, ymin, width, and height coordinates from the contours
                (x, y, w, h) = cv2.boundingRect(contour)
                # draw the bounding boxes
                cv2.rectangle(orig_frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            cv2.imshow('Detected Objects', orig_frame)
            out.write(orig_frame)
            if cv2.waitKey(100) & 0xFF == ord('q'):
                break
    else:
        break

cap.release()
cv2.destroyAllWindows()
The code for creating the background frame is also presented:
import numpy as np
import cv2
import matplotlib.pyplot as plt

def get_background(path):
    cap = cv2.VideoCapture(path)
    frame_indices = cap.get(cv2.CAP_PROP_FRAME_COUNT) * np.random.uniform(size=50)
    frames = []
    for idx in frame_indices:
        cap.set(cv2.CAP_PROP_POS_FRAMES, idx)
        ret, frame = cap.read()
        frames.append(frame)
    median_frame = np.median(frames, axis=0).astype(np.uint8)
    return median_frame

# median_frame = get_background("video_1.mp4")
# cv2.imshow("Median_Background", median_frame)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# plt.show()
The code runs fine, but the output video does not contain anything; it is only about 1 KB in size. One thing I noticed is that this fragment
frame_diff_list.append(dilate_frame)
is highlighted in yellow in my editor; here is a screenshot:
Also, when I try to print it with print(frame_diff_list), it only printed one output:
I was even more surprised when I tested
print(ret)
print(frame.shape)
at the beginning of the loop, and they also printed only one output:
True
(360, 640, 3)
It seems that the loop does not cover all the frames, right? Could you please help me figure out what is wrong with my code?
I understand the error I'm getting: I need to convert the variable to grayscale, and then I won't get the error. But I couldn't solve the problem, so I can't move on to the next stage of my project. I'm posting the error I got below:
error: OpenCV(4.5.3) C:\Users\runneradmin\AppData\Local\Temp\pip-req-build-sn_xpupm\opencv\modules\imgproc\src\contours.cpp:197: error:
(-210:Unsupported format or combination of formats)
[Start]FindContours supports only CV_8UC1 images when mode != CV_RETR_FLOODFILL otherwise supports CV_32SC1 images only in function 'cvStartFindContours_Impl'
import cv2
import numpy as np

img = cv2.imread("s.jpg")

while True:
    # ret, frame = cap.read()
    # frame = cv2.flip(frame, 1)
    # print(frame.shape)  # 480,640
    ycrbc = cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)
    minYCrCb = np.array([0, 140, 90], np.uint8)
    maxYCrCb = np.array([230, 170, 120], np.uint8)
    imgeYCrCb = cv2.cvtColor(img, cv2.COLOR_BGR2YCR_CB)
    skinRegionYCrCb = cv2.inRange(imgeYCrCb, minYCrCb, maxYCrCb)
    skinYCrCb = cv2.bitwise_and(img, img, mask=skinRegionYCrCb)
    median_ycrcb = cv2.medianBlur(skinYCrCb, 3)
    _, esik = cv2.threshold(median_ycrcb, 20, 255, cv2.THRESH_BINARY)
    median_binary = cv2.medianBlur(esik, 7)
    gray = cv2.cvtColor(median_binary, cv2.COLOR_BGR2GRAY)
    contours, hierarchy = cv2.findContours(median_binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    areas = [cv2.contourArea(c) for c in contours]
    max_index = np.argmax(areas)
    x, y, w, h = cv2.boundingRect(contours[max_index])
    print(x, y, x + w, y + h)
    cv2.imshow("ycrbc", ycrbc)
    cv2.imshow("skinYCrCb", median_ycrcb)
    cv2.imshow("binary goruntu", esik)
    cv2.imshow("median_binary", median_binary)
    if cv2.waitKey(5) & 0xFF == ord("q"):
        break

# cap.release()
cv2.destroyAllWindows()
I worked on this picture:
If you can solve my problem by changing the color space here, I would really appreciate it. I searched a lot but couldn't find a solution, or I'm missing something; it would be great if you could help.
Use the below:
contours, hierarchy = cv2.findContours(gray, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
You have to use a single-channel input in the findContours function.
Changing your input to gray, which is single channel, should fix this.
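For clarity, a minimal sketch of how that section of the loop could look after the change (the emptiness check is only a defensive addition, not part of the original code):

gray = cv2.cvtColor(median_binary, cv2.COLOR_BGR2GRAY)
# findContours needs an 8-bit, single-channel image, so pass gray instead of median_binary.
contours, hierarchy = cv2.findContours(gray, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
areas = [cv2.contourArea(c) for c in contours]
if areas:  # guard against frames where no contour is found
    max_index = np.argmax(areas)
    x, y, w, h = cv2.boundingRect(contours[max_index])
    print(x, y, x + w, y + h)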
I am trying to extract a hand out of an image. I am using OpenCV 4.0 and Python 3.6.
cnts = cv2.findContours(thresholded.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

# return None, if no contours detected
if len(cnts) == 0:
    return
else:
    # based on contour area, get the maximum contour which is the hand
    segmented = max(cnts, key=cv2.contourArea)
And it gives me this error:
Traceback (most recent call last):
File "test.py", line 85, in <module>
hand = segment(gray)
File "test.py", line 37, in segment
segmented = max(cnts, key=cv2.contourArea)
TypeError: Expected cv::UMat for argument 'contour'
Since this worked about half a year ago, I assume the error is caused by some kind of module change. How can that be fixed?
You didn't pay attention to the outputs of cv2.findContours. For any OpenCV version >= 4.0, it's
contours, hierarchy = cv2.findContours(...)
I made up an example:
import cv2
import numpy as np

# Set up dummy image
image = np.zeros((400, 400), np.uint8)
cv2.circle(image, (150, 250), 100, 255, cv2.FILLED)

# Find contours: OpenCV 4.x
cnts, _ = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

if len(cnts) == 0:
    # return None, if no contours detected
    segmented = None
else:
    # based on contour area, get the maximum contour which is the hand
    segmented = max(cnts, key=cv2.contourArea)
    print('Number of contour points:', segmented.shape[0])

cv2.imshow('image', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
The example image looks like this:
The output:
Number of contour points: 292
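If the same code also has to run on OpenCV 3.x, where findContours returns three values, one common workaround is to unpack the result by position; a small sketch (not part of the original example):

# Works on both OpenCV 3.x (image, contours, hierarchy) and 4.x (contours, hierarchy).
result = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = result[0] if len(result) == 2 else result[1]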
Hope that helps!
What I am trying to do in the code below is create an OpenCV program in Python that opens my laptop webcam and filters the camera feed so that it only shows my clothes. But I couldn't even run the program because I encountered an error that seems to come from the 10th line of the code. It is definitely not a misspelling; I double-checked it.
The code sample:
# color filtering
import cv2
import numpy as np

# use camera
cap = cv2.VideoCapture(1)

while True:
    _, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)  # this line seems to be the source of the error
    # hsv hue sat value
    # try to get the value of the color that you want
    lower_red = np.array([150, 150, 150])
    upper_red = np.array([180, 255, 255])
    mask = cv2.inRange(hsv, lower_red, upper_red)
    res = cv2.bitwise_and(frame, frame, mask=mask)
    cv2.imshow('frame', frame)
    cv2.imshow('mask', mask)
    cv2.imshow('result', res)
    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        break

cv2.destroyAllWindows()
# release camera
cap.release()
The error:
Traceback (most recent call last):
File "D:/Program_Files/Python/legit8.py", line 10, in <module>
hsv=cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
cv2.error: C:\projects\opencv-python\opencv\modules\imgproc\src\color.cpp:10705: error: (-215) (scn == 3 || scn == 4) && (depth == CV_8U || depth == CV_32F) in function cv::cvtColor
Your frame is probably None. This could be because of VideoCapture(1). If your webcam is the only cam connected to your computer, use VideoCapture(0)!
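A minimal sketch of a more defensive start of the loop, assuming the built-in webcam is at index 0 (the explicit checks are illustrative additions, not required code):

cap = cv2.VideoCapture(0)  # index 0 for the built-in webcam
if not cap.isOpened():
    raise RuntimeError("Could not open the camera")

while True:
    ret, frame = cap.read()
    if not ret or frame is None:
        # Stop instead of passing None to cv2.cvtColor.
        break
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)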
I am trying to implement convex hull in my Python project, but I am getting the following error:
Traceback (most recent call last):
File "contourfeaturestest.py", line 22, in <module>
hull2 = cv2.convexHull(cnt)
cv2.error:convhull.cpp:134: error: (-215) total >= 0 && (depth == CV_32F || depth == CV_32S) in function convexHull
I am trying to use convexHull with frames coming from the computer's video camera, and I am not sure why my code is producing the error above.
My code is provided below.
import numpy as np
import cv2

cap = cv2.VideoCapture(0)

while(True):
    # Capture frame-by-frame
    ret, frame = cap.read()

    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Display the resulting frame
    cv2.imshow('frame', gray)

    ret1, thresh = cv2.threshold(gray, 127, 255, 0)
    contours, hierarchy, ret2 = cv2.findContours(thresh, 1, 2)
    cnt = contours[0]
    hull2 = cv2.convexHull(cnt)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
From what I can guess from your code, it seems you are using OpenCV 3.1, in which the return values of cv2.findContours() were changed to ret_image, contours, hierarchy, as opposed to what you are assuming: contours, hierarchy, ret2. So the second value in the returned tuple contains the list of contours.
However, in previous OpenCV versions cv2.findContours() returned only 2 values, contours, hierarchy, so a slight change in the variable names would make it work for you.
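A sketch of how that part of the loop could be adapted for OpenCV 3.1 (variable names follow the question's code; RETR_LIST and CHAIN_APPROX_SIMPLE are the named equivalents of the numeric flags 1 and 2, and the emptiness check is an optional addition):

ret1, thresh = cv2.threshold(gray, 127, 255, 0)
# OpenCV 3.x returns (image, contours, hierarchy).
_, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
if contours:  # guard against frames with no contours
    cnt = contours[0]
    hull2 = cv2.convexHull(cnt)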