(Unsupported format or combination of formats), FindContours - Python

I want to count cars using a Haar cascade.
#import libraries of python opencv
import numpy as np
import cv2
import gc
import uuid
import datetime
import time
import csv
cap = cv2.VideoCapture('v3.mp4')
car_cascade = cv2.CascadeClassifier('cars.xml')
W = cap.get(3)
H = cap.get(4)
areaTH = 700
H1 = (H/2)+10
W1 = W/2
mx = 0
my = 30
while cap.isOpened():
    #capture frame by frame
    ret, frame = cap.read()
    #convert each frame of the video to grayscale
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    #detect cars in the frame
    cars = car_cascade.detectMultiScale(gray, 1.1, 3)
    #draw a rectangle around each car
    for (x,y,w,h) in cars:
        cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),2)
    #display the resulting frame
    cv2.imshow('video', frame)
    #press Q on keyboard to exit
    if cv2.waitKey(25) & 0xFF == ord('q'):
        break
    Line1 = np.array([[20,H1],[310,H1]], np.int32).reshape((-1,1,2))
    frame = cv2.polylines(frame,[Line1],False,(0,0,255),thickness=5)
    fram, contours0, hierarchy = cv2.findContours(frame,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
    for cnt in contours0:
        # cv2.drawContours(frame, cnt, -1, (0,255,0), 2, 8)
        area = cv2.contourArea(cnt)
        #print ('Area : '+str(area))
        if area > areaTH:
            #################
            #   TRACKING    #
            #################
            M = cv2.moments(cnt)
            cx = int(M['m10']/M['m00'])
            cy = int(M['m01']/M['m00'])
            x,y,w,h = cv2.boundingRect(cnt)
            cv2.circle(frame,(cx,cy), 3, (255,0,0), -1)
            img = cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),2)
            #print ('H1 : '+str(H1))
            print('cy : '+str(cy))
            if (cy >= 147) and (cy <= 155):
                Vehicles = Vehicles + 1
    Line1 = np.array([[200,H1],[880,H1]], np.int32).reshape((-1,1,2))
    frame = cv2.polylines(frame,[Line1],False,(255,0,0),thickness=5)
    cv2.imshow('Frame',frame)
    #Abort and exit with 'Q' or ESC
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
cap.release() #release video file
cv2.destroyAllWindows()
I convert the image to grayscale correctly and have successfully drawn a line on the frame. The problem is that I am getting this error
error: OpenCV(3.4.2) C:\build\3_4_winpack-bindings-win32-vc14-static\opencv\modules\imgproc\src\contours.cpp:199: error: (-210:Unsupported format or combination of formats) [Start]FindContours supports only CV_8UC1 images when mode != CV_RETR_FLOODFILL otherwise supports CV_32SC1 images only in function 'cvStartFindContours_Impl'
whenever I pass the variable 'frame' to this line of code. Can somebody help me solve this error? Thanks.
fram, contours0, hierarchy = cv2.findContours(frame, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

If you look at the OpenCV documentation you will see that cv2.findContours() requires a single-channel image. You pass frame, which still has 3 channels; you should use the gray variable instead: cv2.findContours(gray, ...).

From the documentation for cv2.findContours():
image – Source, an 8-bit single-channel image. Non-zero pixels are treated as 1's. Zero pixels remain 0's, so the image is treated as binary. You can use compare(), inRange(), threshold(), adaptiveThreshold(), Canny(), and others to create a binary image out of a grayscale or color one. The function modifies the image while extracting the contours. If mode equals CV_RETR_CCOMP or CV_RETR_FLOODFILL, the input can also be a 32-bit integer image of labels (CV_32SC1).
Try this line to scale the pixels:
image = cv2.convertScaleAbs(image)
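Putting these suggestions together, here is a minimal sketch (assuming the gray, frame, and areaTH variables from the question's loop; the threshold value 127 is an arbitrary choice) that first builds a binary image with cv2.threshold and only then calls cv2.findContours. Note that findContours returns three values in OpenCV 3.x but two in 4.x:
# minimal sketch: run findContours on a binary single-channel image, not the BGR frame
_, binary = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)  # 127 is an assumed threshold

# OpenCV 3.x returns (image, contours, hierarchy); OpenCV 4.x returns (contours, hierarchy)
result = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
contours0 = result[-2]  # picks the contour list under either signature

for cnt in contours0:
    if cv2.contourArea(cnt) > areaTH:
        x, y, w, h = cv2.boundingRect(cnt)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)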

Related

detect moving object with opencv and python

I found a very interesting article about detection of moving objects; here is the corresponding link: Detection of moving object
and also the corresponding article: Article about object detection
I followed the code and tried to implement it myself; here is the corresponding code:
import cv2
import numpy as np
import matplotlib.pyplot as plt
from Background_Image_Creation import get_background

cap = cv2.VideoCapture("video_1.mp4")
#print(cap.get(cv2.CAP_PROP_FRAME_COUNT))
#print(cap.get(cv2.CAP_PROP_FPS))
frame_width = int(cap.get(3))
frame_height = int(cap.get(4))
save_name = "Result.mp4"
# define codec and create VideoWriter object
out = cv2.VideoWriter(save_name, cv2.VideoWriter_fourcc(*'mp4v'), 10, (frame_width, frame_height))
background_frame = get_background("video_1.mp4")
background = cv2.cvtColor(background_frame, cv2.COLOR_BGR2GRAY)
print(background.shape)
frame_count = 0
consecutive_frame = 8
#frame_diff_list = []
while cap.isOpened():
    ret, frame = cap.read()
    print(ret)
    print(frame.shape)
    if ret == True:
        frame_count += 1
        orig_frame = frame.copy()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        if frame_count % consecutive_frame == 0 or frame_count == 1:
            frame_diff_list = []
        frame_diff = cv2.absdiff(gray, background)
        ret, thresh = cv2.threshold(frame_diff, 50, 255, cv2.THRESH_BINARY)
        dilate_frame = cv2.dilate(thresh, None, iterations=2)
        frame_diff_list.append(dilate_frame)
        print(frame_diff_list)
        if len(frame_diff_list) == consecutive_frame:
            # add all the frames in the `frame_diff_list`
            sum_frames = sum(frame_diff_list)
            print(sum_frames)
            # find the contours around the white segmented areas
            contours, hierarchy = cv2.findContours(sum_frames, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            # draw the contours, not strictly necessary
            for i, cnt in enumerate(contours):
                cv2.drawContours(frame, contours, i, (0, 0, 255), 3)
            for contour in contours:
                # continue through the loop if contour area is less than 500...
                # ... helps in removing noise detection
                if cv2.contourArea(contour) < 500:
                    continue
                # get the xmin, ymin, width, and height coordinates from the contours
                (x, y, w, h) = cv2.boundingRect(contour)
                # draw the bounding boxes
                cv2.rectangle(orig_frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            cv2.imshow('Detected Objects', orig_frame)
            out.write(orig_frame)
            if cv2.waitKey(100) & 0xFF == ord('q'):
                break
    else:
        break
cap.release()
cv2.destroyAllWindows()
The code for background frame creation is also presented:
import numpy as np
import cv2
import matplotlib.pyplot as plt

def get_background(path):
    cap = cv2.VideoCapture(path)
    frame_indices = cap.get(cv2.CAP_PROP_FRAME_COUNT) * np.random.uniform(size=50)
    frames = []
    for idx in frame_indices:
        cap.set(cv2.CAP_PROP_POS_FRAMES, idx)
        ret, frame = cap.read()
        frames.append(frame)
    median_frame = np.median(frames, axis=0).astype(np.uint8)
    return median_frame

#median_frame = get_background("video_1.mp4")
#cv2.imshow("Median_Background", median_frame)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
#plt.show()
The code runs fine, but the output video does not contain anything; it is just 1 KB in size. One thing I noticed is that this fragment
frame_diff_list.append(dilate_frame)
is highlighted in yellow; here is a screenshot:
and also when I try to print(frame_diff_list), it just printed one output:
I was even more surprised when I tested
print(ret)
print(frame.shape)
at the beginning of the loop, and it also printed only one output:
True
(360, 640, 3)
It seems that the loop does not cover all frames, right? Could you help me figure out what is wrong with my code?
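A minimal diagnostic sketch, assuming the same video_1.mp4, to check whether the capture itself yields more than one frame or whether the processing loop is at fault:
import cv2

# diagnostic sketch: count how many frames the capture actually yields
cap = cv2.VideoCapture("video_1.mp4")
count = 0
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    count += 1
cap.release()
print("decoded", count, "frames")  # if this is > 1, the problem is in the processing loop, not the capture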

img data type = 17 is not supported

I've got a video in which I'm trying to show a specific region using cv2.fillPoly and a bitwise operation. When I do this on an image it works fine, but when done on a video it comes up with the following error. I had no problems doing this earlier with another picture/video. The region to be shown does show up as a frozen picture, but it also crashes the kernel. The code is:
import cv2
import numpy as np

cap = cv2.VideoCapture("heartvideo.wmv", 0)

def roi(frame):
    mask = np.zeros_like(frame)
    array = np.array([[148,550],[300,650],[400,680],[800,680],[880,560],[555,70],[492,50]])
    contours = np.array([[50,50], [50,150], [150,150], [150,50]])
    cv2.fillPoly(mask, pts=[array], color=(255))
    masked = cv2.bitwise_and(mask, frame)
    return mask

while cap.isOpened():  # while video is initialised
    ret, frame = cap.read()  # reads the video bit by bit
    adj = roi(frame)
    if ret:
        cv2.imshow("Image", adj)
    else:
        cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
    if cv2.waitKey(15) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
error: OpenCV(4.5.3) :-1: error: (-5:Bad argument) in function 'fillPoly'
Overload resolution failed:
img data type = 17 is not supported
Expected Ptr<cv::UMat> for argument 'img'
The issue is that you're using a 3-channel BGR mask (data type 17 is a 3-channel image). You used np.zeros_like(frame) to create your mask, which means it has exactly the same dimensions as the image you passed in. If you meant for it to be a 1-channel image, you should set the dimensions explicitly.
I'm not sure what version of OpenCV you're using, and I can't replicate the error with OpenCV 4.4. That version allows a 3-channel image even if you've specified a 1-channel color as the fillPoly argument, though it does so by assuming you meant (255, 0, 0) for the color. It could be that on a different version of OpenCV the color dimensions have to match the image dimensions, and that is what it's complaining about.
Try out this revised version of the code and see if it works.
import cv2
import numpy as np

def roi(frame):
    # draw a polygon on mask
    height, width = frame.shape[:2]
    mask = np.zeros((height, width), np.uint8)
    array = np.array([[148,550],[300,650],[400,680],[800,680],[880,560],[555,70],[492,50]])
    contours = np.array([[50,50], [50,150], [150,150], [150,50]])
    cv2.fillPoly(mask, pts=[array], color=(255))

    # mask stuff on frame
    # masked = cv2.bitwise_and(mask, frame)
    copy = np.zeros_like(frame)
    copy[mask == 255] = frame[mask == 255]
    return copy

# open video
cap = cv2.VideoCapture("heartvideo.wmv", 0)
while cap.isOpened():  # while video is initialised
    ret, frame = cap.read()  # reads the video bit by bit
    if ret:
        adj = roi(frame)
        cv2.imshow("Image", adj)
    else:
        cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
    if cv2.waitKey(15) == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()

Object tracking program does not show tracked points in the output image

I need to track an object in a video frame, but to do so I have to initialize good features to track my object.
I read the first frame of a .mp4 file and retained its blue channel. I then created my binary mask by extracting my region of interest from the first frame; the ROI lies within [300, 400] on the y-axis and [460, 550] on the x-axis (which is the front side of the bus).
I then initialized 10 corner points using cv2.goodFeaturesToTrack with a quality level of 0.01 and a minimum allowable distance of 10 pixels between corner points. I then tried to display these points on the RGB image, but no points appear and I do not know why.
This is shown here:
import numpy as np
import cv2
import matplotlib.pyplot as plt

vid = cv2.VideoCapture('Bus.mp4')
ret, frame = vid.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
blue_ch = frame[:,:,2]
mask = blue_ch[300:400, 460:550]
prev_pts = cv2.goodFeaturesToTrack(image=mask, maxCorners=10, qualityLevel=0.01, minDistance=10, blockSize=10)
blue_ch = np.array(blue_ch)
for i in prev_pts:
    x, y = i.ravel()
    cv2.circle(blue_ch, (x, y), 3, 255, -1)
plt.imshow(blue_ch)
It creates small yellow dots in the top-left corner. They are similar to the background color, so you may not see them.
When I draw on the RGB frame, I get red dots, which you can see in the image.
If I convert blue_ch to RGB before drawing
blue_ch = cv2.cvtColor(blue_ch, cv2.COLOR_GRAY2RGB)
then I see
You take mask = blue_ch[300:400, 460:550], so the corner coordinates are relative to that crop; when I add
y += 300
x += 460
then I get the dots in the correct place.
My code:
import numpy as np
import cv2
import matplotlib.pyplot as plt

vid = cv2.VideoCapture('Bus.mp4')
ret, frame = vid.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
blue_ch = frame[:, :, 2]
mask = blue_ch[300:400, 460:550]
prev_pts = cv2.goodFeaturesToTrack(image=mask, maxCorners=10, qualityLevel=0.01, minDistance=10, blockSize=10)
blue_ch = cv2.cvtColor(blue_ch, cv2.COLOR_GRAY2RGB)
#blue_ch = np.array(blue_ch)
for i in prev_pts:
    x, y = i.ravel()
    y += 300
    x += 460
    #print(x, y)
    cv2.circle(frame, (int(x), int(y)), 3, 255, -1)
    cv2.circle(blue_ch, (int(x), int(y)), 3, 255, -1)

# --- display with matplotlib ---
plt.imshow(frame)
plt.show()
plt.imshow(blue_ch)
plt.show()

# --- OR display with cv2 ---
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
blue_ch = cv2.cvtColor(blue_ch, cv2.COLOR_BGR2RGB)
cv2.imshow('frame', frame)
cv2.imshow('blue_ch', blue_ch)
print("Press ESC to exit")
try:
    while cv2.waitKey(1) != 27:  # ESC
        pass
except:
    cv2.destroyAllWindows()

How to insert a smaller image in an opencv Videocapture frame?

So, I have this code. I can easily add text or any shape to an OpenCV frame, but inserting an image into a frame is proving difficult.
I want to insert a smaller image on a VideoCapture frame; let's say the image to be inserted is 50x50 pixels.
Any idea on this?
import cv2

webcam = cv2.VideoCapture(0)
insertImage = "sample.jpg"  # size 50x50
while True:
    rval = False
    while not rval:
        (rval, frame) = webcam.read()
        if not rval:
            print("Failed to open webcam. Trying again...")
    cv2.putText(frame, " image here ", (0, 70), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 2)
    cv2.imshow('with image', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
webcam.release()
cv2.destroyAllWindows()
OpenCV images are numpy arrays. As far as I know, OpenCV does not provide a function that does exactly what you want, but the result can be achieved by manipulating the arrays directly.
import cv2
import numpy as np

if __name__ == '__main__':
    bigImage = cv2.imread("image1.png")    # I don't have a webcam connected, so this stands in for your frame
    smallImage = cv2.imread("image2.png")  # 50x50 for you
    height, width, channels = smallImage.shape
    offset = np.array((40, 50))  # top-left point at which to insert the smaller image; height first, from the top of the window
    bigImage[offset[0]:offset[0] + height, offset[1]:offset[1] + width] = smallImage
    cv2.imshow("test", bigImage)
    cv2.waitKey(0)
Input:
bigImage = big gray rectangle (it is your frame)
smallImage = small green rectangle (it is your 50x50 image)
Output:
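Applied to the webcam code from the question, a minimal sketch of the same slicing inside the capture loop (assuming sample.jpg really is 50x50 and smaller than the webcam frame):
import cv2

webcam = cv2.VideoCapture(0)
small = cv2.imread("sample.jpg")  # assumed to be a 50x50 BGR image
h, w = small.shape[:2]
while True:
    rval, frame = webcam.read()
    if not rval:
        break
    # paste the small image into the top-left region of the frame
    frame[10:10 + h, 10:10 + w] = small
    cv2.imshow('with image', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
webcam.release()
cv2.destroyAllWindows()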

How to modify this python opencv program to get xy coordinates from thresholded image

I have an OpenCV program to track a blue object here.
Can someone modify it to find the center of the object tracked from the webcam after thresholding it to a binary image?
PS: I want this program to work with a webcam, not an image.
import numpy as np
import cv2

cap = cv2.VideoCapture(1)
while True:
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower_blue = np.array([110,50,50])
    upper_blue = np.array([130,255,255])
    mask = cv2.inRange(gray, lower_blue, upper_blue)
    res = cv2.bitwise_and(frame, frame, mask=mask)
    # Display the resulting frame
    cv2.imshow('frame', gray)
    cv2.imshow('mask', mask)
    cv2.imshow('res', res)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
I would like to find the center, i.e. the xy coordinates, of the white blob in the thresholded image from the webcam.
I have code that I used for finding the xy coordinates in a thresholded image, and I want to modify it for live video from the webcam.
Here is the code to find the coordinates in a binary image:
for y in xrange(img.size[1]):
    for x in xrange(img.size[0]):
        if pixels[x, y] == 0:
            xlist.append(x)
            ylist.append(y)
xleft = min(xlist)
xright = max(xlist)
ytop = min(ylist)
ybot = max(ylist)
xax = (xleft + xright) / 2
yax = (ytop + ybot) / 2
Can someone combine these two pieces of code to make it work for a live feed from the webcam?
You want to run findContours after the bitwise_and, then get the boundingRect (or the moments) of the contour to obtain the x,y location.
And forget about your "find co-ordinates of a binary image" idea (no, you don't want to reinvent connected components; use the built-in functions instead).
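A minimal sketch of that suggestion, assuming the same HSV range as the question and the two-value findContours signature of OpenCV 4.x:
import numpy as np
import cv2

cap = cv2.VideoCapture(1)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, np.array([110, 50, 50]), np.array([130, 255, 255]))

    # find the blobs in the binary mask and take the largest one
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if contours:
        largest = max(contours, key=cv2.contourArea)
        M = cv2.moments(largest)
        if M['m00'] != 0:
            # centroid (x, y) of the white blob
            cx = int(M['m10'] / M['m00'])
            cy = int(M['m01'] / M['m00'])
            cv2.circle(frame, (cx, cy), 5, (0, 0, 255), -1)

    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()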
