VideoCapture does not play the video - python

I have a problem with VideoCapture where the video I am streaming shows only the first frame. In the following code, I superimpose a video on top of the bounding box yielded by object detection:
if view_img:
    # video_name is a path to my video
    img = cv2.VideoCapture(video_name)
    ret_video, frame_video = img.read()
    if not ret_video:  # so that the video can be played in a loop
        img = cv2.VideoCapture(video_name)
        ret_video, frame_video = img.read()
    # here I look for the bounding boxes and superimpose the video
    hsv = cv2.cvtColor(im0, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, (0, 120, 120), (10, 255, 255))  # red; blue would be (110, 120, 120), (130, 255, 255)
    thresh = cv2.dilate(mask, None, iterations=2)
    contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # contours = contours[0]
    contours = imutils.grab_contours(contours)
    # frame_counter = 0
    for contour in contours:
        if cv2.contourArea(contour) < 750:
            continue
        (x, y, w, h) = cv2.boundingRect(contour)
        height = 480
        width = 640
        if y + h < height and x + w < width:
            logo = cv2.resize(frame_video, (w, h))  # frame_video is the frame from the video which I superimpose
            img2gray = cv2.cvtColor(logo, cv2.COLOR_BGR2GRAY)
            _, logo_mask = cv2.threshold(img2gray, 1, 255, cv2.THRESH_BINARY)
            roi = im0[y:y+h, x:x+w]
            roi[np.where(logo_mask)] = 0
            roi += logo
    cv2.imshow(str(p), im0)  # im0 is the webcam frame
    cv2.waitKey(25)
What happens when I run this code is that instead of showing the entire video on top of the webcam frame, it displays only the first frame of that video.
Superimposing the video works fine in another script (a modified version of the original: source).
I believe the issue has something to do with waitKey() for the superimposed video, as it is not specified.
If I try to initialize the video with while (cap.isOpened()): or while (True), the program freezes and there is no output at all.

cv2.VideoCapture should be created once per device or source. The while loop should not contain the cv2.VideoCapture call (initialize it outside the loop). The reason it hangs in the while loop is that you open a connection to the same device multiple times without closing it.

I didn't test it, but you could simply do it like this:
if view_img:
    # video_name is a path to my video
    img = cv2.VideoCapture(video_name)
    while img.isOpened():
        ret_video, frame_video = img.read()
        if not ret_video:  # stop when the video runs out of frames
            break
        # here I look for the bounding boxes and superimpose the video
        hsv = cv2.cvtColor(im0, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, (0, 120, 120), (10, 255, 255))  # red; blue would be (110, 120, 120), (130, 255, 255)
        thresh = cv2.dilate(mask, None, iterations=2)
        contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        # contours = contours[0]
        contours = imutils.grab_contours(contours)
        # frame_counter = 0
        for contour in contours:
            if cv2.contourArea(contour) < 750:
                continue
            (x, y, w, h) = cv2.boundingRect(contour)
            height = 480
            width = 640
            if y + h < height and x + w < width:
                logo = cv2.resize(frame_video, (w, h))  # frame_video is the frame from the video which I superimpose
                img2gray = cv2.cvtColor(logo, cv2.COLOR_BGR2GRAY)
                _, logo_mask = cv2.threshold(img2gray, 1, 255, cv2.THRESH_BINARY)
                roi = im0[y:y+h, x:x+w]
                roi[np.where(logo_mask)] = 0
                roi += logo
        cv2.imshow(str(p), im0)  # im0 is the webcam frame
        cv2.waitKey(25)
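If the goal is still to have the overlay video repeat, one possible tweak (my own sketch, not tested) is to rewind the capture instead of breaking out of the loop when it runs out of frames:

        ret_video, frame_video = img.read()
        if not ret_video:
            # rewind to the first frame so the overlay video loops
            img.set(cv2.CAP_PROP_POS_FRAMES, 0)
            ret_video, frame_video = img.read()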
Btw, if you are using OpenCV 4.5.5, cv2.findContours returns only two values (contours and hierarchy), so you may have to unpack it like this:
contours, hierarchy = cv2.findContours(...)

Related

How to make a mesh within a rectangle using the drawing functions of OpenCV?

This is my full code.
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
width = cap.get(3)   # float
height = cap.get(4)  # float
print(width, height)

while (1):
    _, img = cap.read()
    if _ is True:
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    else:
        continue
    # blue color
    blue_lower = np.array([86, 0, 90], np.uint8)
    blue_upper = np.array([163, 64, 145], np.uint8)
    blue = cv2.inRange(hsv, blue_lower, blue_upper)
    kernal = np.ones((9, 9), "uint8")
    blue = cv2.dilate(blue, kernal)
    res_blue = cv2.bitwise_and(img, img, mask=blue)
    # Tracking blue
    (_, contours, hierarchy) = cv2.findContours(blue, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for pic, contour in enumerate(contours):
        area = cv2.contourArea(contour)
        if (area > 2000):
            print(area)
            x, y, w, h = cv2.boundingRect(contour)
            img = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
            cv2.putText(img, "Blue Colour", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0))
    cv2.imshow("Color Tracking", img)
    if cv2.waitKey(10) & 0xFF == ord('q'):
        cap.release()
        cv2.destroyAllWindows()
        break
I would like to mesh all rectangles as in these pictures; you can see the mesh of the contour next to them. In my case, I would like to mesh the rectangles themselves.
This picture is taken from this video
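For reference, here is a minimal sketch of one way such a mesh could be drawn once a bounding rectangle (x, y, w, h) is known; the helper name draw_mesh and the step size are my own assumptions, not something from the original post:

def draw_mesh(img, x, y, w, h, step=20, color=(255, 0, 0)):
    # vertical grid lines across the bounding rectangle
    for gx in range(x, x + w + 1, step):
        cv2.line(img, (gx, y), (gx, y + h), color, 1)
    # horizontal grid lines across the bounding rectangle
    for gy in range(y, y + h + 1, step):
        cv2.line(img, (x, gy), (x + w, gy), color, 1)

It could be called right after cv2.boundingRect(contour) in the loop above, e.g. draw_mesh(img, x, y, w, h).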

detect a single shape and colour in real time using opencv

I'm working on a project that requires me to detect a red rectangle in real time. So far I've managed to get the colour and shape detected together, but the code can't differentiate between the rectangle and other objects that are red.
How might I go about doing this?
import cv2
import numpy as np

def nothing():
    pass

cap = cv2.VideoCapture(0)
font = cv2.FONT_HERSHEY_COMPLEX

while True:
    _, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # Red color
    low_red = np.array([175, 50, 20])
    high_red = np.array([180, 255, 255])
    mask1 = cv2.inRange(hsv, low_red, high_red)
    kernel = np.ones((5, 5), np.uint8)
    mask2 = cv2.erode(mask1, kernel)
    red = cv2.bitwise_and(frame, frame, mask=mask2)
    contours, _ = cv2.findContours(mask2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        approx = cv2.approxPolyDP(cnt, 0.02 * cv2.arcLength(cnt, True), True)
        hull = cv2.convexHull(cnt)
        x = approx.ravel()[0]
        y = approx.ravel()[1]
        if area > 400:
            cv2.drawContours(frame, [approx], 0, (0, 0, 0), 5)
            if len(approx) == 4:
                cv2.putText(frame, "Rectangle", (x, y), font, 1, (0, 0, 0))
    edges = cv2.Canny(frame, 100, 200)
    _, threshold_binary = cv2.threshold(frame, 128, 255, cv2.THRESH_BINARY)
    _, threshold_binary_inv = cv2.threshold(frame, 128, 255, cv2.THRESH_BINARY_INV)
    _, threshold_trunc = cv2.threshold(frame, 128, 255, cv2.THRESH_TRUNC)
    _, threshold_to_zero = cv2.threshold(frame, 12, 255, cv2.THRESH_TOZERO)
    cv2.imshow("Frame", frame)
    cv2.imshow('edges', edges)
    cv2.imshow('red', red)
    cv2.imshow("mask", mask1)
    key = cv2.waitKey(1)
    if key == 27:
        cap.release()
        cv2.destroyAllWindows()
        break
Output image:
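One hedged idea for narrowing the detection down to a rectangle (my own sketch, not from the original post): keep only contours whose polygon approximation has four vertices and whose area fills most of the bounding box, which filters out irregular red blobs:

def looks_like_rectangle(cnt, min_area=400, min_extent=0.8):
    # extent = contour area / bounding-box area; a clean rectangle is close to 1.0
    area = cv2.contourArea(cnt)
    if area < min_area:
        return False
    approx = cv2.approxPolyDP(cnt, 0.02 * cv2.arcLength(cnt, True), True)
    x, y, w, h = cv2.boundingRect(approx)
    return len(approx) == 4 and area / float(w * h) >= min_extent

Inside the loop above, the "Rectangle" label would then only be drawn when looks_like_rectangle(cnt) is True.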

OpenCV Background Subtraction Get Color Objects (Python)

The background subtraction method (BackgroundSubtractorMOG2) normally returns its output as a binary image.
Is there a way to get the original colour of the objects after applying BackgroundSubtractorMOG2?
import cv2
import numpy as np

cap = cv2.VideoCapture("people.mkv")
kernel_dil = np.ones((10, 10), np.uint8)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
fgbg = cv2.createBackgroundSubtractorMOG2(history=0, varThreshold=444, detectShadows=False)

while True:
    ret, frame1 = cap.read()
    frame = cv2.resize(frame1, (1364, 700), fx=0, fy=0, interpolation=cv2.INTER_CUBIC)
    mask = np.zeros(frame.shape, dtype=np.uint8)
    mask.fill(255)
    roi_corners = np.array([[(11, 652), (1353, 652), (940, 84), (424, 84)]], dtype=np.int32)
    cv2.fillPoly(mask, roi_corners, 0)
    masking = cv2.bitwise_or(frame, mask)
    if ret == True:
        fgmask = fgbg.apply(masking, mask)
        fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
        dilation = cv2.dilate(fgmask, kernel_dil, iterations=1)
        (contours, hierarchy) = cv2.findContours(dilation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        for pic, contour in enumerate(contours):
            area = cv2.contourArea(contour)
            x, y, w, h = cv2.boundingRect(contour)
            if (area > 0.001):
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
                cv2.putText(frame, 'People', (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 0, 0), 2, cv2.LINE_AA)
        cv2.imshow("FullScreen", frame)
        cv2.imshow("FGMask1", fgmask)
        cv2.imshow("FGMask2", dilation)
    key = cv2.waitKey(12)
    if key == ord("q"):
        break
If you consider the output as a binary mask, you could just do a bitwise AND with your original image:
cv2.bitwise_and(input, input, mask=yourMOG_output)
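A minimal sketch of how that might look in the loop above, using the variable names from the question (untested, and the window name is my own):

        # keep the original colours only where the foreground mask marks motion
        color_objects = cv2.bitwise_and(frame, frame, mask=dilation)
        cv2.imshow("Color Objects", color_objects)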

Python cv2 VideoWriter File getting corrupted

I was following this tutorial and tried to save the video to an .avi file, but every time I tried, the file was corrupted. I was able to save the frames individually using cv2.imwrite(), but stitching the individual frames together was a lot of work and lagged the entire program. Here is my code:
from imutils.video import VideoStream
import imutils
import time
import cv2

MINIMUM_AREA = 500

# For Video Recording Purposes
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('vibralert_test001.avi', fourcc, 20, (640, 480))

vs = VideoStream(src=0).start()
print("Setting up feed.")
time.sleep(2)
print("Live")

firstFrame = None
while True:
    frame = vs.read()
    text = "No Movement Detected"
    frame = imutils.resize(frame, width=500)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    if firstFrame is None:
        firstFrame = gray
        continue
    delta = cv2.absdiff(firstFrame, gray)
    thresh = cv2.threshold(delta, 25, 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=2)
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    for c in cnts:
        if cv2.contourArea(c) < MINIMUM_AREA:
            continue
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        text = "Movement Detected"
    cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.imshow("VibrAlert v0.1", frame)
    out.write(frame)
    key = cv2.waitKey(1) & 0xFF
    if key == 27:
        break

vs.stop()
out.release()
cv2.destroyAllWindows()
print('End Feed')
The size of the frame images needs to be the same as the frameSize given to the VideoWriter, (640, 480). The frame is being resized to a width of 500, while the VideoWriter is expecting a width of 640.
However, changing the resize to frame = imutils.resize(frame, width=640) probably won't work either. imutils.resize returns images with the same aspect ratio as the original, so the height won't necessarily be 480. I would suggest replacing that line with cv2.resize:
frame = cv2.resize(frame, (640,480))
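Another option (a sketch on my part, not from the original answer): open the VideoWriter only after the first frame has been read and resized, using whatever size that frame actually has, so the writer and the frames can never disagree:

frame = vs.read()
frame = imutils.resize(frame, width=500)
h, w = frame.shape[:2]  # actual size after resizing
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('vibralert_test001.avi', fourcc, 20, (w, h))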

To detect shapes in an irregular image

I am trying to find the circles and rectangles (or squares) in an irregular object using contours and edge detection, but I am not getting the output properly.
I tried changing the Canny values and the epsilon (contour approximation), but was not able to detect them.
Another difficulty I am facing is that there are a lot of handwritten characters on the metal object, so my code detects those as shapes too.
Can anyone please help me with detecting the required shapes on this object using opencv-python?
Metal object
import imutils
import cv2
import numpy as np
import matplotlib.pyplot as plt

image = cv2.imread('part1.jpg')
# image = cv2.imread('C:\Python27\plates\plates2.1.jpg')  # epsilon==0.04, len=5,6
# image = cv2.imread('C:\Python27\plates\plates4.jpg')
# image = cv2.imread('C:\Python27\plates\plates1.jpg')
# image = cv2.imread('C:\Python27\plates\plates3.jpg')  # epsilon=0.0370, len=5
# image = cv2.imread('C:\Python27\plates\plates5.jpg')  # change the epsilon to 0.01
# image = cv2.imread('C:\Python27\plates\plates6.jpg')  # not working properly
cv2.namedWindow('Image')

# for angle in xrange(0, 360, 90):
#     # rotate the image and display it
#     image = imutils.rotate(image, angle=angle)
# gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
# blurred = cv2.GaussianBlur(gray, (5, 5), 0)
# edges = cv2.Canny(image, 200, 650)  # plates3.jpg, plates1.jpg, plates5.jpg
# edges = cv2.Canny(image, 200, 500)  # plates4.jpg
# edges = cv2.Canny(image, 200, 589)  # plates2.1.jpg
# edges = cv2.Canny(image, 100, 450)
edges = cv2.Canny(image, 300, 589)
kernel = np.ones((5, 5), np.uint8)
# thresh = cv2.erode(edges, kernel, iterations=1)
# thresh = cv2.dilate(edges, kernel, iterations=1)
# thresh = cv2.morphologyEx(edges, cv2.MORPH_OPEN, kernel)
thresh = cv2.morphologyEx(edges, cv2.MORPH_CLOSE, kernel)
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
sd = ShapeDetector()
print(len(cnts))

for c in cnts:
    shape = "unidentified"
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.0373 * peri, True)
    if len(approx) == 4:
        (x, y, w, h) = cv2.boundingRect(approx)
        # ar = w / float(h)
        # shape = "slots" if ar >= 0.95 and ar <= 1.05 else "slots"
        shape = "slots"
        # cv2.drawContours(image, [c], -1, (0, 255, 0), 2)
        rect = cv2.minAreaRect(c)
        box = cv2.boxPoints(rect)
        box = np.int0(box)
        cv2.drawContours(image, [box], 0, (0, 0, 255), 2)
        # cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(image, shape, (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 4)
    elif len(approx) == 2:
        shape = "nothing"
        (x, y), radius = cv2.minEnclosingCircle(c)
        center = (int(x), int(y))
        radius = int(radius)
        # cv2.circle(image, center, radius, (0, 255, 0), 2)
        # cv2.putText(image, shape, (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 4)
    elif len(approx) == 5:
        shape = "nothing"
    elif len(approx) == 3:
        shape = "nothing"
    elif len(approx) == 6:
        shape = "nothing"
    else:
        shape = "c" + str(len(approx))
        (x, y), radius = cv2.minEnclosingCircle(c)
        center = (int(x), int(y))
        radius = int(radius)
        cv2.circle(image, center, radius, (0, 255, 0), 2)
        cv2.putText(image, shape, (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)

cv2.imshow("Image", image)
cv2.imshow("edges", thresh)
cv2.waitKey(0)
cv2.destroyAllWindows()
Use binarization. You will get blobs that you can discriminate by size, location and other geometric criteria.
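A minimal sketch of that idea, assuming OpenCV 4's two-value findContours return; the threshold choice, area limit and circularity cut-off are my own assumptions, not values from the answer:

import cv2
import numpy as np

image = cv2.imread('part1.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# binarize: Otsu picks the threshold automatically; invert if the shapes are darker than the plate
_, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for c in contours:
    area = cv2.contourArea(c)
    if area < 500:  # drop small blobs such as handwritten characters
        continue
    peri = cv2.arcLength(c, True)
    circularity = 4 * np.pi * area / (peri * peri) if peri > 0 else 0
    label = "circle" if circularity > 0.8 else "rectangle"
    x, y, w, h = cv2.boundingRect(c)
    cv2.putText(image, label, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)

cv2.imshow("blobs", image)
cv2.waitKey(0)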
