Reading a video file VideoCapture - python

Why is it that when I read a video file using cv2.VideoCapture('video.avi') the FPS is very low? I want the FPS to be the same as when I play the video in a video player (30 fps). What changes should I make to achieve this? By the way, I am using a Raspberry Pi 3 with Python.
import cv2
import numpy as np

# Isolate red-ish regions of each video frame: suppress bright areas with a
# threshold-derived mask, then keep only pixels inside an HSV range, showing
# the intermediate mask and the final result until ESC or end of video.
cap = cv2.VideoCapture('Fchecking.avi')
kernel = np.ones((5, 5), np.uint8)

while True:
    # Take each frame; stop cleanly when the video runs out instead of
    # crashing in cvtColor on a None frame.
    ret, frame = cap.read()
    if not ret or frame is None:
        break

    # Bright-area mask: binary threshold on grayscale, adaptively threshold
    # the result, then invert so bright regions are removed from the frame.
    img2gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(img2gray, 140, 255, cv2.THRESH_BINARY)
    w3w = cv2.adaptiveThreshold(mask, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                cv2.THRESH_BINARY, 47, 2)
    cv2.imshow("mask", mask)
    mask_inv = cv2.bitwise_not(w3w)
    img2_fg = cv2.bitwise_and(frame, frame, mask=mask_inv)

    # Keep only pixels inside the HSV range of interest, closing small holes
    # in the mask before applying it.
    hsv = cv2.cvtColor(img2_fg, cv2.COLOR_BGR2HSV)
    lower_red = np.array([0, 58, 130])
    upper_red = np.array([255, 255, 255])
    rmask = cv2.inRange(hsv, lower_red, upper_red)
    mask2 = cv2.morphologyEx(rmask, cv2.MORPH_CLOSE, kernel)
    final = cv2.bitwise_and(frame, frame, mask=mask2)

    cv2.imshow('final', final)
    cv2.imshow('original', frame)

    # ESC quits. (The unused `erosion` computation from the original was
    # removed; its result was never read.)
    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        break

# Release the capture as well as destroying the windows.
cap.release()
cv2.destroyAllWindows()

Why is it if i read a video file using cv2.VideoCapture('video.avi') the fps is very very very low?
The answer to this question is the massive chunk of code that you have between reading the frame and showing it on the screen. When you watch the video in a standard video player, it doesn't do this processing.
img2gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
ret,mask = cv2.threshold(img2gray,140,255,cv2.THRESH_BINARY)
w3w = cv2.adaptiveThreshold(mask,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,47,2)
cv2.imshow("mask", mask)
mask_inv = cv2.bitwise_not(w3w)
img2_fg = cv2.bitwise_and(frame, frame, mask=mask_inv)
hsv = cv2.cvtColor(img2_fg, cv2.COLOR_BGR2HSV)
lower_red= np.array([0,58,130])
upper_red = np.array([255,255,255])
erosion = cv2.erode(mask,kernel,iterations = 3)
rmask = cv2.inRange(hsv, lower_red, upper_red)
mask2 = cv2.morphologyEx(rmask, cv2.MORPH_CLOSE, kernel)
final = cv2.bitwise_and(frame, frame, mask=mask2)
What changes should i make to achieve this?
It's likely that:
If you reduce the processing it will speed up
If you increase the processing power it will speed up

Related

Detect if a object is big enough open cv

I need some help here, please :)
I have this piece of code with open cv:
import cv2
import numpy as np
import time

# Crude hand open/closed detector: count how much of the frame falls inside
# a broad colour range and compare the total against two thresholds.
cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    # Stop if the camera stops delivering frames instead of crashing below.
    if not ret or frame is None:
        break

    low_red = np.array([100, 100, 100])
    high_red = np.array([225, 225, 225])
    mask = cv2.inRange(frame, low_red, high_red)

    # Rough "amount of colour" measure: sum of all mask pixel values.
    hasColor = np.sum(mask)
    if hasColor > 1000000:
        print(f'Hand opened, infos : {hasColor}')
    elif 500000 < hasColor < 1000000:
        print(f'Hand closed, infos : {hasColor}')

    cv2.imshow("Camera", frame)
    cv2.imshow("Mask", mask)
    if cv2.waitKey(1) & 0xFF == ord('e'):
        break

# Release the camera and close the windows on exit.
cap.release()
cv2.destroyAllWindows()
And I would like it to detect only objects of a certain size (like a hand) and not the smaller ones.
Thanks for help :)
EDIT: Made some good progress but still don't know how to get the size
import cv2
import numpy as np

# Threshold each camera frame, find contours, and draw a bounding box
# around every detected contour.
cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    # Stop cleanly if the camera stops delivering frames.
    if not ret or frame is None:
        break

    low = np.array([100, 100, 100])
    high = np.array([225, 225, 225])
    mask = cv2.inRange(frame, low, high)
    cv2.imshow("Camera", frame)

    # cap.read() returns BGR frames, so BGR2GRAY is the correct conversion;
    # the original RGB2GRAY silently swapped the red/blue channel weights.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    ret, tresh = cv2.threshold(gray, 127, 255, 0)
    contours, hierarchy = cv2.findContours(tresh, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
    # Iterate contours directly instead of indexing by range(len(...)).
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    cv2.imshow("Mask", mask)
    cv2.imshow("Hull", frame)
    if cv2.waitKey(1) & 0xFF == ord('e'):
        break

# Release the camera and close the windows on exit.
cap.release()
cv2.destroyAllWindows()
So I think I have to check the size of the bounding box, but I don't know how.
Problem solved thanks to a kind human,
I just had to add the following code in the for loop:
if h > 150 and w > 150:
cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),2)
I just had to check for the size, thanks for your help !

How to give user inputs to detect a specific color from video in opencv?

I have written code in Python to detect a red color in OpenCV. I am getting proper output, but I want to give user input in the code to detect a specific color. E.g., if I give blue as the user input, it should show only blue in the output. I also want to add a few attributes to the output, such as the time the object was detected and its location in the live video. I am new to Python and OpenCV; it would be great to get some guidance.
My existing code is as below:
import cv2
import numpy as np


# Capture the input frame from webcam
def get_frame(cap, scaling_factor):
    """Grab one frame from *cap* and shrink it by *scaling_factor*.

    Returns the resized BGR frame, or None when no frame could be read
    (end of stream / camera error) — the original crashed in resize here.
    """
    ret, frame = cap.read()
    if not ret or frame is None:
        return None
    # INTER_AREA is the recommended interpolation when shrinking an image.
    frame = cv2.resize(frame, None, fx=scaling_factor,
                       fy=scaling_factor, interpolation=cv2.INTER_AREA)
    return frame


if __name__ == '__main__':
    cap = cv2.VideoCapture(0)
    scaling_factor = 0.5

    # Iterate until the user presses the ESC key or frames run out.
    while True:
        frame = get_frame(cap, scaling_factor)
        if frame is None:
            break

        # Convert to the HSV colorspace
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        # Define 'blue' range in HSV colorspace
        lower = np.array([60, 100, 100])
        upper = np.array([180, 255, 255])

        # Threshold the HSV image to get only blue color
        mask = cv2.inRange(hsv, lower, upper)

        # Bitwise-AND mask and original image, then smooth the result
        res = cv2.bitwise_and(frame, frame, mask=mask)
        res = cv2.medianBlur(res, 5)

        cv2.imshow('Original image', frame)
        cv2.imshow('Color Detector', res)

        # Check if the user pressed ESC key
        c = cv2.waitKey(5)
        if c == 27:
            break

    # Release the camera as well as destroying the windows.
    cap.release()
    cv2.destroyAllWindows()
Your code seems to work correctly. Looking at the equations about converting from RGB to HSV bearing in mind that boundaries in OpenCV are between 0-180 for H channel and 0-255 for S and V channels, we can generalize the code to work for all colors.
import cv2
import numpy as np


def get_frame(cap, scaling_factor):
    """Read one frame from the capture object and scale it down."""
    ret, frame = cap.read()
    # Resize the input frame; INTER_AREA suits downscaling.
    frame = cv2.resize(frame, None, fx=scaling_factor,
                       fy=scaling_factor, interpolation=cv2.INTER_AREA)
    return frame


# Current colour range in HSV colourspace. Two lower/upper pairs are kept
# because the selected hue window may wrap around the ends of the H axis.
lower1 = np.array([0, 100, 100])
upper1 = np.array([60, 255, 255])
lower2 = np.array([120, 100, 100])
upper2 = np.array([180, 255, 255])


def color_range(degree):
    """Trackbar callback: recompute the HSV bounds for *degree* (0-360)."""
    # OpenCV uses a 0-180 range for the H channel, so halve the degree.
    degree = int(degree / 2)
    global lower1, upper1, lower2, upper2
    if degree < 60:
        # Window extends below 0: wrap the low part to the top of the axis.
        lower1 = np.array([int(0), 100, 100])
        upper1 = np.array([degree + 60, 255, 255])
        lower2 = np.array([degree + 120, 100, 100])
        upper2 = np.array([int(180), 255, 255])
    elif degree > 120:
        # Window extends above 180: wrap the high part to the bottom.
        lower1 = np.array([degree - 60, 100, 100])
        upper1 = np.array([int(180), 255, 255])
        lower2 = np.array([int(0), 100, 100])
        upper2 = np.array([degree - 120, 255, 255])
    else:
        # Window fits entirely inside [0, 180]; the second pair is set to an
        # out-of-range value so it matches nothing.
        lower1 = np.array([degree - 60, 100, 100])
        upper1 = np.array([degree + 60, 255, 255])
        lower2 = np.array([181, 100, 100])
        upper2 = np.array([181, 255, 255])


if __name__ == '__main__':
    cap = cv2.VideoCapture(0)
    scaling_factor = 0.5

    # Trackbar lets the user pick the hue (in degrees) to isolate.
    winname = 'Color Detector'
    cv2.namedWindow(winname)
    cv2.createTrackbar('Color', winname, 0, 360, color_range)

    # Iterate until the user presses the ESC key.
    while True:
        frame = get_frame(cap, scaling_factor)

        # Convert to the HSV colorspace.
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        # Threshold against both (possibly wrapped) hue intervals and merge.
        mask1 = cv2.inRange(hsv, lower1, upper1)
        mask2 = cv2.inRange(hsv, lower2, upper2)
        mask = cv2.bitwise_or(mask1, mask2)

        # Keep only the pixels selected by the combined mask.
        res = cv2.bitwise_and(frame, frame, mask=mask)

        cv2.imshow('Original image', frame)
        cv2.imshow(winname, res)

        # ESC quits.
        c = cv2.waitKey(1) & 0xFF
        if c == 27:
            break

    cv2.destroyAllWindows()
    cap.release()

Optimised way to generate summary of a video?

The code which I have written is able to create a summary of the video by skipping the frames with no motion. But it is taking more than 10 times the duration of the video to save the output video. Could anyone please help me make some changes to the code? It is not an issue with the system configuration — I have tried it even on an i7 system with a GPU.
# Video summariser (question code): keep only the frames that contain motion.
import cv2
import imutils
vs = cv2.VideoCapture("example_01.mp4")
fgbg = cv2.createBackgroundSubtractorMOG2()
pathOut = "output.mp4"
frame_array = []
while True:
# NOTE(review): `ret` is never checked, so frame.copy() below raises once
# the video runs out of frames.
ret,frame = vs.read()
forig = frame.copy()
height,width,layers = frame.shape
size = (width,height)
# Motion mask: blur, background-subtract, then erode/dilate cleanup.
blurred = cv2.GaussianBlur(frame, (11, 11), 0)
gray = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)
mask = fgbg.apply(gray)
mask = cv2.erode(mask, None, iterations=2)
mask = cv2.dilate(mask, None, iterations=2)
cnts = cv2.findContours(mask.copy(),
cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
cv2.imshow('mask',mask)
for c in cnts:
area = cv2.contourArea(c)
if area > 2000:
frame_array.append(frame)
# NOTE(review): this is the performance bug the answers below address — a
# new VideoWriter is opened and ALL frames collected so far are rewritten
# for EVERY large contour of EVERY frame, so the work grows quadratically
# with the number of saved frames.
out = cv2.VideoWriter(pathOut,cv2.VideoWriter_fourcc(*'MPEG'), 20,
size)
for i in range(len(frame_array)):
out.write(frame_array[i])
out.release()
cv2.imshow("Frame", frame)
key = cv2.waitKey(25)
if key == ord("q"):
break
vs.release()
cv2.destroyAllWindows()
I took an 18-second movie, and your code on my old computer worked for many minutes and kept slowing down, so finally I killed it and didn't get an output movie.
This code needs ~57 seconds to do the same. If I don't display windows then it needs 39 seconds.
I open out only once. I don't append frame to frame_array but write this one frame directly to file.
import cv2
import imutils
import time

# Summarise a video by keeping only the frames that contain motion.
# The writer is opened exactly once and matching frames are written
# directly, instead of rewriting the whole output for every frame.
vs = cv2.VideoCapture("Wideo/1-sierpinski-carpet-turtle.mp4")
fgbg = cv2.createBackgroundSubtractorMOG2()

pathOut = "output.mp4"
out = None

start = time.time()

while True:
    ret, frame = vs.read()
    if frame is None:
        break

    forig = frame.copy()
    height, width, layers = frame.shape
    size = (width, height)

    # Open the writer lazily, once the frame size is known. Explicit
    # `is None` test — don't rely on VideoWriter truthiness.
    if out is None:
        out = cv2.VideoWriter(pathOut, cv2.VideoWriter_fourcc(*'MPEG'), 20, size)

    # Motion mask: blur, background-subtract, then erode/dilate cleanup.
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    gray = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)
    mask = fgbg.apply(gray)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)

    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)

    cv2.imshow('mask', mask)

    for c in cnts:
        area = cv2.contourArea(c)
        if area > 2000:
            out.write(frame)
            break  # don't check other areas

    cv2.imshow("Frame", frame)

    key = cv2.waitKey(25)
    if key == ord("q"):
        break

end = time.time()
print("time:", end - start)

# Guard against an input with no readable frames, in which case the
# writer was never opened and out.release() would raise.
if out is not None:
    out.release()
vs.release()
cv2.destroyAllWindows()
Your code opens file again and again and write all frames again and again - so it was slowing down on my computer. You could eventually write it only once after loop.
import cv2
import imutils
import time

# Variant that collects matching frames in memory and writes the output
# file once, after the processing loop finishes.
vs = cv2.VideoCapture("Wideo/1-sierpinski-carpet-turtle.mp4")
fgbg = cv2.createBackgroundSubtractorMOG2()

pathOut = "output.mp4"
frame_array = []

start = time.time()

while True:
    ret, frame = vs.read()
    if frame is None:
        break

    forig = frame.copy()
    height, width, layers = frame.shape
    size = (width, height)

    # Motion mask: blur, background-subtract, then erode/dilate cleanup.
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    gray = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)
    mask = fgbg.apply(gray)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)

    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)

    cv2.imshow('mask', mask)

    for c in cnts:
        area = cv2.contourArea(c)
        if area > 2000:
            frame_array.append(frame)
            break  # one large contour is enough to keep the frame

    cv2.imshow("Frame", frame)

    key = cv2.waitKey(25)
    if key == ord("q"):
        break

# --- after loop ---
# Guard: with an empty/unreadable input, `size` was never assigned and
# there is nothing to write, so skip creating the writer entirely
# (the original raised NameError on `size` here).
if frame_array:
    out = cv2.VideoWriter(pathOut, cv2.VideoWriter_fourcc(*'MPEG'), 20, size)
    for frame in frame_array:
        out.write(frame)
    out.release()

end = time.time()
print("time:", end - start)

vs.release()
cv2.destroyAllWindows()

Using an IP webcam for live streaming in OpenCV

When I run this program it uses front camera of my Android phone. But I want to have video processing from back camera. How should i do it?
# Poll single JPEG snapshots from the IP-webcam app and colour-filter them
# (question code; the answer below replaces this with a video stream).
import cv2
import numpy as np
import urllib
# NOTE(review): `urllib.urlopen` is the Python 2 API; Python 3 would need
# `urllib.request.urlopen`.
url = 'http://192.168.1.183:8080/shot.jpg'
while True:
# Fetch one JPEG over HTTP and decode it into a BGR image.
imgResp = urllib.urlopen(url)
img = np.array(bytearray(imgResp.read()), dtype=np.uint8)
img1 = cv2.imdecode(img, -1)
# Keep only pixels inside the HSV range below.
hsv = cv2.cvtColor(img1, cv2.COLOR_BGR2HSV)
lower_red = np.array([30,150,50])
upper_red = np.array([255,255,180])
mask = cv2.inRange(hsv, lower_red, upper_red)
res = cv2.bitwise_and(img1, img1, mask=mask)
cv2.imshow('img',res)
cv2.waitKey(10)
import cv2
import numpy as np
import urllib  # unused here; left over from the snapshot-polling approach

# Stream directly from the IP-webcam app's /video endpoint instead of
# polling /shot.jpg one JPEG at a time.
url = 'http://192.168.1.183:8080/video'
cap = cv2.VideoCapture(url)

while True:
    ret, img1 = cap.read()
    # Stop if the stream drops instead of crashing in cvtColor on None.
    if not ret or img1 is None:
        break

    # Keep only pixels inside the HSV range below.
    hsv = cv2.cvtColor(img1, cv2.COLOR_BGR2HSV)
    lower_red = np.array([30, 150, 50])
    upper_red = np.array([255, 255, 180])
    mask = cv2.inRange(hsv, lower_red, upper_red)
    res = cv2.bitwise_and(img1, img1, mask=mask)

    cv2.imshow('img', res)
    cv2.waitKey(10)

# Release the stream and close the window on exit.
cap.release()
cv2.destroyAllWindows()
I had passed the url into VideoCapture(). Then read the frames. It worked.
Added Code:
url = 'http://192.168.1.183:8080/video'
cap = cv2.VideoCapture(url)
(Inside while loop)
_, img1 = cap.read()

How to Save the trackbar values after closing the python code using Opencv?

I am using OpenCV to detect the color of objects by using HSV trackbar values, and I want my Python code to save the latest changes I make to the trackbars, so that when I start the code again, the trackbars will have the last values. Below is my code.
import numpy as np
import cv2

# open the camera
cap = cv2.VideoCapture(0)


def nothing(x):
    """Dummy trackbar callback; values are polled in the main loop instead."""
    pass


cv2.namedWindow('result')

# Starting with 100's to prevent error while masking
h, s, v = 100, 100, 100

# Creating track bars for the lower HSV bound
cv2.createTrackbar('h', 'result', 0, 179, nothing)
cv2.createTrackbar('s', 'result', 0, 255, nothing)
cv2.createTrackbar('v', 'result', 0, 255, nothing)

while True:
    # read the image from the camera; stop cleanly if no frame arrives
    ret, frame = cap.read()
    if not ret or frame is None:
        break

    # You will need this later.
    # NOTE(review): 35 is a raw cv2 colour-conversion code — confirm which
    # conversion is intended and use the named constant.
    frame = cv2.cvtColor(frame, 35)

    # converting to HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # get info from track bar and apply to result
    h = cv2.getTrackbarPos('h', 'result')
    s = cv2.getTrackbarPos('s', 'result')
    v = cv2.getTrackbarPos('v', 'result')

    # Normal masking algorithm
    lower_blue = np.array([h, s, v])
    upper_blue = np.array([180, 255, 255])
    mask = cv2.inRange(hsv, lower_blue, upper_blue)
    result = cv2.bitwise_and(frame, frame, mask=mask)
    cv2.imshow('result', result)

    # find the centre of the largest contour in the mask
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)[-2]
    center = None
    if len(cnts) > 0:
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        # Guard against a zero-area contour (division by zero in the
        # centroid computation).
        if M["m00"] != 0:
            center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
            if radius > 10:
                # cv2.circle(frame, (int(x),int(y)), int(radius), 2)
                cv2.circle(frame, center, 5, (0, 0, 255), -1)

    # color detection limits
    lB = 5
    lG = 50
    lR = 50
    hB = 15
    hG = 255
    hR = 255
    lowerLimits = np.array([lB, lG, lR])
    upperLimits = np.array([hB, hG, hR])

    # Our operations on the frame come here
    thresholded = cv2.inRange(frame, lowerLimits, upperLimits)
    outimage = cv2.bitwise_and(frame, frame, mask=thresholded)
    cv2.imshow('original', frame)

    # Display the resulting frame
    cv2.imshow('processed', outimage)

    # Quit the program when Q is pressed
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything done, release the capture
print('closing program')  # fixed: was a Python 2 print statement
cap.release()
cv2.destroyAllWindows()
One option is to write the values to a text file somewhere, then when the program starts, read the file and parse the values written in the file.
See: How could I save data after closing my program?

Categories

Resources