I'm trying to read video into a buffer using threads in OpenCV, and I get "Assertion fctx->async_lock failed at libavcodec/pthread_frame.c:167". The reason I want to use threads is so I can read a lot of frames into a list at a time, which keeps things fast. I need all the frames and cannot skip any, so I thought multi-threading would be the way to go. My code works single-threaded, i.e. with buffer_refill = 1.
import threading
import cv2

cap = cv2.VideoCapture('data/temp/testing/test.mp4')
frames = []
total_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
frames_left = total_frames
threads = []
print(total_frames)

min_buffer = 120
buffer_refill = 60

def buffer():
    print('buffer')
    frame = cap.grab()
    ret, frame = cap.retrieve()
    frames.append(frame)

def get_buffer(num):
    global frames_left
    for i in range(num):
        frames_left -= 1
        if frames_left > 0:
            t = threading.Thread(target=buffer)
            t.start()
            threads.append(t)
    for thread in threads:
        thread.join()
    print('block')

while(cap.isOpened()):
    if frames_left > 0 and len(frames) < min_buffer:
        get_buffer(buffer_refill)
    else:
        cv2.imshow('Frame', frames[0])
        frames.pop(0)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

cap.release()
cv2.destroyAllWindows()
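For reference, this particular ffmpeg assertion is typically triggered when several threads call grab()/retrieve() on the same VideoCapture at once; cv2.VideoCapture is not thread-safe. A minimal sketch of buffering with a single reader thread and a queue instead (not the original code; the file path is the one from the question, the maxsize value is an assumption):

import threading
import queue
import cv2

frame_q = queue.Queue(maxsize=120)

def reader(path):
    # one thread owns the VideoCapture and fills the buffer
    cap = cv2.VideoCapture(path)
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        frame_q.put(frame)          # blocks when the buffer is full
    cap.release()
    frame_q.put(None)               # sentinel: no more frames

t = threading.Thread(target=reader, args=('data/temp/testing/test.mp4',), daemon=True)
t.start()

while True:
    frame = frame_q.get()
    if frame is None:
        break
    cv2.imshow('Frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cv2.destroyAllWindows()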
I am reading an RTSP stream (a local RTSP link) from my CCTV camera connected over LAN.
My main goal:
I want to perform some processing on the frames and display them via m3u8 in real time (or nearly real time) so that I can show them in the frontend using hls.js.
Currently I am trying to create the video in real time so that I can use ffmpeg to create the m3u8.
Sharing my code below.
import cv2
from moviepy.editor import *
import numpy as np
import time

url = "rtsp://username:password#192.168.1.100:10554/Streaming/channels/401"
cap = cv2.VideoCapture(url)

def make_video_file(clips):
    try:
        print(f"clips = {clips}")
        video_clip = concatenate_videoclips(clips, method='compose')
        video_clip.write_videofile("video-output.mp4", fps=30)
    except Exception as e:
        print(e)

FRAME_COUNTER = 0
NUMBER_OF_FRAMES = 30
CLIPS = [0 for i in range(NUMBER_OF_FRAMES)]

while True:
    ret, frame = cap.read()
    # print(frame)
    if not ret:
        continue
    CLIPS.pop(0)
    CLIPS.append(ImageClip(frame).set_duration(1))
    if FRAME_COUNTER == NUMBER_OF_FRAMES:
        try:
            FRAME_COUNTER = 0
            make_video_file(CLIPS)
        except:
            pass
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    NUMBER_OF_FRAMES += 1

cap.release()
cv2.destroyAllWindows()
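For what it's worth, the usual way to get from OpenCV frames to an m3u8 is to pipe raw frames into an ffmpeg process that writes HLS segments directly, rather than assembling mp4 clips first. A rough sketch under that assumption (the playlist name, segment length, and input frame rate are assumptions, not the original code):

import subprocess
import cv2

url = "rtsp://username:password#192.168.1.100:10554/Streaming/channels/401"  # URL copied from the question
cap = cv2.VideoCapture(url)

ret, frame = cap.read()
if not ret:
    raise RuntimeError("could not read from the RTSP stream")
height, width = frame.shape[:2]

# ffmpeg reads raw BGR frames on stdin and writes HLS segments plus the playlist
ffmpeg = subprocess.Popen([
    "ffmpeg", "-y",
    "-f", "rawvideo", "-pix_fmt", "bgr24",
    "-s", f"{width}x{height}", "-r", "25",     # assumed input frame rate
    "-i", "-",
    "-c:v", "libx264", "-preset", "veryfast",
    "-f", "hls", "-hls_time", "2", "-hls_list_size", "5",
    "stream.m3u8",                             # assumed output playlist name
], stdin=subprocess.PIPE)

while ret:
    # any per-frame processing would go here before writing
    ffmpeg.stdin.write(frame.tobytes())
    ret, frame = cap.read()

ffmpeg.stdin.close()
ffmpeg.wait()
cap.release()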
I am trying to interface my OpenCV program with my Raspberry Pi PiCamera. Every time I use OpenCV to capture video, the FPS drops drastically. When I capture video using the PiCamera library, everything is fine and smooth.
Why is this happening?
Is there a way to fix it?
This is my code:
import time
import RPi.GPIO as GPIO
from PCA9685 import PCA9685
import numpy as np
import cv2

try:
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FPS, 90)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 800)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 700)
    while(True):
        ret, frame = cap.read()
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # When everything is done, release the capture
except:
    pwm.exit_PCA9685()
    print("\nProgram end")
    exit()

cap.release()
cv2.destroyAllWindows()
This is the error I'm getting:
First of all, those are warnings, not errors.
Reduce the video dimensions. Specify the dimensions.
cv2.VideoCapture has some problems: it buffers frames, and the frames are queued, so if you are doing some processing and your processing is slower than the capture bandwidth, the video will lag behind.
So, here is a bufferless VideoCapture.
video_capture_Q_buf.py
import cv2, queue as Queue, threading, time

is_frame = True

# bufferless VideoCapture
class VideoCaptureQ:

    def __init__(self, name):
        self.cap = cv2.VideoCapture(name)
        self.q = Queue.Queue()
        t = threading.Thread(target=self._reader)
        t.daemon = True
        t.start()

    # read frames as soon as they are available, keeping only most recent one
    def _reader(self):
        while True:
            ret, frame = self.cap.read()
            if not ret:
                global is_frame
                is_frame = False
                break
            if not self.q.empty():
                try:
                    self.q.get_nowait()  # discard previous (unprocessed) frame
                except Queue.Empty:
                    pass
            self.q.put(frame)

    def read(self):
        return self.q.get()
Using it:
test.py
import video_capture_Q_buf as vid_cap_q  # import as alias
from video_capture_Q_buf import VideoCaptureQ  # class import
import time

cap = VideoCaptureQ(vid_path)

while True:
    t1 = time.time()

    if vid_cap_q.is_frame == False:
        print('no more frames left')
        break

    try:
        ori_frame = cap.read()
        # do your stuff
    except Exception as e:
        print(e)
        break

    t2 = time.time()
    print(f'FPS: {1/(t2-t1)}')
I am able to extract the frames of a certain test.mp4 file using the following code:
import cv2

def get_frames():
    cap = cv2.VideoCapture('test.mp4')
    i = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        cv2.imwrite('test_' + str(i) + '.jpg', frame)
        i += 1

    cap.release()
    cv2.destroyAllWindows()
A lot of the extracted frames are useless because they're nearly identical. I need to be able to control the rate at which frames are extracted.
I think you just need to skip frames on a fixed cycle.
import cv2

def get_frames():
    cap = cv2.VideoCapture('test.mp4')
    i = 0
    # a variable to set how many frames you want to skip
    frame_skip = 10
    # a variable to keep track of the frame to be saved
    frame_count = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        if i > frame_skip - 1:
            frame_count += 1
            cv2.imwrite('test_' + str(frame_count * frame_skip) + '.jpg', frame)
            i = 0
            continue
        i += 1

    cap.release()
    cv2.destroyAllWindows()
Try the logic below. Here, we wait for a period of time (based on the desired frame rate) and only save the latest frame once that interval has elapsed.
import cv2
import time

def get_frames():
    cap = cv2.VideoCapture('test.mp4')
    frame_rate = 10
    prev = 0
    i = 0
    while cap.isOpened():
        time_elapsed = time.time() - prev
        ret, frame = cap.read()
        if not ret:
            break
        if time_elapsed > 1. / frame_rate:
            # print(time_elapsed)
            prev = time.time()
            cv2.imwrite('./data/sample1/test_' + str(i) + '.jpg', frame)
            i += 1

    cap.release()
    cv2.destroyAllWindows()
As an alternative to writing your own code to do this, have you considered using FFmpeg? FFmpeg can extract all frames from a video and save them as images, and it can also extract frames at a lower frame rate than the source video.
See here for a demonstration of what I think you're trying to do, and the arguments to give ffmpeg to do so.
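As a minimal sketch of that approach, ffmpeg's fps filter can do the rate-limited extraction directly; here it is invoked from Python (the input name comes from the question, while the output pattern and the 2 frames-per-second rate are assumptions):

import subprocess

subprocess.run([
    "ffmpeg",
    "-i", "test.mp4",        # input video (name taken from the question)
    "-vf", "fps=2",          # extract 2 frames per second instead of every frame
    "frame_%04d.jpg",        # assumed output filename pattern
], check=True)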
My code captures images every 2 seconds, but the problem is that it runs endlessly. I need the script to terminate after a set time period (e.g. after 50 seconds). I tried using sleep(), but that doesn't terminate or close the script; it just puts it to sleep!
Hope someone could help me with terminating the script after a time period.
My script:
import cv2
import numpy
import time

capture = cv2.VideoCapture(0)
capture.set(3, 640)
capture.set(4, 480)

img_counter = 0
frame_set = []
start_time = time.time()

while True:
    ret, frame = capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow('frame', gray)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    if time.time() - start_time >= 2:
        img_name = "FaceFrame{}.jpg".format(img_counter)
        cv2.imwrite(img_name, frame)
        print("{} written!".format(img_counter))
        start_time = time.time()
        img_counter += 1
actual_start_time = time.time()  # time.clock() was removed in Python 3.8; time.time() is used here instead
start_time = time.time()

while (time.time() - actual_start_time) < 50:
    # do stuff
Alternatively:
actual_start_time = time.time()
start_time = time.time()

while True:
    # do stuff
    if (time.time() - actual_start_time) > 50:
        break
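A sketch combining that elapsed-time check with the capture loop from the question (the 50-second limit, 2-second save interval, and 640x480 size are the values mentioned above; treat it as a starting point, not a drop-in):

import cv2
import time

capture = cv2.VideoCapture(0)
capture.set(3, 640)
capture.set(4, 480)

img_counter = 0
last_save = time.time()
run_start = time.time()

while (time.time() - run_start) < 50:          # stop the whole script after 50 seconds
    ret, frame = capture.read()
    if not ret:
        break
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    if time.time() - last_save >= 2:           # save one image every 2 seconds
        cv2.imwrite("FaceFrame{}.jpg".format(img_counter), frame)
        img_counter += 1
        last_save = time.time()

capture.release()
cv2.destroyAllWindows()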
I have to read frames from a video in parallel with multiprocessing and queues using OpenCV in Python, and I'm getting an error with my code.
This is my code, and I don't know where the problem is.
#! /usr/bin/python
import numpy as np
import cv2
import multiprocessing as mp
import time

def read_frames(q1, q2):
    while True:
        frame = q1.get()
        if frame == 'Done':
            break
        R = frame[:, :, 0]
        G = frame[:, :, 1]
        B = frame[:, :, 2]
        y = (np.uint8)((0.299 * R) + (0.587 * G) + (0.114 * B))
        q2.put(y)

if __name__ == '__main__':
    q1 = mp.Queue()
    q2 = mp.Queue()
    processes = [mp.Process(target=read_frames, args=(q1, q2)) for i in rang$
    for p in processes:
        p.start()

    # feed the processes
    # read input file and send to the processes the frames:
    cap = cv2.VideoCapture('gou.avi')
    lines = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    cols = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    fourcc_ver = int(cap.get(cv2.CAP_PROP_FOURCC))
    out = cv2.VideoWriter('output.avi', fourcc_ver, fps, (cols, lines), False)

    # y = np.empty(shape=(lines,cols),dtype=np.uint8)
    while(cap.isOpened()):
        ret, frame = cap.read()
        # as long as new frames are there
        if ret == True:
            q1.put(frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            break

    q1.put('Done')
    for p in processes:
        p.join()

    for p in processes:
        result = [q2.get()]
        # result.sort()
        # results = [r[1] for r in results]
        for i in result:
            out.write(i)

    # Release everything if job is finished
    cap.release()
    out.release()
    cv2.destroyAllWindows()
What you could do is append all the frames to a list and then address them by their position in the list, e.g. frames[0] or frames[1:4].
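A minimal sketch of that idea (reading happens up front in a single loop; the video path is the one from the question's code):

import cv2

cap = cv2.VideoCapture('gou.avi')   # path taken from the question's code
frames = []

# read every frame into a list first
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    frames.append(frame)
cap.release()

# frames can now be addressed by position
first = frames[0]
small_batch = frames[1:4]   # a slice of frames, as described above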