import numpy as np
import cv2
import multiprocessing
import time
import random
finish_state = multiprocessing.Event()
#function that requests frames
def actions_func(frame):
    while True:
        time.sleep(random.randint(1,5))
        cv2.imshow('requested_frame_1', frame)
        time.sleep(random.randint(1,5))
        cv2.imshow('requested_frame_2', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'): break
#function that keeps the camera always on and should return the frame value with the last image only when requested
def capture_cam():
    cap = cv2.VideoCapture(1)
    if (cap.isOpened() == False):
        print("Unable to read camera feed")

    # Default resolutions of the frame are obtained. The default resolutions are system dependent.
    # We convert the resolutions from float to integer.
    frame_width = int(cap.get(3))
    frame_height = int(cap.get(4))

    while(True):
        ret, frame = cap.read()
        if ret == True:
            cv2.imshow('frame', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'): break
        else:
            break
def main_process(finish_state):
    thr1, frame = multiprocessing.Process(target=capture_cam)
    thr1.start()

    thr2 = multiprocessing.Process(target=actions_func, args=(frame,))
    thr2.start()

if __name__ == '__main__':
    main_process(finish_state)
    print("continue the code with other things after all threads/processes except the main one were closed with the loop that started them... ")
I want a webcam to be open all the time capturing images; for this I created thread1, which is supposed to run all the time, regardless of the rest of the program.
What I need is to fix this program so that it can request frames from the function that runs permanently on thread1.
The problem is that I don't know when it will be time to ask thread1 for its last frame; to represent that I put the random.randint(1,5) calls, although in reality I won't know the maximum or minimum time after which the last frame will be requested from thread1.
The truth is that I'm getting tangled up with this program, and I don't know whether it's better to create a thread2 to make the frame requests or to keep only thread1 and make the requests from the main thread.
Although I say "thread", they are actually parallel processes. I tried with threads, but I think it is more convenient to use processes, right?
Traceback (most recent call last):
  File "request_frames_thread.py", line 58, in <module>
    main_process(finish_state)
  File "request_frames_thread.py", line 50, in main_process
    thr1, frame = multiprocessing.Process(target=capture_cam)
TypeError: cannot unpack non-iterable Process object
I would have the main process create a full-duplex multiprocessing.Pipe instance, which returns two multiprocessing.connection.Connection instances, and pass one connection to each of your processes. These connections serve as a simple two-way channel for sending and receiving objects to one another. I would have the capture_cam process start a daemon thread (it will terminate when all your regular threads terminate, so it can sit in an infinite loop) that is passed one of these connections to handle requests for the latest frame, which is stored in a global variable.
The only requirement is that a frame be serializable by the pickle module.
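As a quick sanity check (not part of the answer's code below), a NumPy BGR frame round-trips through pickle without any extra work, which is exactly what Connection.send()/recv() rely on:

import pickle
import numpy as np

# A dummy 480x640 BGR frame standing in for cap.read() output.
frame = np.zeros((480, 640, 3), dtype=np.uint8)

# Round-trip through pickle, the same serialization used by Connection.send()/recv().
restored = pickle.loads(pickle.dumps(frame))
assert restored.shape == frame.shape and restored.dtype == frame.dtype

The complete code follows: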
import multiprocessing
from threading import Thread
import time
import random
import cv2
#function that requests frames
def actions_func(conn):
    try:
        while True:
            time.sleep(random.randint(1,5))
            # Ask for latest frame by sending any message:
            conn.send('frame')
            frame = conn.recv() # This is the response
            cv2.imshow('requested_frame_1', frame)
            time.sleep(random.randint(1,5))
            # Ask for latest frame by sending any message:
            conn.send('frame')
            frame = conn.recv() # This is the response
            cv2.imshow('requested_frame_2', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'): break
    except BrokenPipeError:
        # The capture_cam process has terminated.
        pass
def handle_frame_requests(conn):
    try:
        while True:
            # Any message coming in is a request for the latest frame:
            request = conn.recv()
            conn.send(frame) # The frame must be pickle-able
    except EOFError:
        # The actions_func process has ended
        # and its connection has been closed.
        pass
#function that keeps the camera always on and should return the frame value with the last image only when requested
def capture_cam(conn):
    global frame

    frame = None
    # Start a daemon thread to handle frame requests:
    Thread(target=handle_frame_requests, args=(conn,), daemon=True).start()

    cap = cv2.VideoCapture(1)
    if (cap.isOpened() == False):
        print("Unable to read camera feed")

    # Default resolutions of the frame are obtained. The default resolutions are system dependent.
    # We convert the resolutions from float to integer.
    frame_width = int(cap.get(3))
    frame_height = int(cap.get(4))

    while(True):
        ret, frame = cap.read()
        if ret == True:
            cv2.imshow('frame', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'): break
        else:
            break
def main_process(finish_state):
    conn1, conn2 = multiprocessing.Pipe(duplex=True)
    p1 = multiprocessing.Process(target=capture_cam, args=(conn1,))
    p1.start()

    p2 = multiprocessing.Process(target=actions_func, args=(conn2,))
    p2.start()

if __name__ == '__main__':
    finish_state = multiprocessing.Event()
    main_process(finish_state)
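One practical note, not part of the answer above: main_process starts the two workers and returns immediately, so nothing waits for them to finish. A minimal sketch of how the main process could block until both workers exit (assuming main_process is changed to return the Process handles) would be:

def main_process(finish_state):
    conn1, conn2 = multiprocessing.Pipe(duplex=True)
    p1 = multiprocessing.Process(target=capture_cam, args=(conn1,))
    p2 = multiprocessing.Process(target=actions_func, args=(conn2,))
    p1.start()
    p2.start()
    return p1, p2

if __name__ == '__main__':
    finish_state = multiprocessing.Event()
    p1, p2 = main_process(finish_state)
    # Block here until both workers exit, then continue with the rest of the program.
    p1.join()
    p2.join()
    print("continue the code with other things after all processes were closed...")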
I am trying to save the video, but it's not working.
I followed the instructions from the OpenCV documentation.
import numpy as np
import cv2

cap = cv2.VideoCapture(0)

fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640,480))

while(cap.isOpened()):
    ret, frame = cap.read()
    if ret==True:
        frame = cv2.flip(frame,0)
        out.write(frame)

        cv2.imshow('frame',frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break

cap.release()
out.release()
cv2.destroyAllWindows()
What is wrong?
Try this. It's working for me (Windows 10).
import numpy as np
import cv2

cap = cv2.VideoCapture(0)

# Define the codec and create VideoWriter object
#fourcc = cv2.cv.CV_FOURCC(*'DIVX')
#out = cv2.VideoWriter('output.avi',fourcc, 20.0, (640,480))
out = cv2.VideoWriter('output.avi', -1, 20.0, (640,480))

while(cap.isOpened()):
    ret, frame = cap.read()
    if ret==True:
        frame = cv2.flip(frame,0)

        # write the flipped frame
        out.write(frame)

        cv2.imshow('frame',frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break

# Release everything if job is finished
cap.release()
out.release()
cv2.destroyAllWindows()
jveitchmichaelis at https://github.com/ContinuumIO/anaconda-issues/issues/223 provided a thorough answer; it is copied here:
The documentation in OpenCV says (hidden away) that you can only write
to avi using OpenCV3. Whether that's true or not I've not been able to
determine, but I've been unable to write to anything else.
However, OpenCV is mainly a computer vision library, not a video stream, codec and write one. Therefore, the developers tried to keep
this part as simple as possible. Due to this OpenCV for video
containers supports only the avi extension, its first version.
From: http://docs.opencv.org/3.1.0/d7/d9e/tutorial_video_write.html
My setup: I built OpenCV 3 from source using MSVC 2015, including
ffmpeg. I've also downloaded and installed XVID and openh264 from
Cisco, which I added to my PATH. I'm running Anaconda Python 3. I also
downloaded a recent build of ffmpeg and added the bin folder to my
path, though that shouldn't make a difference as its baked into
OpenCV.
I'm running in Win 10 64-bit.
This code seems to work fine on my computer. It will generate a video
containing random static:
writer = cv2.VideoWriter("output.avi", cv2.VideoWriter_fourcc(*"MJPG"), 30, (640,480))

for frame in range(1000):
    writer.write(np.random.randint(0, 255, (480,640,3)).astype('uint8'))

writer.release()
Some things I've learned through trial and error:
Only use '.avi', it's just a container, the codec is the important thing.
Be careful with specifying frame sizes. In the constructor you need to pass the frame size as (column, row), e.g. 640x480. However, the array you pass in is indexed as (row, column). See in the above example how it's switched?
If your input image has a different size to the VideoWriter, it will fail (often silently)
Only pass in 8 bit images, manually cast your arrays if you have to (.astype('uint8'))
In fact, never mind, just always cast. Even if you load in images using cv2.imread, you need to cast to uint8...
MJPG will fail if you don't pass in a 3 channel, 8-bit image. I get an assertion failure for this at least.
XVID also requires a 3 channel image but fails silently if you don't do this.
H264 seems to be fine with a single channel image
If you need raw output, say from a machine vision camera, you can use 'DIB '. 'RAW ' or an empty codec sometimes works. Oddly if I use
DIB, I get an ffmpeg error, but the video is saved fine. If I use RAW,
there isn't an error, but Windows Video player won't open it. All are
fine in VLC.
In the end I think the key point is that OpenCV is not designed to be
a video capture library - it doesn't even support sound. VideoWriter
is useful, but 99% of the time you're better off saving all your
images into a folder and using ffmpeg to turn them into a useful
video.
In my case, I found that the size given to the VideoWriter has to match the frame size coming from the camera or file. So I read the frame size first and apply it to the writer settings, as below.
(grabbed, frame) = camera.read()
fshape = frame.shape
fheight = fshape[0]
fwidth = fshape[1]
print(fwidth, fheight)

fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi', fourcc, 20.0, (fwidth, fheight))
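For completeness, a minimal loop built on that idea (assuming camera = cv2.VideoCapture(0) has been opened beforehand) might look like this:

import cv2

camera = cv2.VideoCapture(0)

# Read one frame first so the writer can be sized from the actual frame shape.
(grabbed, frame) = camera.read()
fheight, fwidth = frame.shape[0], frame.shape[1]

fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi', fourcc, 20.0, (fwidth, fheight))

while grabbed:
    out.write(frame)
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    (grabbed, frame) = camera.read()

camera.release()
out.release()
cv2.destroyAllWindows()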
You need to get the exact size of the capture like this:
import cv2

cap = cv2.VideoCapture(0)

width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH) + 0.5)
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT) + 0.5)
size = (width, height)

fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('your_video.avi', fourcc, 20.0, size)

while(True):
    _, frame = cap.read()
    cv2.imshow('Recording...', frame)
    out.write(frame)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
out.release()
cv2.destroyAllWindows()
I also faced the same problem, but it worked when I used 'MJPG' instead of 'XVID'.
I used
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
instead of
fourcc = cv2.VideoWriter_fourcc(*'XVID')
Please make sure to set the correct width and height. You can set them like below:
cv2.VideoWriter('output.avi', fourcc, 20.0, (int(cap.get(3)), int(cap.get(4))))
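Putting both tips together, a minimal sketch (assuming a webcam at index 0) would be:

import cv2

cap = cv2.VideoCapture(0)

# MJPG codec plus the capture's own resolution, as suggested above.
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
out = cv2.VideoWriter('output.avi', fourcc, 20.0,
                      (int(cap.get(3)), int(cap.get(4))))

while True:
    ret, frame = cap.read()
    if not ret:
        break
    out.write(frame)
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
out.release()
cv2.destroyAllWindows()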
This answer spells out what each variable means; importantly, the frames you write must match the output video size.
import cv2

save_name = "output.mp4"
fps = 10
width = 600
height = 480
output_size = (width, height)

out = cv2.VideoWriter(save_name, cv2.VideoWriter_fourcc('M','J','P','G'), fps, output_size)
cap = cv2.VideoCapture(0)  # 0 for webcam, or you can put in a video path

while(True):
    _, frame = cap.read()
    cv2.imshow('Video Frame', frame)
    out.write(cv2.resize(frame, output_size))
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
out.release()
cv2.destroyAllWindows()
This answer was only tested on macOS, but it will probably also work on Linux and Windows.
import numpy as np
import cv2

cap = cv2.VideoCapture(0)

# Get the default resolutions
frame_width = int(cap.get(3))
frame_height = int(cap.get(4))

# Define the codec and filename.
out = cv2.VideoWriter('output.avi', cv2.VideoWriter_fourcc('M','J','P','G'), 10, (frame_width, frame_height))

while(cap.isOpened()):
    ret, frame = cap.read()
    if ret==True:
        # write the frame
        out.write(frame)

        cv2.imshow('frame',frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break

# Release everything if job is finished
cap.release()
out.release()
cv2.destroyAllWindows()
I had the same problem, and then I tried this:

frame = cv2.flip(frame, 180)

instead of

frame = cv2.flip(frame, 0)

and it works.
I wasn't having codec or dimension issues like the answers above. Instead, my issue was that my output frames were in grayscale.
I had to create a VideoWriter with the parameter isColor=False:

out = cv2.VideoWriter(output_path,
                      cv2.VideoWriter_fourcc(*'mp4v'),
                      30,
                      (INPUT_VIDEO_WIDTH, INPUT_VIDEO_HEIGHT),
                      isColor=False)

The API docs wrongly say that the flag is currently supported on Windows only. I tested on Ubuntu 20.04 with opencv-python==4.2.0.34, and it finally writes out to file correctly.
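As a usage sketch (using placeholder values for output_path and the input dimensions, which come from your own setup), frames then need to be single-channel before they are written:

import cv2

output_path = 'grayscale_output.mp4'
INPUT_VIDEO_WIDTH, INPUT_VIDEO_HEIGHT = 640, 480

out = cv2.VideoWriter(output_path,
                      cv2.VideoWriter_fourcc(*'mp4v'),
                      30,
                      (INPUT_VIDEO_WIDTH, INPUT_VIDEO_HEIGHT),
                      isColor=False)

cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    # Convert the BGR frame to a single-channel grayscale image before writing.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    out.write(cv2.resize(gray, (INPUT_VIDEO_WIDTH, INPUT_VIDEO_HEIGHT)))
    cv2.imshow('gray', gray)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
out.release()
cv2.destroyAllWindows()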
The other answers work for saving a single video feed. But if you have more than one video source, say numerous IP camera feeds, RTSP streams, or webcams, you may want to record them all at once. Here's a method for saving multiple video feeds simultaneously using multithreading. The idea is to have two threads for each video source: 1) one dedicated only to reading frames from the stream, and 2) one dedicated to processing frames (showing and saving).
Since cv2.VideoCapture.read() is a blocking operation, we must separate reading frames from saving frames. We can read frames in its own independent thread to improve performance by reducing latency due to I/O operations. By dedicating frame capture to its own thread, there will always be a frame ready to be processed instead of having to wait for the I/O operation to complete and return a fresh frame. The second thread is dedicated to processing and saving frames to the output file. We can encapsulate all of this into a single object, where it can be scaled regardless of the number of simultaneous video streams. Be sure to change the video src parameter to your own video source. Here's an example of simultaneously recording three video streams.
from threading import Thread
import cv2
import time

class VideoWriterWidget(object):
    def __init__(self, video_file_name, src=0):
        # Create a VideoCapture object
        self.frame_name = str(src)
        self.video_file = video_file_name
        self.video_file_name = video_file_name + '.avi'
        self.capture = cv2.VideoCapture(src)

        # Default resolutions of the frame are obtained (system dependent)
        self.frame_width = int(self.capture.get(3))
        self.frame_height = int(self.capture.get(4))

        # Set up codec and output video settings
        self.codec = cv2.VideoWriter_fourcc('M','J','P','G')
        self.output_video = cv2.VideoWriter(self.video_file_name, self.codec, 30, (self.frame_width, self.frame_height))

        # Start the thread to read frames from the video stream
        self.thread = Thread(target=self.update, args=())
        self.thread.daemon = True
        self.thread.start()

        # Start another thread to show/save frames
        self.start_recording()
        print('initialized {}'.format(self.video_file))

    def update(self):
        # Read the next frame from the stream in a different thread
        while True:
            if self.capture.isOpened():
                (self.status, self.frame) = self.capture.read()

    def show_frame(self):
        # Display frames in main program
        if self.status:
            cv2.imshow(self.frame_name, self.frame)

        # Press Q on keyboard to stop recording
        key = cv2.waitKey(1)
        if key == ord('q'):
            self.capture.release()
            self.output_video.release()
            cv2.destroyAllWindows()
            exit(1)

    def save_frame(self):
        # Save obtained frame into video output file
        self.output_video.write(self.frame)

    def start_recording(self):
        # Create another thread to show/save frames
        def start_recording_thread():
            while True:
                try:
                    self.show_frame()
                    self.save_frame()
                except AttributeError:
                    pass
        self.recording_thread = Thread(target=start_recording_thread, args=())
        self.recording_thread.daemon = True
        self.recording_thread.start()

if __name__ == '__main__':
    src1 = 'Your link1'
    video_writer_widget1 = VideoWriterWidget('Camera 1', src1)
    src2 = 'Your link2'
    video_writer_widget2 = VideoWriterWidget('Camera 2', src2)
    src3 = 'Your link3'
    video_writer_widget3 = VideoWriterWidget('Camera 3', src3)

    # Since each video player is in its own thread, we need to keep the main thread alive.
    # Keep spinning using time.sleep() so the background threads keep running.
    # Threads are set to daemon=True so they will automatically die
    # when the main thread dies.
    while True:
        time.sleep(5)
Related camera/IP/RTSP/streaming, FPS, video, threading, and multiprocessing posts
Python OpenCV streaming from camera - multithreading, timestamps
Video Streaming from IP Camera in Python Using OpenCV cv2.VideoCapture
How to capture multiple camera streams with OpenCV?
OpenCV real time streaming video capture is slow. How to drop frames or get synced with real time?
Storing RTSP stream as video file with OpenCV VideoWriter
OpenCV video saving
Python OpenCV multiprocessing cv2.VideoCapture mp4
Nuru's answer actually works; the only thing is to remove the line frame = cv2.flip(frame,0) under the if ret==True: block, which will output the video file without flipping.
As an example:

fourcc = cv2.VideoWriter_fourcc(*'MJPG')
out_corner = cv2.VideoWriter('img_corner_1.avi', fourcc, 20.0, (640, 480))

There you have to define X, Y as width and height.
But when you create an image (a blank image, for instance) you have to define Y, X as height and width:

img_corner = np.zeros((480, 640, 3), np.uint8)
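A small sketch to illustrate how the two orderings line up (the file name and frame count are arbitrary):

import cv2
import numpy as np

fourcc = cv2.VideoWriter_fourcc(*'MJPG')
# VideoWriter takes (width, height)...
out_corner = cv2.VideoWriter('img_corner_1.avi', fourcc, 20.0, (640, 480))

# ...while np.zeros takes (height, width, channels), so 480 and 640 swap places.
img_corner = np.zeros((480, 640, 3), np.uint8)

for _ in range(100):
    out_corner.write(img_corner)

out_corner.release()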
I'm kind of late, but the VidGear Python library's WriteGear API automates the process of piping OpenCV frames into FFmpeg on any platform in real time, with hardware encoder support, while providing the same opencv-python syntax. Here's a basic Python example:
# import libraries
from vidgear.gears import WriteGear
import cv2

# define (Codec, CRF, preset) FFmpeg tweak parameters for writer
output_params = {"-vcodec":"libx264", "-crf": 0, "-preset": "fast"}

# open live webcam video stream on first index (i.e. 0) device
stream = cv2.VideoCapture(0)

# define writer with output filename 'Output.mp4'
writer = WriteGear(output_filename = 'Output.mp4', compression_mode = True, logging = True, **output_params)

# infinite loop
while True:

    # read frames
    (grabbed, frame) = stream.read()

    # check if frame is empty; if so, break the infinite loop
    if not grabbed:
        break

    # {do something with frame here}
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # write a modified frame to writer
    writer.write(gray)

    # show output window
    cv2.imshow("Output Frame", frame)

    # check for 'q' key-press; if pressed, break out
    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):
        break

# close output window
cv2.destroyAllWindows()

# safely close video stream
stream.release()

# safely close writer
writer.close()
Source: https://abhitronix.github.io/vidgear/latest/gears/writegear/compression/usage/#using-compression-mode-with-opencv
You can check out VidGear Docs for more advanced applications and features.
As @ปรีดา ตั้งนภากร said, the size given to the VideoWriter has to match the frame size coming from the camera or file.
You can use code like this to check whether your camera is (640, 480) or not:

print(int(cap.get(3)), int(cap.get(4)))

In my case, I found my camera is (1280, 720), so I replaced (640, 480) with (1280, 720). Then it saves videos correctly.
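The same check can also be written with the named capture properties instead of the bare indices 3 and 4; a small sketch:

import cv2

cap = cv2.VideoCapture(0)

# cv2.CAP_PROP_FRAME_WIDTH / cv2.CAP_PROP_FRAME_HEIGHT are the named equivalents of cap.get(3) / cap.get(4).
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
print((width, height))  # e.g. (640, 480) or (1280, 720), depending on the camera

cap.release()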
import cv2

cap = cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc('X','V','I','D')

frame_width = int(cap.get(3))
frame_height = int(cap.get(4))

out = cv2.VideoWriter('output.mp4', fourcc, 20, (frame_width, frame_height), True)
print(int(cap.get(3)))
print(int(cap.get(4)))

while(cap.isOpened()):
    ret, frame = cap.read()
    if ret == True:
        print(frame.shape)
        out.write(frame)
        cv2.imshow('Frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break

cap.release()
out.release()
cv2.destroyAllWindows()
This works fine, but if the resulting video file is very small it means nothing was actually captured. So make sure the height and width of the video and of the images you are recording are the same. If you apply any manipulation to the frames after capturing, you must confirm the size (before and after).
Hope it saves someone an hour.
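A minimal sanity check along those lines (the names are placeholders) could resize or verify frames right before writing:

import cv2

cap = cv2.VideoCapture(0)
output_size = (int(cap.get(3)), int(cap.get(4)))  # (width, height) the writer expects
out = cv2.VideoWriter('output.avi', cv2.VideoWriter_fourcc(*'MJPG'), 20.0, output_size)

while True:
    ret, frame = cap.read()
    if not ret:
        break
    # ... any manipulation of the frame would happen here ...
    # Confirm the frame still matches the writer size; resize if it doesn't.
    if (frame.shape[1], frame.shape[0]) != output_size:
        frame = cv2.resize(frame, output_size)
    out.write(frame)
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
out.release()
cv2.destroyAllWindows()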
I wanted to write grayscale images and nothing worked for me until I added a 0 (the isColor flag) to VideoWriter:
out = cv2.VideoWriter(outfilename, fourcc, fps, (width, height), 0)
You have to specify the width and the height of the video according to the size of the image feed. Otherwise, it will create a file of around 5.5 KB.

result = cv2.VideoWriter(name, cv2.VideoWriter_fourcc(*'MJPG'), 10, (960, 540))
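If you don't know the feed size ahead of time, a short sketch (the names are placeholders) can take it from the first captured frame instead of hard-coding (960, 540):

import cv2

cap = cv2.VideoCapture(0)
ret, frame = cap.read()

# frame.shape is (height, width, channels); VideoWriter wants (width, height).
feed_size = (frame.shape[1], frame.shape[0])
result = cv2.VideoWriter('output.avi', cv2.VideoWriter_fourcc(*'MJPG'), 10, feed_size)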