Rolling screen capture with ffmpeg on windows - python

I have the following code to capture a video stream from my webcam. I use ffmpeg to write to named windows pipe, then read it with python and display with opencv. The thing is that the opencv stream is 'rolling' as shown here https://www.youtube.com/watch?v=H78TRo3DZIo
If I capture the output to a video instead of a pipe, with the command:
ffmpeg -f dshow -video_size 1920x1080 -framerate 60 -i video="USB Video" -c:v copy out.avi
everything looks fine. What should I change to achieve the desired effect? (non-rolling stream)
My code below:
import cv2
import time
import subprocess
import numpy as np

# Frame geometry must match the -video_size passed to FFmpeg below.
w, h = 800, 600

start = time.time()  # Get start time
nFrames = 0          # Frames shown so far (used for the FPS report)

# FFmpeg decodes the dshow camera and writes raw BGR24 frames to stdout.
# FIX: use '-f rawvideo' (not image2pipe) for raw frames on a pipe.
cmd = 'C:/Users/......./Downloads/ffmpeg-4.3.1-2020-11-19-full_build/bin/ffmpeg.exe -f dshow -framerate 60 -video_size 800x600 -i video="USB Video" -pix_fmt bgr24 -vcodec rawvideo -f rawvideo -'
# FIX: do NOT pass stderr=subprocess.STDOUT here. Merging FFmpeg's log
# output into stdout interleaves text bytes with the raw pixel data,
# which shifts frame alignment - the likely cause of the 'rolling' image.
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, bufsize=10**9)

# Read video frames from ffmpeg in a loop.
while True:
    # One frame is exactly w*h*3 bytes of BGR24 data.
    frame = proc.stdout.read(w * h * 3)
    if len(frame) < w * h * 3:
        break  # short read: FFmpeg exited or the stream ended
    frame = np.frombuffer(frame, dtype=np.uint8).reshape((h, w, 3))
    nFrames += 1  # FIX: was never incremented, so FPS always printed 0
    cv2.imshow('screenshot', frame)
    if cv2.waitKey(1) == ord("q"):
        break

fps = nFrames / (time.time() - start)
print(f'FPS: {fps}')
cv2.destroyAllWindows()

Related

cv2.VideoWriter issues

I'm looking to record a Twitch Livestream by feeding it the direct livestream url using streamlink.streams(url) (which returns a .m3u8 url). With this, I have no problem reading the stream and even writing a few images from it, but when it comes to writing it as a video, I get errors.
P.S.: Yes, I know there's other options like Streamlink and yt-dwl, but I want to operate solely in python, not using CLI... which I believe those two are only dealing with (for recording).
Here's what I currently have:
# First attempt: resolve the Twitch stream URL with streamlink, then write
# through a GStreamer pipeline string (requires OpenCV built with GStreamer).
if streamlink.streams(url):
    stream = streamlink.streams(url)['best']
    stream = str(stream).split(', ')
    stream = stream[1].strip("'")
    cap = cv2.VideoCapture(stream)
    gst_out = "appsrc ! video/x-raw, format=BGR ! queue ! nvvidconv ! omxh264enc ! h264parse ! qtmux ! filesink location=stream "
    out = cv2.VideoWriter(gst_out, cv2.VideoWriter_fourcc(*'mp4v'), 30, (1920, 1080))
    while True:
        _, frame = cap.read()
        out.write(frame)
For this code, I get this error msg:
[tls # 0x1278a74f0] Error in the pull function.
And if I remove gst_out and feed stream instead as well as moving cap and out into the while loop like so:
# Second attempt: reopen the capture and writer on every iteration, feeding
# the stream URL itself to VideoWriter (this triggers the mp4v/HLS error).
if streamlink.streams(url):
    stream = str(streamlink.streams(url)['best']).split(', ')[1].strip("'")
    while True:
        cap = cv2.VideoCapture(stream)
        _, frame = cap.read()
        out = cv2.VideoWriter(stream, cv2.VideoWriter_fourcc(*'mp4v'), 30, (1920, 1080))
        out.write(frame)
I get:
OpenCV: FFMPEG: tag 0x7634706d/'mp4v' is not supported with codec id 12 and format 'hls / Apple HTTP Live Streaming'
What am I missing here?
The first part uses GStreamer syntax, and OpenCV for Python is most likely not built with GStreamer.
The answer is going to be focused on the second part (also because I don't know GStreamer so well).
There are several issues:
cap = cv2.VideoCapture(stream) should be before the while True loop.
out = cv2.VideoWriter(stream, cv2.VideoWriter_fourcc(*'mp4v'), 30, (1920, 1080)) should be before the while True loop.
The first argument of cv2.VideoWriter should be MP4 file name, and not stream.
For getting a valid output file, we have to execute out.release() after the loop, but the loop may never end.
It is recommended to get frame size and rate of the input video, and set VideoWriter accordingly:
# Query the input's geometry and frame rate so the writer matches the source.
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(cap.get(cv2.CAP_PROP_FPS))
video_file_name = 'output.mp4'
out = cv2.VideoWriter(video_file_name, cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height)) # Open video file for writing
It is recommended to break the loop if ret is False:
ret, frame = cap.read()
if not ret:
break
One option to end the recording is when user press Esc key.
Break the loop if cv2.waitKey(1) == 27.
cv2.waitKey(1) is going to work only after executing cv2.imshow.
A simple solution is executing cv2.imshow every 30 frames (for example).
if (frame_counter % 30 == 0):
cv2.imshow('frame', frame) # Show frame every 30 frames (for testing)
if cv2.waitKey(1) == 27: # Press Esc for stop recording (cv2.waitKey is going to work only when cv2.imshow is used).
break
Complete code sample:
from streamlink import Streamlink
import cv2


def stream_to_url(url, quality='best'):
    """Resolve a Twitch page URL to its direct stream URL via Streamlink."""
    session = Streamlink()
    streams = session.streams(url)
    if not streams:
        raise ValueError('Could not locate your stream.')
    return streams[quality].to_url()


url = 'https://www.twitch.tv/noraexplorer'  # Need to login to twitch.tv first (using the browser)...
quality = 'best'
stream_url = stream_to_url(url, quality)  # Get the video URL
cap = cv2.VideoCapture(stream_url, cv2.CAP_FFMPEG)  # Open video stream for capturing

# Get frame size and rate of the input video so the writer matches it.
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(cap.get(cv2.CAP_PROP_FPS))

video_file_name = 'output.mp4'
out = cv2.VideoWriter(video_file_name, cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height))  # Open video file for writing

frame_counter = 0
while True:
    ret, frame = cap.read()
    if not ret:
        break  # decoding failed - stream ended
    if frame_counter % 30 == 0:
        cv2.imshow('frame', frame)  # Show frame every 30 frames (for testing)
    out.write(frame)  # Write frame to output.mp4
    if cv2.waitKey(1) == 27:
        break  # Esc stops recording (waitKey works only after imshow was used)
    frame_counter += 1

cap.release()
out.release()
cv2.destroyAllWindows()
Testing the setup using FFplay and subprocess module:
from streamlink import Streamlink
import subprocess


def stream_to_url(url, quality='best'):
    """Resolve a Twitch page URL to its direct stream URL via Streamlink."""
    streams = Streamlink().streams(url)
    if not streams:
        raise ValueError('Could not locate your stream.')
    return streams[quality].to_url()


#url = 'https://www.twitch.tv/noraexplorer'  # Need to login to twitch.tv first (using the browser)...
url = 'https://www.twitch.tv/valorant'
quality = 'best'
stream_url = stream_to_url(url, quality)  # Get the video URL

# Sanity-check the resolved URL by playing it with FFplay.
subprocess.run(['ffplay', stream_url])
Update:
Using ffmpeg-python for reading the video, and OpenCV for recording the video:
In cases where cv2.VideoCapture is not working, we may use FFmpeg CLI as sub-process.
ffmpeg-python module is Python binding for FFmpeg CLI.
Using ffmpeg-python is almost like using subprocess module, it used here mainly for simplifying the usage of FFprobe.
Using FFprobe for getting video frames resolution and framerate (without using OpenCV):
# Probe the stream with FFprobe (via ffmpeg-python) to learn its geometry.
p = ffmpeg.probe(stream_url, select_streams='v')
width = p['streams'][0]['width']
height = p['streams'][0]['height']

# r_frame_rate may be a rational string like '60000/1001' (-> 59.94).
r_frame_rate = p['streams'][0]['r_frame_rate']
if '/' in r_frame_rate:
    num, den = r_frame_rate.split('/')
    fps = float(num) / float(den)
elif r_frame_rate != '0':
    fps = float(r_frame_rate)
else:
    fps = 30  # Used as default
Getting the framerate may be a bit of a challenge...
Note: ffprobe CLI should be in the execution path.
Start FFmpeg sub-process with stdout as pipe:
# Launch FFmpeg as an async sub-process that decodes the stream and writes
# raw BGR24 frames to its stdout pipe.
ffmpeg_process = (
ffmpeg
.input(stream_url)
.video
.output('pipe:', format='rawvideo', pix_fmt='bgr24')
.run_async(pipe_stdout=True)
)
Note: ffmpeg CLI should be in the execution path.
Reading a frame from the pipe, and convert it from bytes to NumPy array:
# One frame is exactly width*height*3 bytes of BGR24; convert to an HxWx3 array.
in_bytes = ffmpeg_process.stdout.read(width*height*3)
frame = np.frombuffer(in_bytes, np.uint8).reshape([height, width, 3])
Closing FFmpeg sub-process:
Closing stdout pipe ends FFmpeg (with "broken pipe" error).
# Closing stdout makes FFmpeg exit (broken pipe); then reap the process.
ffmpeg_process.stdout.close()
ffmpeg_process.wait() # Wait for the sub-process to finish
Complete code sample:
from streamlink import Streamlink
import cv2
import numpy as np
import ffmpeg


def stream_to_url(url, quality='best'):
    """Resolve a Twitch page URL to its direct stream URL via Streamlink."""
    streams = Streamlink().streams(url)
    if not streams:
        raise ValueError('Could not locate your stream.')
    return streams[quality].to_url()


#url = 'https://www.twitch.tv/noraexplorer'  # Need to login to twitch.tv first (using the browser)...
url = 'https://www.twitch.tv/valorant'
quality = 'best'
stream_url = stream_to_url(url, quality)  # Get the video URL
#subprocess.run(['ffplay', stream_url])  # Use FFplay for testing

# Use FFprobe to get video frames resolution and framerate.
################################################################################
p = ffmpeg.probe(stream_url, select_streams='v')
width = p['streams'][0]['width']
height = p['streams'][0]['height']
r_frame_rate = p['streams'][0]['r_frame_rate']  # May be rational, e.g. 60000/1001
if '/' in r_frame_rate:
    num, den = r_frame_rate.split('/')
    fps = float(num) / float(den)  # Convert from 60000/1001 to 59.94
elif r_frame_rate != '0':
    fps = float(r_frame_rate)
else:
    fps = 30  # Used as default
#cap = cv2.VideoCapture(stream_url, cv2.CAP_FFMPEG)  # Open video stream for capturing
#width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
#height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
#fps = int(cap.get(cv2.CAP_PROP_FPS))
################################################################################

# Use FFmpeg sub-process instead of using cv2.VideoCapture.
################################################################################
ffmpeg_process = (
    ffmpeg
    .input(stream_url, an=None)  # an=None applies -an (drop the input audio - optional, just tidier)
    .video
    .output('pipe:', format='rawvideo', pix_fmt='bgr24')
    .run_async(pipe_stdout=True)
)
################################################################################

video_file_name = 'output.mp4'
out = cv2.VideoWriter(video_file_name, cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height))  # Open video file for writing

frame_counter = 0
while True:
    #ret, frame = cap.read()
    # Read one raw BGR24 frame (width*height*3 bytes) from FFmpeg's stdout.
    in_bytes = ffmpeg_process.stdout.read(width * height * 3)
    if len(in_bytes) < width * height * 3:  # if not ret:
        break  # pipe drained - stream ended
    frame = np.frombuffer(in_bytes, np.uint8).reshape([height, width, 3])  # bytes -> NumPy array
    if frame_counter % 30 == 0:
        cv2.imshow('frame', frame)  # Show frame every 30 frames (for testing)
    out.write(frame)  # Write frame to output.mp4
    if cv2.waitKey(1) == 27:
        break  # Esc stops recording (waitKey works only after imshow was used)
    frame_counter += 1

#cap.release()
ffmpeg_process.stdout.close()  # Close stdout pipe (it also closes FFmpeg).
out.release()
cv2.destroyAllWindows()
ffmpeg_process.wait()  # Wait for the sub-process to finish
Note:
In case you care about the quality of the recorded video, using cv2.VideoWriter is not the best choice...

How to receive byte-stream by using gstreamer with python subprocess module or gst-launch-1.0 command?

I want to receive byte-stream by using gstreamer with python subprocess module.
Now I can successfully use ffmpeg to pull the byte-stream. As shown below.
import cv2
import subprocess as sp
import numpy as np  # FIX: np was used below but never imported

height = 714
width = 420
rtsp_url = 'rtsp://127.0.0.1:8554/video'

# FFmpeg command: decode the RTSP stream and emit raw BGR24 frames on stdout.
command = ['ffmpeg',
           '-i', rtsp_url,
           '-f', 'rawvideo',
           # FIX: FFmpeg's size syntax is WxH; the original built '420*714'.
           '-s', '{}x{}'.format(width, height),
           '-pix_fmt', 'bgr24',
           '-fflags', 'nobuffer',
           '-']
p = sp.Popen(command, stdout=sp.PIPE, bufsize=10**8)

while True:
    raw_image = p.stdout.read(width * height * 3)
    if len(raw_image) < width * height * 3:
        break  # FIX: short read (stream ended) would crash the reshape
    # FIX: frombuffer replaces the deprecated np.fromstring.
    image = np.frombuffer(raw_image, dtype='uint8')
    image = image.reshape((height, width, 3)).copy()
    cv2.imshow('image', image)
    key = cv2.waitKey(20)
I want to use gstreamer command instead of ffmpeg. So far, I have realized writing byte-stream to a file by using gstreamer command line.
gst-launch-1.0 rtspsrc location=rtsp://127.0.0.1:8554/video latency=0 drop-on-latency=true ! rtph264depay ! video/x-h264, stream-format='byte-stream' ! filesink location=/home/name/stdout
But it can't output the byte-stream to a pipe, so the terminal doesn't display the byte-stream, unlike the ffmpeg command. How can I change this command to output the byte-stream through a pipe so I can read it from the pipe?
Thank you for taking the time to answer for me!
This is RTSP streaming code.
import cv2
import time
import subprocess as sp
import numpy as np

rtsp_url = 'rtsp://127.0.0.1:8554/video'
video_path = r'test.mp4'
cap = cv2.VideoCapture(video_path)

# Get video information so FFmpeg knows the raw-frame geometry.
fps = int(cap.get(cv2.CAP_PROP_FPS))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
print('fps={}'.format(fps))

# FFmpeg: read raw BGR24 frames from stdin, H.264-encode, push to RTSP.
command = ['ffmpeg',
           '-re',
           '-y',
           '-stream_loop', '-1',
           '-f', 'rawvideo',
           '-vcodec', 'rawvideo',
           '-pix_fmt', 'bgr24',
           '-s', "{}x{}".format(width, height),
           '-r', str(fps),
           '-i', '-',
           '-c:v', 'libx264',
           '-pix_fmt', 'yuv420p',
           '-preset', 'ultrafast',
           # '-flags2', 'local_header',
           # FIX: with list-form args (shell=False) the inner quotes in
           # "'dump_extra=freq=k'" are passed verbatim to FFmpeg, making the
           # bitstream-filter spec invalid. NOTE(review): confirm 'freq=k'
           # is valid - FFmpeg docs list 'keyframe' for dump_extra's freq.
           '-bsf:v', 'dump_extra=freq=k',
           '-keyint_min', '60',
           '-g', '60',
           '-sc_threshold', '0',
           '-f', 'rtsp',
           '-rtsp_transport', 'tcp',
           '-muxdelay', '0.1',
           rtsp_url]
p = sp.Popen(command, stdin=sp.PIPE)

cnt = 0
t_start = time.time()
while cap.isOpened():
    t_cur = time.time() - t_start
    ret, frame = cap.read()
    if not ret:
        # Input file exhausted: count the loop and reopen it from the start.
        cnt += 1
        print("count: {}".format(cnt))
        cap = cv2.VideoCapture(video_path)
        continue
    p.stdin.write(frame.tobytes())
    cv2.imshow('real_time', frame)
    key = cv2.waitKey(20)
    if key == 27:  # Esc stops streaming
        p.terminate()
        break
I have managed to create an example that works in Linux.
I was not able to simulate an RTSP camera, so I used MP4 file as input.
Creating the MP4 input file using FFmpeg CLI within Python (for testing):
sp.run(shlex.split(f'ffmpeg -y -f lavfi -i testsrc=size={width}x{height}:rate=25:duration=100 -vcodec libx264 -pix_fmt yuv420p {input_file_name}'))
The GStreamer command is:
p = sp.Popen(shlex.split(f'{gstreamer_exe} --quiet filesrc location={input_file_name} ! qtdemux ! video/x-h264 ! avdec_h264 ! videoconvert ! capsfilter caps="video/x-raw, format=BGR" ! filesink location={stdout_file_name}'), stdout=sp.PIPE)
--quiet is used because GStreamer prints messages to stdout.
filesrc location... is used for reading the MP4 input - replace it with RTSP pipeline.
videoconvert ! capsfilter caps="video/x-raw, format=BGR" converts the video format to raw BGR.
filesink location=/dev/stdout redirects the output to stdout (in Linux).
Code sample:
import cv2
import numpy as np
import subprocess as sp
import shlex
from sys import platform

width = 714
height = 420
input_file_name = 'input.mp4'  # For testing, use MP4 input file instead of RTSP input.

# Build MP4 synthetic input video file for testing:
sp.run(shlex.split(f'ffmpeg -y -f lavfi -i testsrc=size={width}x{height}:rate=25:duration=100 -vcodec libx264 -pix_fmt yuv420p {input_file_name}'))

if platform == "win32":
    # stdout_file_name = "con:"
    # gstreamer_exe = 'c:/gstreamer/1.0/msvc_x86_64/bin/gst-launch-1.0.exe'
    raise Exception('win32 system is not supported')
else:
    stdout_file_name = "/dev/stdout"
    gstreamer_exe = 'gst-launch-1.0'

# https://stackoverflow.com/questions/29794053/streaming-mp4-video-file-on-gstreamer
# Demux + decode the MP4, convert to raw BGR, dump the bytes to stdout.
p = sp.Popen(shlex.split(f'{gstreamer_exe} --quiet filesrc location={input_file_name} ! qtdemux ! video/x-h264 ! avdec_h264 ! videoconvert ! capsfilter caps="video/x-raw, format=BGR" ! filesink location={stdout_file_name}'), stdout=sp.PIPE)

frame_nbytes = width * height * 3
while True:
    raw_image = p.stdout.read(frame_nbytes)
    if len(raw_image) < frame_nbytes:
        break  # end of stream
    image = np.frombuffer(raw_image, dtype='uint8').reshape((height, width, 3))
    cv2.imshow('image', image)
    key = cv2.waitKey(1)

p.stdout.close()
p.wait()
cv2.destroyAllWindows()
Update:
Based on your new question, I managed to create RTSP capturing example:
import cv2
import numpy as np
import subprocess as sp
import shlex

width = 240
height = 160
rtsp_url = 'rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mp4'  # For testing, use public RTSP input.
gstreamer_exe = 'gst-launch-1.0'  # '/usr/bin/gst-launch-1.0'

# https://stackoverflow.com/questions/29794053/streaming-mp4-video-file-on-gstreamer
# Depay + decode the RTSP H.264 stream, convert to BGR and push the raw
# bytes to stdout through fdsink.
p = sp.Popen(shlex.split(f'{gstreamer_exe} --quiet rtspsrc location={rtsp_url} ! queue2 ! rtph264depay ! avdec_h264 ! videoconvert ! capsfilter caps="video/x-raw, format=BGR" ! fdsink'), stdout=sp.PIPE)

frame_nbytes = width * height * 3
while True:
    raw_image = p.stdout.read(frame_nbytes)
    if len(raw_image) < frame_nbytes:
        break  # end of stream
    image = np.frombuffer(raw_image, np.uint8).reshape((height, width, 3))
    cv2.imshow('image', image)
    key = cv2.waitKey(1)

p.stdout.close()
p.wait()
cv2.destroyAllWindows()

Stream images from python openCV with ffmpeg

In order to try an embedded AI, I want to stream an image dataset through a rtsp stream.
What I tried to do is to read one image every X seconds and send it to the stream and infere my AI on it. I tried to use this github repo :https://gist.github.com/takidog/2c981c34d5d5b41c0d712f8ef4ac60d3#file-main-py
This is what I tried so far :
import cv2
import time
import subprocess as sp
import glob, os

__PATH = "./DATASET"
os.chdir(__PATH)

IMG_LIST = glob.glob("*.jpg")
IMG_LIST_LEN = len(IMG_LIST)
IMG_INDEX = 0
IMG_DELAY = 2
IMG_WIDTH = 1280
IMG_HEIGHT = 720
IMG_SIZE = str(IMG_WIDTH) + "x" + str(IMG_HEIGHT)
FPS = 5
RTSP_SERVER = "rtsp://localhost:31415/stream"

# FFmpeg reads PNG-encoded frames from stdin and pushes them to the RTSP URL.
COMMAND = ['ffmpeg',
           '-re',
           '-s', IMG_SIZE,
           '-r', str(FPS),
           '-i', '-',
           '-bufsize', '64M',
           '-maxrate', "4M",
           '-rtsp_transport', 'tcp',
           '-muxdelay', '0.1',
           RTSP_SERVER]
process = sp.Popen(COMMAND, stdin=sp.PIPE)

while True:
    CURRENT_IMG = cv2.imread(IMG_LIST[IMG_INDEX])
    IMG_INDEX = (IMG_INDEX + 1) % IMG_LIST_LEN
    while CURRENT_IMG.shape[0] != 720:  # We dump images with a bad format
        CURRENT_IMG = cv2.imread(IMG_LIST[IMG_INDEX])
        IMG_INDEX = (IMG_INDEX + 1) % IMG_LIST_LEN
    _, FRAME = cv2.imencode('.png', CURRENT_IMG)
    process.stdin.write(FRAME.tobytes())
    time.sleep(1 / FPS)
Surprise surprise this does not work and gives me this error :
Input #0, png_pipe, from 'pipe:':
Duration: N/A, bitrate: N/A
Stream #0:0: Video: png, rgb24(pc), 1280x720, 25 fps, 25 tbr, 25 tbn, 25 tbc
[NULL # 0x55ba3fe1b860] Unable to find a suitable output format for 'rtsp://localhost:31415/stream'
rtsp://localhost:31415/stream: Invalid argument
Traceback (most recent call last):
File "main.py", line 47, in <module>
process.stdin.write(FRAME.tobytes())
BrokenPipeError: [Errno 32] Broken pipe
Here is a reproducible sample - hoping you can copy paste and execute, but nothing is promised...
The example applies the following stages:
Create 10 synthetic JPEG images in ./test_dataset folder, to be used as input.
Execute FFplay sub-process as RTSP listener.
When using the TCP protocol we should start the TCP server first (FFplay is used as a TCP server in our case).
We also need the receiver process, because without it, FFmpeg streamer process halts after the first frame.
Execute FFmpeg sub-process for RTSP streaming.
Cyclically read JPEG image to NumPy array (in BGR color format), and write the array as raw video frame to stdin pipe.
Note: It is more efficient to write raw video frames, than encoding each frame to PNG (as used by your reference sample).
Here is the code:
import cv2
#import time
import subprocess as sp
import glob
import os

img_width = 1280
img_height = 720

test_path = './test_dataset'  # Folder with synthetic sample images.
os.makedirs(test_path, exist_ok=True)  # Create folder for input images.
os.chdir(test_path)

ffmpeg_cmd = 'ffmpeg'  # May use full path like: 'c:\\FFmpeg\\bin\\ffmpeg.exe'
ffplay_cmd = 'ffplay'  # May use full path like: 'c:\\FFmpeg\\bin\\ffplay.exe'

# Create 10 synthetic JPEG images for testing (image0001.jpg, image0002.jpg, ..., image0010.jpg).
sp.run([ffmpeg_cmd, '-y', '-f', 'lavfi', '-i', f'testsrc=size={img_width}x{img_height}:rate=1:duration=10', 'image%04d.jpg'])

img_list = glob.glob("*.jpg")
img_list_len = len(img_list)
img_index = 0
fps = 5

rtsp_server = 'rtsp://localhost:31415/live.stream'

# Start the listener before the sending client (when using TCP).
# See: https://trac.ffmpeg.org/wiki/StreamingGuide#Pointtopointstreaming
ffplay_process = sp.Popen([ffplay_cmd, '-rtsp_flags', 'listen', rtsp_server])  # FFplay sub-process receives the RTSP video.

command = [ffmpeg_cmd,
           '-re',
           '-f', 'rawvideo',  # Raw video input - more efficient than encoding each frame to PNG
           '-s', f'{img_width}x{img_height}',
           '-pixel_format', 'bgr24',
           '-r', f'{fps}',
           '-i', '-',
           '-pix_fmt', 'yuv420p',
           '-c:v', 'libx264',
           '-bufsize', '64M',
           '-maxrate', '4M',
           '-rtsp_transport', 'tcp',
           '-f', 'rtsp',
           #'-muxdelay', '0.1',
           rtsp_server]
process = sp.Popen(command, stdin=sp.PIPE)  # Execute FFmpeg sub-process for RTSP streaming

while True:
    current_img = cv2.imread(img_list[img_index])  # Read a JPEG to a BGR NumPy array - assume the resolution is correct.
    img_index = (img_index + 1) % img_list_len  # Cyclically repeat images
    process.stdin.write(current_img.tobytes())  # Write raw frame to stdin pipe.
    cv2.imshow('current_img', current_img)  # Show image for testing
    # time.sleep(1/FPS)
    key = cv2.waitKey(int(round(1000 / fps)))  # We need to call cv2.waitKey after cv2.imshow
    if key == 27:  # Press Esc for exit
        break

process.stdin.close()  # Close stdin pipe
process.wait()  # Wait for FFmpeg sub-process to finish
ffplay_process.kill()  # Forcefully close FFplay sub-process
cv2.destroyAllWindows()  # Close OpenCV window

How to pipe output from ffmpeg using python?

I am trying to pipe output from FFmpeg in Python. I am reading images from a video grabber card and I am successful in reading this to an output file from the command line using dshow. I am trying to grab the images from the card to my OpenCv code to be able to further play with the data. Unfortunately, when I pipe out the images, I just get a display of the video as shown in the link:
link: s000.tinyupload.com/?file_id=15940665795196022618.
The code I used is as shown below:
import cv2
import subprocess as sp
import numpy
import sys
import os

# Redirect print() output into a log file while the capture runs.
old_stdout = sys.stdout
log_file = open("message.log", "w")
sys.stdout = log_file

FFMPEG_BIN = "C:/ffmpeg/bin/ffmpeg.exe"
command = [FFMPEG_BIN, '-y',
           '-f', 'dshow', '-rtbufsize', '100M',
           '-i', 'video=Datapath VisionAV Video 01',
           '-video_size', '640x480',
           '-pix_fmt', 'bgr24', '-r', '25',
           '-f', 'image2pipe', '-']
pipe = sp.Popen(command, stdout=sp.PIPE, bufsize=10**8)

while True:
    # Capture frame-by-frame: one 640x480 BGR24 frame per read.
    raw_image = pipe.stdout.read(640 * 480 * 3)
    # Transform the bytes read into a numpy array.
    image = numpy.frombuffer(raw_image, dtype='uint8')
    print(image)
    image = image.reshape((480, 640, 3))
    if image is not None:
        cv2.imshow('Video', image)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    pipe.stdout.flush()

sys.stdout = old_stdout
log_file.close()
cv2.destroyAllWindows()
Please do provide me some pointers to fix this issue. Help is greatly appreciated.
I struggled longer with the console application FFmpeg, and finally gave up.
It's easier with this extension:
pip install ffmpeg-python
Karl Kroening has published here a very good integration of FFmpeg into Python.
With these examples a solution should be possible:
https://github.com/kkroening/ffmpeg-python
After you call sp.Popen, you have to communicate with it. You can use the following code:
try:
    # Capture everything FFmpeg prints (stderr is merged into stdout).
    # FIX: removed a stray trailing backtick that made this line a SyntaxError.
    pipe = sp.Popen(command, stdout=sp.PIPE, stderr=sp.STDOUT, universal_newlines=True)
    ffmpeg_output, _ = pipe.communicate()
except sp.CalledProcessError as err:
    # NOTE(review): Popen()/communicate() never raise CalledProcessError
    # (only check_call/check_output/run(check=True) do), so this handler is
    # dead code as written - confirm the intended error handling.
    print("FFmpeg stdout output on error:\n" + err.output)
Finally, you can print the output to make sure the above commands worked:
print(ffmpeg_output)
The above statement is going to display the output returned by the communication with the process.
This works for me
import subprocess as sp
import json
import os
import numpy
import PIL
from imutils.video import FPS
import cv2


def video_frames_ffmpeg():
    """Yield raw BGR frames grabbed from a dshow camera through an FFmpeg pipe."""
    width = 640
    height = 360
    iterator = 0
    cmd = ['ffmpeg', '-loglevel', 'quiet',
           '-f', 'dshow',
           '-i', 'video=HD USB Camera',
           #'-vf','scale=%d:%d,smartblur'%(width,height),
           '-preset', 'ultrafast', '-tune', 'zerolatency',
           '-f', 'rawvideo',
           '-pix_fmt', 'bgr24',
           '-']
    p = sp.Popen(cmd, stdout=sp.PIPE)
    os.makedirs("ffmpeg_test", exist_ok=True)  # FIX: im.save below failed if the folder was missing
    while True:
        arr = numpy.frombuffer(p.stdout.read(width * height * 3), dtype=numpy.uint8)
        iterator += 1
        if len(arr) < width * height * 3:
            # FIX: the original fell through after an empty read and crashed
            # on reshape; stop cleanly when FFmpeg closes the pipe.
            p.wait()
            print("awaiting")
            break
        if iterator >= 1000:
            break
        frame = arr.reshape((height, width, 3))
        # FIX: cv2.cv2.FONT_HERSHEY_SIMPLEX relied on an accidental alias;
        # the canonical name is cv2.FONT_HERSHEY_SIMPLEX.
        cv2.putText(frame, "frame{}".format(iterator), (75, 70),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)
        im = Image.fromarray(frame)
        im.save("ffmpeg_test/test%d.jpeg" % iterator)
        yield arr


from PIL import Image
from imutils.video import FPS

# Consume the generator and measure the achieved frame rate.
for i, frame in enumerate(video_frames_ffmpeg()):
    if i == 0:
        fps = FPS().start()
    else:
        fps.update()
fps.stop()
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
cv2.destroyAllWindows()
Try:
# Same idea for a V4L2 device on Linux: '-vcodec rawvideo' forces unencoded
# frames on the pipe and '-an' drops audio.
command = [ FFMPEG_BIN,
'-rtbufsize', '100M',
'-i', '/dev/video0' , #change to here to your camera device
'-video_size', '640x480',
'-pix_fmt', 'bgr24',# '-r','25',
'-f', 'image2pipe', #'-'
'-vcodec', 'rawvideo', '-an', '-'
]
I don't know how '-vcodec', 'rawvideo', '-an', '-' this helps,
and without it my code doesn't work.

Pipe opencv images to ffmpeg using python

How can I pipe openCV images to ffmpeg (running ffmpeg as a subprocess)?
(I am using spyder/anaconda)
I am reading frames from a video file and do some processing on each frame.
import cv2
# Read one frame from the AVI and preprocess it.
# NOTE(review): 'self.avi_path' and 'vidMed' are defined outside this
# snippet - presumably a method context and a median background frame.
cap = cv2.VideoCapture(self.avi_path)
img = cap.read()
gray = cv2.cvtColor(img[1], cv2.COLOR_BGR2GRAY)
bgDiv=gray/vidMed #background division
then, to pipe the processed frame to ffmpeg, I found this command in a related question:
sys.stdout.write( bgDiv.tostring() )
next, I am trying to run ffmpeg as a subprocess:
# Run FFmpeg reading rawvideo from stdin ('-i -') and encoding it to AVI.
cmd='ffmpeg.exe -f rawvideo -pix_fmt gray -s 2048x2048 -r 30 -i - -an -f avi -r 30 foo.avi'
sp.call(cmd,shell=True)
(this also from the mentioned post)
However, this fills my IPython console with cryptic hieroglyphs and then crashes it. any advice?
ultimately, I would like to pipe out 4 streams and have ffmpeg encode those 4 streams in parallel.
I had similar problem once. I opened an issue on Github, turns out it may be a platform issue.
Related to your question, you can as well pipe OpenCV images to FFMPEG. Here's a sample code:
# This script copies the video frame by frame
import cv2
import subprocess as sp

input_file = 'input_file_name.mp4'
output_file = 'output_file_name.mp4'

cap = cv2.VideoCapture(input_file)
ret, frame = cap.read()
height, width, ch = frame.shape  # learn geometry from the first frame

ffmpeg = 'FFMPEG'
dimension = '{}x{}'.format(width, height)
f_format = 'bgr24'  # remember OpenCV uses bgr format
fps = str(cap.get(cv2.CAP_PROP_FPS))

command = [ffmpeg,
           '-y',
           '-f', 'rawvideo',
           '-vcodec', 'rawvideo',
           '-s', dimension,
           '-pix_fmt', 'bgr24',
           '-r', fps,
           '-i', '-',
           '-an',
           '-vcodec', 'mpeg4',
           '-b:v', '5000k',
           output_file]

# FIX: do not capture stderr into a pipe that is never drained - FFmpeg's
# log output can fill the OS pipe buffer and deadlock both processes.
proc = sp.Popen(command, stdin=sp.PIPE)

while True:
    ret, frame = cap.read()
    if not ret:
        break
    # FIX: tobytes() replaces the long-deprecated ndarray.tostring().
    proc.stdin.write(frame.tobytes())

cap.release()
proc.stdin.close()
proc.wait()
I'm Kind of late, But my powerful VidGear Python Library automates the process of pipelining OpenCV frames into FFmpeg on any platform with its WriteGear API's Compression Mode. OP, You can implement your answer as follows:
# import libraries
from vidgear.gears import WriteGear
import cv2

output_params = {"-s": "2048x2048", "-r": 30}  # define FFmpeg tweak parameters for writer
stream = cv2.VideoCapture(0)  # Open live webcam video stream on first index(i.e. 0) device
writer = WriteGear(output_filename='Output.mp4', compression_mode=True, logging=True, **output_params)  # Define writer with output filename 'Output.mp4'

# infinite loop
while True:
    (grabbed, frame) = stream.read()  # read frames
    # check if frame empty
    # FIX: original read 'if not is grabbed:', which is a SyntaxError.
    if not grabbed:
        # if True break the infinite loop
        break
    # {do something with frame here}
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # write a modified frame to writer
    writer.write(gray)
    # Show output window
    cv2.imshow("Output Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    # check for 'q' key-press
    if key == ord("q"):
        # if 'q' key-pressed break out
        break

cv2.destroyAllWindows()  # close output window
stream.release()  # safely close video stream
writer.close()  # safely close writer
Source: https://abhitronix.github.io/vidgear/latest/gears/writegear/compression/usage/#using-compression-mode-with-opencv
You can check out VidGear Docs for more advanced applications and features.
Hope that helps!
You can use this pkg. ffmpegcv has Reader and Writer in ffmpeg backbone, similar to cv2.
#!pip install ffmpegcv
import ffmpegcv

vfile_in = 'A.mp4'
vfile_out = 'A_h264.mp4'

# ffmpegcv mirrors the cv2 capture/writer API on top of an FFmpeg backend.
reader = ffmpegcv.VideoCapture(vfile_in)
w, h = reader.width, reader.height
writer = ffmpegcv.VideoWriter(vfile_out, 'h264_nvenc', reader.fps, (w, h))

# Re-encode the input frame by frame.
for frame in reader:
    writer.write(frame)

reader.release()
writer.release()

Categories

Resources