In order to try an embedded AI, I want to stream an image dataset through an RTSP stream. The idea is to read one image every X seconds, send it to the stream, and run my AI inference on it. I based my attempt on this GitHub gist: https://gist.github.com/takidog/2c981c34d5d5b41c0d712f8ef4ac60d3#file-main-py
This is what I have tried so far:
import cv2
import time
import subprocess as sp
import glob, os
__PATH = "./DATASET"
os.chdir(__PATH)
IMG_LIST = glob.glob("*.jpg")
IMG_LIST_LEN = len(IMG_LIST)
IMG_INDEX = 0
IMG_DELAY = 2
IMG_WIDTH = 1280
IMG_HEIGHT = 720
IMG_SIZE = str(IMG_WIDTH)+"x"+str(IMG_HEIGHT)
FPS = 5
RTSP_SERVER = "rtsp://localhost:31415/stream"
COMMAND = ['ffmpeg',
           '-re',
           '-s', IMG_SIZE,
           '-r', str(FPS),
           '-i', '-',
           '-bufsize', '64M',
           '-maxrate', "4M",
           '-rtsp_transport', 'tcp',
           '-muxdelay', '0.1',
           RTSP_SERVER]
process = sp.Popen(COMMAND, stdin=sp.PIPE)
while True:
    CURRENT_IMG = cv2.imread(IMG_LIST[IMG_INDEX])
    IMG_INDEX = (IMG_INDEX+1) % IMG_LIST_LEN
    while CURRENT_IMG.shape[0] != 720:  # We dump images with a bad format
        CURRENT_IMG = cv2.imread(IMG_LIST[IMG_INDEX])
        IMG_INDEX = (IMG_INDEX+1) % IMG_LIST_LEN
    _, FRAME = cv2.imencode('.png', CURRENT_IMG)
    process.stdin.write(FRAME.tobytes())
    time.sleep(1/FPS)
Surprise, surprise: this does not work, and gives me this error:
Input #0, png_pipe, from 'pipe:':
Duration: N/A, bitrate: N/A
Stream #0:0: Video: png, rgb24(pc), 1280x720, 25 fps, 25 tbr, 25 tbn, 25 tbc
[NULL # 0x55ba3fe1b860] Unable to find a suitable output format for 'rtsp://localhost:31415/stream'
rtsp://localhost:31415/stream: Invalid argument
Traceback (most recent call last):
File "main.py", line 47, in <module>
process.stdin.write(FRAME.tobytes())
BrokenPipeError: [Errno 32] Broken pipe
Here is a reproducible sample - hoping you can copy, paste and execute it (but nothing is promised...).
The root cause of your error is a missing output format: FFmpeg cannot deduce a muxer from an rtsp:// URL, so '-f rtsp' must be stated explicitly - that is what "Unable to find a suitable output format" means here.
The example applies the following stages:
Create 10 synthetic JPEG images in the ./test_dataset folder, to be used as input.
Execute FFplay sub-process as an RTSP listener.
When using the TCP protocol we should start the TCP server first (FFplay is used as a TCP server in our case).
We also need the receiver process because, without it, the FFmpeg streamer process halts after the first frame.
Execute FFmpeg sub-process for RTSP streaming.
Cyclically read a JPEG image to a NumPy array (in BGR color format), and write the array as a raw video frame to the stdin pipe.
Note: It is more efficient to write raw video frames than to encode each frame to PNG (as done by your reference sample).
Here is the code:
import cv2
#import time
import subprocess as sp
import glob
import os
img_width = 1280
img_height = 720
test_path = './test_dataset' # Folder with synthetic sample images.
os.makedirs(test_path, exist_ok=True) # Create folder for input images.
os.chdir(test_path)
ffmpeg_cmd = 'ffmpeg' # May use full path like: 'c:\\FFmpeg\\bin\\ffmpeg.exe'
ffplay_cmd = 'ffplay' # May use full path like: 'c:\\FFmpeg\\bin\\ffplay.exe'
# Create 10 synthetic JPEG images for testing (image0001.jpg, image0002.jpg, ..., image0010.jpg).
sp.run([ffmpeg_cmd, '-y', '-f', 'lavfi', '-i', f'testsrc=size={img_width}x{img_height}:rate=1:duration=10', 'image%04d.jpg'])
img_list = glob.glob("*.jpg")
img_list_len = len(img_list)
img_index = 0
fps = 5
rtsp_server = 'rtsp://localhost:31415/live.stream'
# You will need to start the server up first, before the sending client (when using TCP). See: https://trac.ffmpeg.org/wiki/StreamingGuide#Pointtopointstreaming
ffplay_process = sp.Popen([ffplay_cmd, '-rtsp_flags', 'listen', rtsp_server]) # Use FFplay sub-process for receiving the RTSP video.
command = [ffmpeg_cmd,
           '-re',
           '-f', 'rawvideo',  # Apply raw video as input - it's more efficient than encoding each frame to PNG
           '-s', f'{img_width}x{img_height}',
           '-pixel_format', 'bgr24',
           '-r', f'{fps}',
           '-i', '-',
           '-pix_fmt', 'yuv420p',
           '-c:v', 'libx264',
           '-bufsize', '64M',
           '-maxrate', '4M',
           '-rtsp_transport', 'tcp',
           '-f', 'rtsp',
           #'-muxdelay', '0.1',
           rtsp_server]
process = sp.Popen(command, stdin=sp.PIPE)  # Execute FFmpeg sub-process for RTSP streaming
while True:
    current_img = cv2.imread(img_list[img_index])  # Read a JPEG image to NumPy array (in BGR color format) - assume the resolution is correct.
    img_index = (img_index+1) % img_list_len  # Cyclically repeat images
    process.stdin.write(current_img.tobytes())  # Write raw frame to stdin pipe.
    cv2.imshow('current_img', current_img)  # Show image for testing
    # time.sleep(1/fps)
    key = cv2.waitKey(int(round(1000/fps)))  # We need to call cv2.waitKey after cv2.imshow
    if key == 27:  # Press Esc to exit
        break
process.stdin.close() # Close stdin pipe
process.wait() # Wait for FFmpeg sub-process to finish
ffplay_process.kill() # Forcefully close FFplay sub-process
cv2.destroyAllWindows() # Close OpenCV window
Related
I need to run multiple streams from Python OpenCV to RTMP/RTSP with FFmpeg. Currently I can see that two streams are going through FFmpeg (in the console their log output is mixed together). At the destinations, the first stream is empty while the second stream plays correctly (the first stream's metadata does reach the destination).
I need the multiple streams to arrive separately at their destinations.
## st.txt = 'rtsp://ip:port/source/1' & 'rtsp://ip:port/source/2'
from multiprocessing.pool import ThreadPool
import cv2
import random
import subprocess

def f(x):
    name = (x.split('/'))[-1]
    y = 30
    if len(x) == 1:
        x = int(x)
    cam = cv2.VideoCapture(x)
    width = cam.get(cv2.CAP_PROP_FRAME_WIDTH)
    height = cam.get(cv2.CAP_PROP_FRAME_HEIGHT)
    fps = cam.get(cv2.CAP_PROP_FPS)

    def streamer():
        command = ['ffmpeg',
                   '-r', str(fps),
                   '-y',
                   '-f', 'rawvideo',
                   '-pix_fmt', 'bgr24',
                   '-s', "{}x{}".format(int(width), int(height)),
                   '-i', '-',
                   '-c:v', 'libx264',
                   '-pix_fmt', 'yuv420p',
                   '-preset', 'fast',
                   '-bufsize', '7000k',
                   '-flvflags', 'no_duration_filesize',
                   '-g', '180',
                   '-f', 'flv',
                   'rtmp://ip:port/live/' + str(name)]
        return command

    p_stream = subprocess.Popen(streamer(), stdin=subprocess.PIPE)
    while cam.isOpened():
        ret, frame = cam.read()
        if not ret:
            break
        cv2.imshow(str(name), frame)
        p_stream.stdin.write(frame.tobytes())
        key = cv2.waitKey(1)
        if key == ord('w'):  # cv2.waitKey returns an int, so compare with ord('w')
            cam.release()
            cv2.destroyAllWindows()
            break

def ls():
    with open(r'st.txt') as f:
        lns = [line.rstrip('\n') for line in f]
    return lns

if __name__ == '__main__':
    pool = ThreadPool()
    results = pool.map(f, ls())
I tried this code on better hardware and it works for multiple streams at the same time.
I'm looking to record a Twitch livestream by feeding it the direct livestream URL using streamlink.streams(url) (which returns an .m3u8 URL). With this I have no problem reading the stream, and even writing a few images from it, but when it comes to writing it as a video, I get errors.
P.S.: Yes, I know there are other options like Streamlink and yt-dlp, but I want to operate solely in Python, not using the CLI... which I believe is all those two offer (for recording).
Here's what I currently have:
if streamlink.streams(url):
    stream = streamlink.streams(url)['best']
    stream = str(stream).split(', ')
    stream = stream[1].strip("'")
    cap = cv2.VideoCapture(stream)
    gst_out = "appsrc ! video/x-raw, format=BGR ! queue ! nvvidconv ! omxh264enc ! h264parse ! qtmux ! filesink location=stream "
    out = cv2.VideoWriter(gst_out, cv2.VideoWriter_fourcc(*'mp4v'), 30, (1920, 1080))
    while True:
        _, frame = cap.read()
        out.write(frame)
For this code, I get this error msg:
[tls # 0x1278a74f0] Error in the pull function.
And if I remove gst_out, feed stream instead, and move cap and out into the while loop, like so:
if streamlink.streams(url):
    stream = streamlink.streams(url)['best']
    stream = str(stream).split(', ')
    stream = stream[1].strip("'")
    while True:
        cap = cv2.VideoCapture(stream)
        _, frame = cap.read()
        out = cv2.VideoWriter(stream, cv2.VideoWriter_fourcc(*'mp4v'), 30, (1920, 1080))
        out.write(frame)
I get:
OpenCV: FFMPEG: tag 0x7634706d/'mp4v' is not supported with codec id 12 and format 'hls / Apple HTTP Live Streaming'
What am I missing here?
The first part uses GStreamer syntax, and OpenCV for Python is most likely not built with GStreamer.
This answer is going to focus on the second part (also because I don't know GStreamer so well).
There are several issues:
cap = cv2.VideoCapture(stream) should be before the while True loop.
out = cv2.VideoWriter(stream, cv2.VideoWriter_fourcc(*'mp4v'), 30, (1920, 1080)) should be before the while True loop.
The first argument of cv2.VideoWriter should be MP4 file name, and not stream.
For getting a valid output file, we have to execute out.release() after the loop, but the loop may never end.
It is recommended to get frame size and rate of the input video, and set VideoWriter accordingly:
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(cap.get(cv2.CAP_PROP_FPS))
video_file_name = 'output.mp4'
out = cv2.VideoWriter(video_file_name, cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height)) # Open video file for writing
It is recommended to break the loop if ret is False:
ret, frame = cap.read()
if not ret:
    break
One option to end the recording is when user press Esc key.
Break the loop if cv2.waitKey(1) == 27.
cv2.waitKey(1) is going to work only after executing cv2.imshow.
A simple solution is executing cv2.imshow every 30 frames (for example).
if frame_counter % 30 == 0:
    cv2.imshow('frame', frame)  # Show frame every 30 frames (for testing)
if cv2.waitKey(1) == 27:  # Press Esc to stop recording (cv2.waitKey only works when cv2.imshow is used).
    break
Complete code sample:
from streamlink import Streamlink
import cv2
def stream_to_url(url, quality='best'):
    session = Streamlink()
    streams = session.streams(url)
    if streams:
        return streams[quality].to_url()
    else:
        raise ValueError('Could not locate your stream.')
url = 'https://www.twitch.tv/noraexplorer' # Need to login to twitch.tv first (using the browser)...
quality='best'
stream_url = stream_to_url(url, quality) # Get the video URL
cap = cv2.VideoCapture(stream_url, cv2.CAP_FFMPEG) # Open video stream for capturing
# Get frame size and rate of the input video
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(cap.get(cv2.CAP_PROP_FPS))
video_file_name = 'output.mp4'
out = cv2.VideoWriter(video_file_name, cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height)) # Open video file for writing
frame_counter = 0
while True:
    ret, frame = cap.read()
    if not ret:
        break
    if frame_counter % 30 == 0:
        cv2.imshow('frame', frame)  # Show frame every 30 frames (for testing)
    out.write(frame)  # Write frame to output.mp4
    if cv2.waitKey(1) == 27:  # Press Esc to stop recording (cv2.waitKey only works when cv2.imshow is used).
        break
    frame_counter += 1
cap.release()
out.release()
cv2.destroyAllWindows()
Testing the setup using FFplay and subprocess module:
from streamlink import Streamlink
import subprocess
def stream_to_url(url, quality='best'):
    session = Streamlink()
    streams = session.streams(url)
    if streams:
        return streams[quality].to_url()
    else:
        raise ValueError('Could not locate your stream.')
#url = 'https://www.twitch.tv/noraexplorer' # Need to login to twitch.tv first (using the browser)...
url = 'https://www.twitch.tv/valorant'
quality='best'
stream_url = stream_to_url(url, quality) # Get the video URL
subprocess.run(['ffplay', stream_url])
Update:
Using ffmpeg-python for reading the video, and OpenCV for recording the video:
In cases where cv2.VideoCapture is not working, we may use FFmpeg CLI as sub-process.
ffmpeg-python module is Python binding for FFmpeg CLI.
Using ffmpeg-python is almost like using the subprocess module directly; it is used here mainly to simplify the usage of FFprobe.
Using FFprobe for getting video frames resolution and framerate (without using OpenCV):
p = ffmpeg.probe(stream_url, select_streams='v')
width = p['streams'][0]['width']
height = p['streams'][0]['height']
r_frame_rate = p['streams'][0]['r_frame_rate']  # May return 60000/1001
if '/' in r_frame_rate:
    fps = float(r_frame_rate.split("/")[0]) / float(r_frame_rate.split("/")[1])  # Convert from 60000/1001 to 59.94
elif r_frame_rate != '0':
    fps = float(r_frame_rate)
else:
    fps = 30  # Used as default
Getting the framerate may be a bit of a challenge...
Note: ffprobe CLI should be in the execution path.
Start FFmpeg sub-process with stdout as pipe:
ffmpeg_process = (
    ffmpeg
    .input(stream_url)
    .video
    .output('pipe:', format='rawvideo', pix_fmt='bgr24')
    .run_async(pipe_stdout=True)
)
Note: ffmpeg CLI should be in the execution path.
Reading a frame from the pipe, and convert it from bytes to NumPy array:
in_bytes = ffmpeg_process.stdout.read(width*height*3)
frame = np.frombuffer(in_bytes, np.uint8).reshape([height, width, 3])
Closing FFmpeg sub-process:
Closing stdout pipe ends FFmpeg (with "broken pipe" error).
ffmpeg_process.stdout.close()
ffmpeg_process.wait() # Wait for the sub-process to finish
Complete code sample:
from streamlink import Streamlink
import cv2
import numpy as np
import ffmpeg
def stream_to_url(url, quality='best'):
    session = Streamlink()
    streams = session.streams(url)
    if streams:
        return streams[quality].to_url()
    else:
        raise ValueError('Could not locate your stream.')
#url = 'https://www.twitch.tv/noraexplorer' # Need to login to twitch.tv first (using the browser)...
url = 'https://www.twitch.tv/valorant'
quality='best'
stream_url = stream_to_url(url, quality) # Get the video URL
#subprocess.run(['ffplay', stream_url]) # Use FFplay for testing
# Use FFprobe to get video frames resolution and framerate.
################################################################################
p = ffmpeg.probe(stream_url, select_streams='v')
width = p['streams'][0]['width']
height = p['streams'][0]['height']
r_frame_rate = p['streams'][0]['r_frame_rate']  # May return 60000/1001
if '/' in r_frame_rate:
    fps = float(r_frame_rate.split("/")[0]) / float(r_frame_rate.split("/")[1])  # Convert from 60000/1001 to 59.94
elif r_frame_rate != '0':
    fps = float(r_frame_rate)
else:
    fps = 30  # Used as default
#cap = cv2.VideoCapture(stream_url, cv2.CAP_FFMPEG) # Open video stream for capturing
#width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
#height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
#fps = int(cap.get(cv2.CAP_PROP_FPS))
################################################################################
# Use FFmpeg sub-process instead of using cv2.VideoCapture
################################################################################
ffmpeg_process = (
    ffmpeg
    .input(stream_url, an=None)  # an=None applies the -an argument (ignore the input audio - not required, just more elegant).
    .video
    .output('pipe:', format='rawvideo', pix_fmt='bgr24')
    .run_async(pipe_stdout=True)
)
################################################################################
video_file_name = 'output.mp4'
out = cv2.VideoWriter(video_file_name, cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height)) # Open video file for writing
frame_counter = 0
while True:
    #ret, frame = cap.read()
    in_bytes = ffmpeg_process.stdout.read(width*height*3)  # Read raw video frame from stdout as bytes array.
    if len(in_bytes) < width*height*3:  # if not ret:
        break
    frame = np.frombuffer(in_bytes, np.uint8).reshape([height, width, 3])  # Convert bytes array to NumPy array.
    if frame_counter % 30 == 0:
        cv2.imshow('frame', frame)  # Show frame every 30 frames (for testing)
    out.write(frame)  # Write frame to output.mp4
    if cv2.waitKey(1) == 27:  # Press Esc to stop recording (cv2.waitKey only works when cv2.imshow is used).
        break
    frame_counter += 1
#cap.release()
ffmpeg_process.stdout.close() # Close stdout pipe (it also closes FFmpeg).
out.release()
cv2.destroyAllWindows()
ffmpeg_process.wait() # Wait for the sub-process to finish
Note:
In case you care about the quality of the recorded video, using cv2.VideoWriter is not the best choice...
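One better-quality alternative is piping the raw frames to a second FFmpeg sub-process that encodes with libx264. Here is a minimal sketch under assumptions (ffmpeg is in the execution path; the frame size and rate below are hard-coded instead of taken from FFprobe as shown above):
import subprocess as sp

width, height, fps = 1920, 1080, 30  # Assumed stream properties - in practice take them from FFprobe as above
writer_process = sp.Popen(['ffmpeg', '-y',
                           '-f', 'rawvideo', '-pix_fmt', 'bgr24',
                           '-s', f'{width}x{height}', '-r', str(fps),
                           '-i', '-',
                           '-c:v', 'libx264', '-pix_fmt', 'yuv420p', '-crf', '18',
                           'output.mp4'], stdin=sp.PIPE)
# Inside the reading loop, replace out.write(frame) with:
# writer_process.stdin.write(frame.tobytes())
writer_process.stdin.close()  # Closing stdin lets FFmpeg finalize the file
writer_process.wait()
Lower '-crf' values mean higher quality (and larger files).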
I am trying to pipe output from FFmpeg in Python. I am reading images from a video grabber card, and I can successfully record them to an output file from the command line using dshow. Now I am trying to grab the images from the card into my OpenCV code to be able to further play with the data. Unfortunately, when I pipe out the images, the displayed video comes out wrong, as shown in the link:
link: s000.tinyupload.com/?file_id=15940665795196022618
The code I used is as shown below:
import cv2
import subprocess as sp
import numpy
import sys
import os
old_stdout=sys.stdout
log_file=open("message.log","w")
sys.stdout=log_file
FFMPEG_BIN = "C:/ffmpeg/bin/ffmpeg.exe"
command = [FFMPEG_BIN, '-y',
           '-f', 'dshow', '-rtbufsize', '100M',
           '-i', 'video=Datapath VisionAV Video 01',
           '-video_size', '640x480',
           '-pix_fmt', 'bgr24', '-r', '25',
           '-f', 'image2pipe', '-']
pipe = sp.Popen(command, stdout=sp.PIPE, bufsize=10**8)
while True:
    # Capture frame-by-frame
    raw_image = pipe.stdout.read(640*480*3)
    # transform the bytes read into a numpy array
    image = numpy.frombuffer(raw_image, dtype='uint8')
    print(image)
    image = image.reshape((480, 640, 3))
    if image is not None:
        cv2.imshow('Video', image)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    pipe.stdout.flush()
sys.stdout=old_stdout
log_file.close()
cv2.destroyAllWindows()
Please do provide me some pointers to fix this issue. Help is greatly appreciated.
I struggled for a long time with the FFmpeg console application, and finally gave up.
It's easier with this package:
pip install ffmpeg-python
Karl Kroening has published a very good integration of FFmpeg into Python. With these examples, a solution should be possible:
https://github.com/kkroening/ffmpeg-python
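For instance, reading raw BGR frames from the grabber card with ffmpeg-python could look roughly like this (a minimal, untested sketch; the dshow device name and the 640x480 frame size are taken from the question above and are assumptions):
import ffmpeg
import numpy as np

width, height = 640, 480
process = (
    ffmpeg
    .input('video=Datapath VisionAV Video 01', f='dshow')  # Device name from the question (assumption)
    .output('pipe:', format='rawvideo', pix_fmt='bgr24', s=f'{width}x{height}')
    .run_async(pipe_stdout=True)
)
raw_image = process.stdout.read(width * height * 3)  # Read one raw BGR frame from the pipe
image = np.frombuffer(raw_image, np.uint8).reshape([height, width, 3])
From here the NumPy array can be passed to cv2.imshow just like in the code above.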
After you call sp.Popen you have to communicate with it. You can use the following code:
try:
    pipe = sp.Popen(command, stdout=sp.PIPE, stderr=sp.STDOUT, universal_newlines=True)
    ffmpeg_output, _ = pipe.communicate()
except sp.CalledProcessError as err:
    print("FFmpeg stdout output on error:\n" + err.output)
Finally, you can print the output to make sure the above commands worked:
print(ffmpeg_output)
The above statement displays the output returned by the communication with the process.
This works for me
import subprocess as sp
import json
import os
import numpy
import cv2
from PIL import Image
from imutils.video import FPS

def video_frames_ffmpeg():
    width = 640
    height = 360
    iterator = 0
    cmd = ['ffmpeg', '-loglevel', 'quiet',
           '-f', 'dshow',
           '-i', 'video=HD USB Camera',
           #'-vf', 'scale=%d:%d,smartblur' % (width, height),
           '-preset', 'ultrafast', '-tune', 'zerolatency',
           '-f', 'rawvideo',
           '-pix_fmt', 'bgr24',
           '-']
    p = sp.Popen(cmd, stdout=sp.PIPE)
    while True:
        arr = numpy.frombuffer(p.stdout.read(width*height*3), dtype=numpy.uint8)
        iterator += 1
        if len(arr) == 0:
            p.wait()
            print("awaiting")
            #return
        if iterator >= 1000:
            break
        frame = arr.reshape((height, width, 3)).copy()  # copy() because frombuffer returns a read-only array and cv2.putText writes in place
        cv2.putText(frame, "frame{}".format(iterator), (75, 70),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)
        im = Image.fromarray(frame)
        im.save("ffmpeg_test/test%d.jpeg" % iterator)
        yield arr

for i, frame in enumerate(video_frames_ffmpeg()):
    if i == 0:
        fps = FPS().start()
    else:
        fps.update()
fps.stop()
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
cv2.destroyAllWindows()
Try:
command = [FFMPEG_BIN,
           '-rtbufsize', '100M',
           '-i', '/dev/video0',  # change here to your camera device
           '-video_size', '640x480',
           '-pix_fmt', 'bgr24',  # '-r', '25',
           '-f', 'image2pipe',  # '-'
           '-vcodec', 'rawvideo', '-an', '-'
           ]
I don't know exactly how '-vcodec', 'rawvideo', '-an', '-' helps, but without it my code doesn't work. (A likely explanation: without '-vcodec rawvideo', the image2pipe muxer falls back to an image codec such as MJPEG, so the pipe carries compressed images of varying size instead of fixed-size raw BGR frames; '-an' simply drops the audio.)
There is a similar question here:
Getting 'av_interleaved_write_frame(): Broken pipe' error
But what should I do if I want to write the data?
I put pipe_out.stdin.write(image.tostring()) in the while loop, like this
FFMPEG_BIN = "/home/media/Downloads/ffmpeg"
import subprocess as sp
import sys
width = 360
height = 240
command_in = [FFMPEG_BIN,
              '-i', '/home/media/Videos/mytestvideo/zhou.avi',
              '-f', 'image2pipe',
              '-pix_fmt', 'bgr24',
              '-vcodec', 'rawvideo', '-']
pipe_in = sp.Popen(command_in, stdout=sp.PIPE, bufsize=10**8)
command_out = [FFMPEG_BIN,
               '-y',  # (optional) overwrite output file if it exists
               '-f', 'rawvideo',
               '-vcodec', 'rawvideo',
               '-s', '360x240',  # size of one frame
               '-pix_fmt', 'bgr24',
               '-r', '28',  # frames per second
               '-i', '-',  # The input comes from a pipe
               '-an',  # Tells FFmpeg not to expect any audio
               #'-vcodec', 'mpeg',
               'my_output_videofile.mp4']
pipe_out = sp.Popen(command_out, stdin=sp.PIPE, stderr=sp.PIPE)
import numpy
import cv2
import pylab
# read width*height*3 bytes (= 1 frame)
while True:
    raw_image = pipe_in.stdout.read(width*height*3)
    image = numpy.fromstring(raw_image, dtype='uint8')
    image = image.reshape((height, width, 3))
    pipe_in.communicate()
    pipe_out.stdin.write(image.tostring())
    pipe_out.communicate()
    pipe_in.stdout.flush()
    #cv2.imshow('image', image)
    #cv2.waitKey(0)
# throw away the data in the pipe's buffer.
'''
pipe_in.stdin.close()
pipe_in.stderr.close()
pipe_in.wait()
pipe_out.stdout.close()
pipe_out.stderr.close()
pipe_out.wait()
'''
#pipe_out.stdin.write(image.tostring())
However, the output video has only 1 frame (the first frame of the input video).
Any ideas?
Thanks!
@Pureheart, try something like this:
import time
import numpy
import cv2
# read width*height*3 bytes (= 1 frame)
t_end = time.time() + 15
while time.time() < t_end:
    raw_image = pipe_in.stdout.read(width*height*3)
    image = numpy.frombuffer(raw_image, dtype='uint8')
    image = image.reshape((height, width, 3))
    # Do not call communicate() inside the loop - communicate() waits for the
    # process to terminate and closes the pipes, which is why the original
    # code produced a video with only one frame.
    pipe_out.stdin.write(image.tobytes())
pipe_out.stdin.close()  # Closing stdin lets FFmpeg finalize the output file
How can I pipe openCV images to ffmpeg (running ffmpeg as a subprocess)?
(I am using spyder/anaconda)
I am reading frames from a video file and do some processing on each frame.
import cv2
cap = cv2.VideoCapture(self.avi_path)
img = cap.read()
gray = cv2.cvtColor(img[1], cv2.COLOR_BGR2GRAY)
bgDiv=gray/vidMed #background division
then, to pipe the processed frame to ffmpeg, I found this command in a related question:
sys.stdout.write( bgDiv.tostring() )
next, I am trying to run ffmpeg as a subprocess:
cmd='ffmpeg.exe -f rawvideo -pix_fmt gray -s 2048x2048 -r 30 -i - -an -f avi -r 30 foo.avi'
sp.call(cmd,shell=True)
(this also from the mentioned post)
However, this fills my IPython console with cryptic hieroglyphs and then crashes it. any advice?
ultimately, I would like to pipe out 4 streams and have ffmpeg encode those 4 streams in parallel.
I had a similar problem once. I opened an issue on GitHub; it turns out it may be a platform issue.
Related to your question: you can pipe OpenCV images to FFmpeg as well. Here's a code sample:
# This script copies the video frame by frame
import cv2
import subprocess as sp
input_file = 'input_file_name.mp4'
output_file = 'output_file_name.mp4'
cap = cv2.VideoCapture(input_file)
ret, frame = cap.read()
height, width, ch = frame.shape
ffmpeg = 'FFMPEG'
dimension = '{}x{}'.format(width, height)
f_format = 'bgr24' # remember OpenCV uses bgr format
fps = str(cap.get(cv2.CAP_PROP_FPS))
command = [ffmpeg,
           '-y',
           '-f', 'rawvideo',
           '-vcodec', 'rawvideo',
           '-s', dimension,
           '-pix_fmt', 'bgr24',
           '-r', fps,
           '-i', '-',
           '-an',
           '-vcodec', 'mpeg4',
           '-b:v', '5000k',
           output_file]
proc = sp.Popen(command, stdin=sp.PIPE, stderr=sp.PIPE)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    proc.stdin.write(frame.tobytes())  # tostring() is deprecated in NumPy - use tobytes()
cap.release()
proc.stdin.close()
proc.stderr.close()
proc.wait()
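Regarding encoding 4 streams in parallel (the last part of the question): each sp.Popen is an independent process, so one FFmpeg sub-process per stream works. A minimal sketch reusing the command above (the out%d.avi names and the 2048x2048 / 30 fps values from the question are assumptions):
import subprocess as sp

def make_writer(output_file, dimension, fps):
    # One FFmpeg sub-process per output stream (same command as above, parameterized)
    command = ['ffmpeg', '-y',
               '-f', 'rawvideo', '-vcodec', 'rawvideo',
               '-s', dimension, '-pix_fmt', 'bgr24', '-r', fps,
               '-i', '-', '-an',
               '-vcodec', 'mpeg4', '-b:v', '5000k',
               output_file]
    return sp.Popen(command, stdin=sp.PIPE, stderr=sp.PIPE)

writers = [make_writer('out%d.avi' % i, '2048x2048', '30') for i in range(4)]
# In the processing loop, write each processed frame to its own writer:
# writers[i].stdin.write(frame_i.tobytes())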
I'm kind of late, but my VidGear Python library automates the process of pipelining OpenCV frames into FFmpeg on any platform, via its WriteGear API's Compression Mode. OP, you can implement your answer as follows:
# import required libraries
from vidgear.gears import WriteGear
import cv2

output_params = {"-s": "2048x2048", "-r": 30}  # define FFmpeg tweak parameters for writer
stream = cv2.VideoCapture(0)  # Open live webcam video stream on first index (i.e. 0) device
writer = WriteGear(output_filename='Output.mp4', compression_mode=True, logging=True, **output_params)  # Define writer with output filename 'Output.mp4'

# infinite loop
while True:
    # read frames
    (grabbed, frame) = stream.read()

    # check if frame is empty
    if not grabbed:
        # if True break the infinite loop
        break

    # {do something with frame here}
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # write a modified frame to writer
    writer.write(gray)

    # Show output window
    cv2.imshow("Output Frame", frame)

    key = cv2.waitKey(1) & 0xFF
    # check for 'q' key-press
    if key == ord("q"):
        # if 'q' key-pressed break out
        break

# close output window
cv2.destroyAllWindows()
# safely close video stream
stream.release()
# safely close writer
writer.close()
Source: https://abhitronix.github.io/vidgear/latest/gears/writegear/compression/usage/#using-compression-mode-with-opencv
You can check out VidGear Docs for more advanced applications and features.
Hope that helps!
You can use the ffmpegcv package. It provides readers and writers backed by FFmpeg, with an API similar to cv2.
#!pip install ffmpegcv
import ffmpegcv
vfile_in = 'A.mp4'
vfile_out = 'A_h264.mp4'
vidin = ffmpegcv.VideoCapture(vfile_in)
w, h = vidin.width, vidin.height
vidout = ffmpegcv.VideoWriter(vfile_out, 'h264_nvenc', vidin.fps, (w, h))
for frame in vidin:
    vidout.write(frame)
vidin.release()
vidout.release()