There is a similar question here:
Getting 'av_interleaved_write_frame(): Broken pipe' error
But what should I do if I want to write the data?
I put pipe_out.stdin.write(image.tostring()) in the while loop, like this:
FFMPEG_BIN = "/home/media/Downloads/ffmpeg"
import subprocess as sp
import sys
width = 360
height = 240
command_in = [ FFMPEG_BIN,
        '-i', '/home/media/Videos/mytestvideo/zhou.avi',
        '-f', 'image2pipe',
        '-pix_fmt', 'bgr24',
        '-vcodec', 'rawvideo', '-']
pipe_in = sp.Popen(command_in, stdout=sp.PIPE, bufsize=10**8)
command_out = [ FFMPEG_BIN,
        '-y', # (optional) overwrite output file if it exists
        '-f', 'rawvideo',
        '-vcodec', 'rawvideo',
        '-s', '360x240', # size of one frame
        '-pix_fmt', 'bgr24',
        '-r', '28', # frames per second
        '-i', '-', # The input comes from a pipe
        '-an', # Tells FFmpeg not to expect any audio
        #'-vcodec', 'mpeg',
        'my_output_videofile.mp4' ]
pipe_out = sp.Popen( command_out, stdin=sp.PIPE, stderr=sp.PIPE)
import numpy
import cv2
import pylab
# read width*height*3 bytes (= 1 frame)
while True:
    raw_image = pipe_in.stdout.read(width*height*3)
    image = numpy.fromstring(raw_image, dtype='uint8')
    image = image.reshape((height, width, 3))
    pipe_in.communicate()
    pipe_out.stdin.write(image.tostring())
    pipe_out.communicate()
    pipe_in.stdout.flush()
    #cv2.imshow('image', image)
    #cv2.waitKey(0)
# throw away the data in the pipe's buffer.
'''
pipe_in.stdin.close()
pipe_in.stderr.close()
pipe_in.wait()
pipe_out.stdout.close()
pipe_out.stderr.close()
pipe_out.wait()
'''
#pipe_out.stdin.write(image.tostring())
However, the output video has only 1 frame (the first frame of the input video).
Any ideas?
Thanks!
@Pureheart, try something like this:
import time
import numpy
import cv2

# Read width*height*3 bytes (= 1 frame) per iteration.
# Note: do NOT call communicate() inside the loop -- communicate() waits for
# the process to terminate and closes its pipes, which is why your original
# code produced a single frame.
t_end = time.time() + 15
while time.time() < t_end:
    raw_image = pipe_in.stdout.read(width*height*3)
    if len(raw_image) < width*height*3:
        break
    image = numpy.fromstring(raw_image, dtype='uint8')
    image = image.reshape((height, width, 3))
    pipe_out.stdin.write(image.tostring())
pipe_out.stdin.close()
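One more pitfall worth mentioning: pipe_out was opened with stderr=sp.PIPE, and FFmpeg writes its progress messages to stderr. If nothing ever reads that pipe, its buffer can fill up and block the writer. A minimal sketch of a safer setup (sp.DEVNULL simply discards the diagnostics):

import subprocess as sp

# Discard FFmpeg's stderr chatter instead of letting an unread PIPE fill up
# (or pass an open file object here if you want to keep the logs).
pipe_out = sp.Popen(command_out, stdin=sp.PIPE, stderr=sp.DEVNULL)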
Related
I need to run multiple streams from Python OpenCV to RTMP/RTSP with FFmpeg. I can see two streams going through ffmpeg (their console output is interleaved), but at the destination the first stream is empty while the second one plays correctly (only the first stream's metadata reaches the destination). I want each stream to arrive separately at its destination.
## st.txt = 'rtsp://ip:port/source/1' & 'rtsp://ip:port/source/2'
from multiprocessing.pool import ThreadPool
import cv2
import subprocess

def f(x):
    name = (x.split('/'))[-1]
    if len(x) == 1:
        x = int(x)  # a single character means a local device index
    cam = cv2.VideoCapture(x)
    width = cam.get(cv2.CAP_PROP_FRAME_WIDTH)
    height = cam.get(cv2.CAP_PROP_FRAME_HEIGHT)
    fps = cam.get(cv2.CAP_PROP_FPS)

    def streamer():
        command = ['ffmpeg',
                   '-r', str(fps),
                   '-y',
                   '-f', 'rawvideo',
                   '-pix_fmt', 'bgr24',
                   '-s', "{}x{}".format(int(width), int(height)),
                   '-i', '-',
                   '-c:v', 'libx264',
                   '-pix_fmt', 'yuv420p',
                   '-preset', 'fast',
                   '-bufsize', '7000k',
                   '-flvflags', 'no_duration_filesize',
                   '-g', '180',
                   '-f', 'flv',
                   'rtmp://ip:port/live/' + str(name)]
        return command

    p_stream = subprocess.Popen(streamer(), stdin=subprocess.PIPE)
    while cam.isOpened():
        ret, frame = cam.read()
        if not ret:
            break
        cv2.imshow(str(name), frame)
        p_stream.stdin.write(frame.tobytes())
        key = cv2.waitKey(1)
        if key == ord('w'):  # ord('w'), not ('w'): waitKey returns an int
            cam.release()
            cv2.destroyAllWindows()
            break

def ls():
    with open(r'st.txt') as f:
        lns = [line.rstrip('\n') for line in f]
    return lns

if __name__ == '__main__':
    pool = ThreadPool()
    results = pool.map(f, ls())
I tried this code on better hardware and it works for multiple streams at the same time.
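As for the mixed console output: each ffmpeg child inherits the parent's stderr by default, so the two streams' logs interleave. A small sketch of one way to separate them (the log file name is illustrative):

import subprocess

def start_stream(command, name):
    # Give each ffmpeg process its own log file so the diagnostics of the
    # two streams no longer interleave on the console.
    log = open('ffmpeg_{}.log'.format(name), 'w')
    return subprocess.Popen(command, stdin=subprocess.PIPE, stderr=log)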
I have an endless camera stream that includes audio and video.
How can I get .wav files, split by time interval, out of an RTSP stream while it is still streaming?
I tried the code below, but I couldn't get any audio data before the stream ended:
import subprocess as sp

command = ['ffmpeg.exe',
           '-i', 'rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mp4',
           '-f', 's16le',
           '-acodec', 'libmp3lame',
           '-ar', '44100',
           '-ac', '2',
           '-']
pipe = sp.Popen(command, stdout=sp.PIPE)
raw_audio = pipe.stdout.read()
print(raw_audio)
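First, a note on why you got no data until the stream ended: pipe.stdout.read() with no size argument blocks until FFmpeg closes the pipe, i.e. until the stream is over. Reading fixed-size blocks returns data while the stream is live; a minimal sketch, reusing the pipe from your code (the block size is arbitrary):

CHUNK = 4096  # arbitrary block size
while True:
    data = pipe.stdout.read(CHUNK)  # returns once CHUNK bytes arrive (or at EOF)
    if not data:  # an empty result means FFmpeg closed the pipe
        break
    # ... append `data` to the current audio segment here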
Try the -f segment output container. Something like:
command = ['ffmpeg.exe',
           '-i', 'rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mp4',
           '-vn', '-acodec', 'pcm_s16le', '-ar', '44100', '-ac', '2',
           '-f', 'segment', '-segment_time', '3', 'out%03d.wav']
sp.run(command)
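This writes out000.wav, out001.wav, and so on while the stream is still running, rolling over to a new file roughly every 3 seconds.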
Now, if what you really need is the raw samples and not necessarily .wav files, fix your command by removing the '-acodec', 'libmp3lame' option (with '-f s16le' and no codec specified, FFmpeg defaults to raw pcm_s16le) and specify the number of samples to read:
# How to read a block of audio data from stdout:
n = 44100 * 3       # number of samples (sampling rate * duration)
nbytes = n * 2 * 2  # bytes per block (samples * channels * 2 bytes/sample)
while True:
    raw_audio = pipe.stdout.read(nbytes)
    if len(raw_audio) < nbytes:
        break
    samples = np.frombuffer(raw_audio, dtype=np.int16).reshape(n, 2)
    ...  # do your thing
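For reference, the matching command for the raw-sample variant might look like this (a sketch; with no '-acodec' option, the s16le muxer defaults to raw pcm_s16le):

command = ['ffmpeg.exe',
           '-i', 'rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mp4',
           '-vn',           # drop the video stream
           '-f', 's16le',   # raw signed 16-bit little-endian PCM
           '-ar', '44100',
           '-ac', '2',
           '-']
pipe = sp.Popen(command, stdout=sp.PIPE)  # then run the read loop above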
I want to receive a byte stream using GStreamer with Python's subprocess module.
Currently I can successfully use ffmpeg to pull the byte stream, as shown below.
import cv2
import numpy as np
import subprocess as sp

height = 714
width = 420
rtsp_url = 'rtsp://127.0.0.1:8554/video'
# command
command = ['ffmpeg',
           '-i', rtsp_url,
           '-f', 'rawvideo',
           '-s', '{}x{}'.format(width, height),  # WxH, separated by 'x'
           '-pix_fmt', 'bgr24',
           '-fflags', 'nobuffer',
           '-']
p = sp.Popen(command, stdout=sp.PIPE, bufsize=10**8)
while True:
    raw_image = p.stdout.read(width*height*3)
    image = np.frombuffer(raw_image, dtype='uint8')
    image = image.reshape((height, width, 3)).copy()
    cv2.imshow('image', image)
    key = cv2.waitKey(20)
I want to use a GStreamer command instead of ffmpeg. So far, I have managed to write the byte stream to a file using the GStreamer command line:
gst-launch-1.0 rtspsrc location=rtsp://127.0.0.1:8554/video latency=0 drop-on-latency=true ! rtph264depay ! video/x-h264, stream-format='byte-stream' ! filesink location=/home/name/stdout
But it can't output the byte stream to a pipe, so the terminal doesn't display the byte stream the way the ffmpeg command does. How can I change this command to output the byte stream through a pipe so that I can read it?
Thank you for taking the time to answer!
This is the RTSP streaming code:
import cv2
import time
import subprocess as sp
import numpy as np

rtsp_url = 'rtsp://127.0.0.1:8554/video'
video_path = r'test.mp4'
cap = cv2.VideoCapture(video_path)

# Get video information
fps = int(cap.get(cv2.CAP_PROP_FPS))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
print('fps={}'.format(fps))

# command
command = ['ffmpeg',
           '-re',
           '-y',
           '-stream_loop', '-1',
           '-f', 'rawvideo',
           '-vcodec', 'rawvideo',
           '-pix_fmt', 'bgr24',
           '-s', "{}x{}".format(width, height),
           '-r', str(fps),
           '-i', '-',
           '-c:v', 'libx264',
           '-pix_fmt', 'yuv420p',
           '-preset', 'ultrafast',
           # '-flags2', 'local_header',
           '-bsf:v', 'dump_extra=freq=k',  # no extra quotes when args are passed as a list
           '-keyint_min', '60',
           '-g', '60',
           '-sc_threshold', '0',
           '-f', 'rtsp',
           '-rtsp_transport', 'tcp',
           '-muxdelay', '0.1',
           rtsp_url]
p = sp.Popen(command, stdin=sp.PIPE)

cnt = 0
t_start = time.time()
while cap.isOpened():
    t_cur = time.time() - t_start
    ret, frame = cap.read()
    if not ret:
        cnt += 1
        print("count: {}".format(cnt))
        cap = cv2.VideoCapture(video_path)  # reopen the file to loop it
        continue
    p.stdin.write(frame.tobytes())
    cv2.imshow('real_time', frame)
    key = cv2.waitKey(20)
    if key == 27:
        p.terminate()
        break
I have managed to create an example that works in Linux.
I was not able to simulate an RTSP camera, so I used an MP4 file as input.
Creating the MP4 input file using FFmpeg CLI within Python (for testing):
sp.run(shlex.split(f'ffmpeg -y -f lavfi -i testsrc=size={width}x{height}:rate=25:duration=100 -vcodec libx264 -pix_fmt yuv420p {input_file_name}'))
The GStreamer command is:
p = sp.Popen(shlex.split(f'{gstreamer_exe} --quiet filesrc location={input_file_name} ! qtdemux ! video/x-h264 ! avdec_h264 ! videoconvert ! capsfilter caps="video/x-raw, format=BGR" ! filesink location={stdout_file_name}'), stdout=sp.PIPE)
--quiet is used because GStreamer prints messages to stdout.
filesrc location... is used for reading the MP4 input - replace it with an RTSP pipeline.
videoconvert ! capsfilter caps="video/x-raw, format=BGR" converts the video format to raw BGR.
filesink location=/dev/stdout redirects the output to stdout (in Linux).
Code sample:
import cv2
import numpy as np
import subprocess as sp
import shlex
from sys import platform
width = 714
height = 420
input_file_name = 'input.mp4' # For testing, use MP4 input file instead of RTSP input.
# Build MP4 synthetic input video file for testing:
sp.run(shlex.split(f'ffmpeg -y -f lavfi -i testsrc=size={width}x{height}:rate=25:duration=100 -vcodec libx264 -pix_fmt yuv420p {input_file_name}'))
if platform == "win32":
    # stdout_file_name = "con:"
    # gstreamer_exe = 'c:/gstreamer/1.0/msvc_x86_64/bin/gst-launch-1.0.exe'
    raise Exception('win32 system is not supported')
else:
    stdout_file_name = "/dev/stdout"
    gstreamer_exe = 'gst-launch-1.0'
# https://stackoverflow.com/questions/29794053/streaming-mp4-video-file-on-gstreamer
p = sp.Popen(shlex.split(f'{gstreamer_exe} --quiet filesrc location={input_file_name} ! qtdemux ! video/x-h264 ! avdec_h264 ! videoconvert ! capsfilter caps="video/x-raw, format=BGR" ! filesink location={stdout_file_name}'), stdout=sp.PIPE)
while True:
    raw_image = p.stdout.read(width * height * 3)
    if len(raw_image) < width*height*3:
        break
    image = np.frombuffer(raw_image, dtype='uint8').reshape((height, width, 3))
    cv2.imshow('image', image)
    key = cv2.waitKey(1)
p.stdout.close()
p.wait()
cv2.destroyAllWindows()
Update:
Based on your new question, I managed to create an RTSP capturing example:
import cv2
import numpy as np
import subprocess as sp
import shlex
width = 240
height = 160
rtsp_url = 'rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mp4' # For testing, use public RTSP input.
gstreamer_exe = 'gst-launch-1.0' # '/usr/bin/gst-launch-1.0'
# https://stackoverflow.com/questions/29794053/streaming-mp4-video-file-on-gstreamer
p = sp.Popen(shlex.split(f'{gstreamer_exe} --quiet rtspsrc location={rtsp_url} ! queue2 ! rtph264depay ! avdec_h264 ! videoconvert ! capsfilter caps="video/x-raw, format=BGR" ! fdsink'), stdout=sp.PIPE)
while True:
    raw_image = p.stdout.read(width * height * 3)
    if len(raw_image) < width*height*3:
        break
    image = np.frombuffer(raw_image, np.uint8).reshape((height, width, 3))
    cv2.imshow('image', image)
    key = cv2.waitKey(1)
p.stdout.close()
p.wait()
cv2.destroyAllWindows()
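Note that a raw BGR pipe has no per-frame header, so width and height must match the stream exactly. If the resolution is not known in advance, one option is probing it first; a sketch, assuming ffprobe is installed alongside ffmpeg:

import json
import subprocess as sp

def probe_resolution(url):
    # Ask ffprobe for the first video stream's metadata as JSON.
    out = sp.check_output(['ffprobe', '-v', 'quiet', '-print_format', 'json',
                           '-show_streams', '-select_streams', 'v:0', url])
    stream = json.loads(out)['streams'][0]
    return stream['width'], stream['height']

# width, height = probe_resolution(rtsp_url)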
In order to try an embedded AI, I want to stream an image dataset through an RTSP stream.
What I am trying to do is read one image every X seconds, send it to the stream, and run my AI inference on it. I tried to use this GitHub gist: https://gist.github.com/takidog/2c981c34d5d5b41c0d712f8ef4ac60d3#file-main-py
This is what I have tried so far:
import cv2
import time
import subprocess as sp
import glob, os

__PATH = "./DATASET"
os.chdir(__PATH)
IMG_LIST = glob.glob("*.jpg")
IMG_LIST_LEN = len(IMG_LIST)
IMG_INDEX = 0
IMG_DELAY = 2
IMG_WIDTH = 1280
IMG_HEIGHT = 720
IMG_SIZE = str(IMG_WIDTH)+"x"+str(IMG_HEIGHT)
FPS = 5

RTSP_SERVER = "rtsp://localhost:31415/stream"

COMMAND = ['ffmpeg',
           '-re',
           '-s', IMG_SIZE,
           '-r', str(FPS),
           '-i', '-',
           '-bufsize', '64M',
           '-maxrate', "4M",
           '-rtsp_transport', 'tcp',
           '-muxdelay', '0.1',
           RTSP_SERVER]

process = sp.Popen(COMMAND, stdin=sp.PIPE)

while True:
    CURRENT_IMG = cv2.imread(IMG_LIST[IMG_INDEX])
    IMG_INDEX = (IMG_INDEX+1) % IMG_LIST_LEN
    while CURRENT_IMG.shape[0] != 720:  # We dump images with a bad format
        CURRENT_IMG = cv2.imread(IMG_LIST[IMG_INDEX])
        IMG_INDEX = (IMG_INDEX+1) % IMG_LIST_LEN
    _, FRAME = cv2.imencode('.png', CURRENT_IMG)
    process.stdin.write(FRAME.tobytes())
    time.sleep(1/FPS)
Surprise surprise, this does not work and gives me this error:
Input #0, png_pipe, from 'pipe:':
Duration: N/A, bitrate: N/A
Stream #0:0: Video: png, rgb24(pc), 1280x720, 25 fps, 25 tbr, 25 tbn, 25 tbc
[NULL @ 0x55ba3fe1b860] Unable to find a suitable output format for 'rtsp://localhost:31415/stream'
rtsp://localhost:31415/stream: Invalid argument
Traceback (most recent call last):
  File "main.py", line 47, in <module>
    process.stdin.write(FRAME.tobytes())
BrokenPipeError: [Errno 32] Broken pipe
Here is a reproducible sample - hoping you can copy, paste and execute it (but nothing is promised)...
The example applies the following stages:
Create 10 synthetic JPEG images in the ./test_dataset folder, to be used as input.
Execute an FFplay sub-process as an RTSP listener. When using the TCP protocol we should start the TCP server first (FFplay is used as a TCP server in our case). We also need the receiver process, because without it the FFmpeg streamer process halts after the first frame.
Execute an FFmpeg sub-process for RTSP streaming. Note that your command was missing the '-f', 'rtsp' output option, which is why FFmpeg reported "Unable to find a suitable output format".
Cyclically read a JPEG image into a NumPy array (in BGR color format), and write the array as a raw video frame to the stdin pipe.
Note: It is more efficient to write raw video frames than to encode each frame to PNG (as your reference sample does).
Here is the code:
import cv2
#import time
import subprocess as sp
import glob
import os
img_width = 1280
img_height = 720
test_path = './test_dataset' # Folder with synthetic sample images.
os.makedirs(test_path, exist_ok=True) # Create folder for input images.
os.chdir(test_path)
ffmpeg_cmd = 'ffmpeg' # May use full path like: 'c:\\FFmpeg\\bin\\ffmpeg.exe'
ffplay_cmd = 'ffplay' # May use full path like: 'c:\\FFmpeg\\bin\\ffplay.exe'
# Create 10 synthetic JPEG images for testing (image0001.jpg, image0002.jpg, ..., image0010.jpg).
sp.run([ffmpeg_cmd, '-y', '-f', 'lavfi', '-i', f'testsrc=size={img_width}x{img_height}:rate=1:duration=10', 'image%04d.jpg'])
img_list = glob.glob("*.jpg")
img_list_len = len(img_list)
img_index = 0
fps = 5
rtsp_server = 'rtsp://localhost:31415/live.stream'
# You will need to start the server up first, before the sending client (when using TCP). See: https://trac.ffmpeg.org/wiki/StreamingGuide#Pointtopointstreaming
ffplay_process = sp.Popen([ffplay_cmd, '-rtsp_flags', 'listen', rtsp_server]) # Use FFplay sub-process for receiving the RTSP video.
command = [ffmpeg_cmd,
           '-re',
           '-f', 'rawvideo',  # Apply raw video as input - it's more efficient than encoding each frame to PNG
           '-s', f'{img_width}x{img_height}',
           '-pixel_format', 'bgr24',
           '-r', f'{fps}',
           '-i', '-',
           '-pix_fmt', 'yuv420p',
           '-c:v', 'libx264',
           '-bufsize', '64M',
           '-maxrate', '4M',
           '-rtsp_transport', 'tcp',
           '-f', 'rtsp',
           #'-muxdelay', '0.1',
           rtsp_server]

process = sp.Popen(command, stdin=sp.PIPE)  # Execute FFmpeg sub-process for RTSP streaming

while True:
    current_img = cv2.imread(img_list[img_index])  # Read a JPEG image to a NumPy array (in BGR color format) - assume the resolution is correct.
    img_index = (img_index+1) % img_list_len  # Cyclically repeat images
    process.stdin.write(current_img.tobytes())  # Write raw frame to stdin pipe.
    cv2.imshow('current_img', current_img)  # Show image for testing
    # time.sleep(1/fps)
    key = cv2.waitKey(int(round(1000/fps)))  # We need to call cv2.waitKey after cv2.imshow
    if key == 27:  # Press Esc to exit
        break

process.stdin.close()    # Close stdin pipe
process.wait()           # Wait for FFmpeg sub-process to finish
ffplay_process.kill()    # Forcefully close FFplay sub-process
cv2.destroyAllWindows()  # Close OpenCV window
I am trying to pipe output from FFmpeg in Python. I am reading images from a video grabber card, and I can successfully record them to an output file from the command line using dshow. Now I am trying to grab the images from the card into my OpenCV code so I can play with the data further. Unfortunately, when I pipe the images out, I just get a display of the video as shown in the link:
link: s000.tinyupload.com/?file_id=15940665795196022618.
The code I used is as shown below:
import cv2
import subprocess as sp
import numpy
import sys
import os
old_stdout=sys.stdout
log_file=open("message.log","w")
sys.stdout=log_file
FFMPEG_BIN = "C:/ffmpeg/bin/ffmpeg.exe"
command = [ FFMPEG_BIN, '-y',
            '-f', 'dshow', '-rtbufsize', '100M',
            '-i', 'video=Datapath VisionAV Video 01',
            '-video_size', '640x480',
            '-pix_fmt', 'bgr24', '-r', '25',
            '-f', 'image2pipe', '-' ]
pipe = sp.Popen(command, stdout=sp.PIPE, bufsize=10**8)

while True:
    # Capture frame-by-frame
    raw_image = pipe.stdout.read(640*480*3)
    # Transform the bytes read into a numpy array
    image = numpy.frombuffer(raw_image, dtype='uint8')
    print(image)
    image = image.reshape((480, 640, 3))
    if image is not None:
        cv2.imshow('Video', image)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    pipe.stdout.flush()

sys.stdout = old_stdout
log_file.close()
cv2.destroyAllWindows()
Please do provide me some pointers to fix this issue. Help is greatly appreciated.
I struggled with the FFmpeg console application for a long time and finally gave up.
It's easier with this package:
pip install ffmpeg-python
Karl Kroening has published a very good integration of FFmpeg into Python here.
With these examples, a solution should be possible:
https://github.com/kkroening/ffmpeg-python
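For instance, reading raw BGR frames from the dshow device with ffmpeg-python might look roughly like this (a sketch based on the project's README examples; the device name is copied from the question, so adjust it for your card):

import ffmpeg
import numpy as np

width, height = 640, 480

# run_async starts FFmpeg as a subprocess whose stdout carries the raw frames.
# 'video=Datapath VisionAV Video 01' is the dshow device name from the question.
process = (
    ffmpeg
    .input('video=Datapath VisionAV Video 01', format='dshow')
    .output('pipe:', format='rawvideo', pix_fmt='bgr24', s=f'{width}x{height}')
    .run_async(pipe_stdout=True)
)

while True:
    in_bytes = process.stdout.read(width * height * 3)
    if len(in_bytes) < width * height * 3:
        break
    frame = np.frombuffer(in_bytes, np.uint8).reshape((height, width, 3))
    # ... hand `frame` to OpenCV here

process.wait()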
After you call sp.Popen you have to communicate with it. You can use the following code:

pipe = sp.Popen(command, stdout=sp.PIPE, stderr=sp.STDOUT, universal_newlines=True)
ffmpeg_output, _ = pipe.communicate()

(Note that Popen and communicate() do not raise sp.CalledProcessError; that exception only comes from sp.run(..., check=True) and friends, so a try/except around this code would never fire on an FFmpeg failure. Check pipe.returncode instead.)
Finally, you can print the output to make sure the above commands worked:
print(ffmpeg_output)
The above statement displays the output returned by communicating with the process.
This works for me
import subprocess as sp
import numpy
import cv2
from PIL import Image
from imutils.video import FPS

def video_frames_ffmpeg():
    width = 640
    height = 360
    iterator = 0
    cmd = ['ffmpeg', '-loglevel', 'quiet',
           '-f', 'dshow',
           '-i', 'video=HD USB Camera',
           #'-vf', 'scale=%d:%d,smartblur' % (width, height),
           '-preset', 'ultrafast', '-tune', 'zerolatency',
           '-f', 'rawvideo',
           '-pix_fmt', 'bgr24',
           '-']
    p = sp.Popen(cmd, stdout=sp.PIPE)
    while True:
        arr = numpy.frombuffer(p.stdout.read(width*height*3), dtype=numpy.uint8)
        iterator += 1
        if len(arr) == 0:
            p.wait()
            print("awaiting")
            break
        if iterator >= 1000:
            break
        # frombuffer returns a read-only view, so copy before drawing on it
        frame = arr.reshape((height, width, 3)).copy()
        cv2.putText(frame, "frame{}".format(iterator), (75, 70),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)
        im = Image.fromarray(frame)
        im.save("ffmpeg_test/test%d.jpeg" % iterator)
        yield arr

for i, frame in enumerate(video_frames_ffmpeg()):
    if i == 0:
        fps = FPS().start()
    else:
        fps.update()
fps.stop()
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
cv2.destroyAllWindows()
Try:
command = [ FFMPEG_BIN,
            '-rtbufsize', '100M',
            '-i', '/dev/video0',   # change this to your camera device
            '-video_size', '640x480',
            '-pix_fmt', 'bgr24',   # '-r', '25',
            '-f', 'image2pipe',    # '-'
            '-vcodec', 'rawvideo', '-an', '-'
            ]
I don't know exactly how '-vcodec', 'rawvideo', '-an', '-' helps, but without it my code doesn't work. (As far as I can tell, '-vcodec rawvideo' makes FFmpeg write unencoded frames into the pipe, which is what the fixed-size read expects, and '-an' drops the audio so only video bytes arrive.)