Seamless video loop in gstreamer - python

I'm trying to loop video playback using GStreamer and its Python bindings. My first attempt was to hook the EOS message and generate a seek event for the pipeline:
import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst
import time

if not Gst.init_check()[0]:
    print("gstreamer initialization failed")

source0 = Gst.ElementFactory.make("filesrc", "source0")
assert source0 is not None
source0.set_property("location", "video0.mp4")

qtdemux0 = Gst.ElementFactory.make("qtdemux", "demux0")
assert qtdemux0 is not None

decoder0 = Gst.ElementFactory.make("nxvideodec", "video_decoder0")
assert decoder0 is not None

def demux0_pad_added(demux, pad):
    if pad.name == 'video_0':  # we expect exactly one video stream, the first one
        pad.link(decoder0.get_static_pad("sink"))

qtdemux0.connect("pad-added", demux0_pad_added)

video_sink = Gst.ElementFactory.make("nxvideosink", "video_sink")
assert video_sink is not None

pipeline0 = Gst.Pipeline()
assert pipeline0 is not None
pipeline0.add(source0)
pipeline0.add(qtdemux0)
pipeline0.add(decoder0)
pipeline0.add(video_sink)

source0.link(qtdemux0)
# qtdemux0 -> decoder0: dynamic linking
decoder0.link(video_sink)

######################################################

def main():
    message_bus = pipeline0.get_bus()
    pipeline0.set_state(Gst.State.PLAYING)
    while True:
        if message_bus.have_pending():  # working without a GLib main loop
            message = message_bus.pop()
            if message.type == Gst.MessageType.EOS:  # End-Of-Stream: loop the video, seek to the beginning
                pipeline0.seek(1.0,
                               Gst.Format.TIME,
                               Gst.SeekFlags.FLUSH,
                               Gst.SeekType.SET, 0,
                               Gst.SeekType.NONE, 0)
            elif message.type == Gst.MessageType.ERROR:
                print("ERROR", message)
                break
        time.sleep(0.01)  # tried 0.001, same result

if __name__ == "__main__":
    main()
And it actually works quite well, except for one thing: the seek to the beginning is not really seamless. I can see a tiny glitch. Because the video is an infinite animation, this tiny glitch is quite noticeable. My second attempt was to use a queue for the decoded frames and hook the EOS event:
import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst
import time

if not Gst.init_check()[0]:
    print("gstreamer initialization failed")

source0 = Gst.ElementFactory.make("filesrc", "source0")
assert source0 is not None
source0.set_property("location", "video0.mp4")

qtdemux0 = Gst.ElementFactory.make("qtdemux", "demux0")
assert qtdemux0 is not None

decoder0 = Gst.ElementFactory.make("nxvideodec", "video_decoder0")
assert decoder0 is not None

def demux0_pad_added(demux, pad):
    if pad.name == 'video_0':  # we expect exactly one video stream, the first one
        pad.link(decoder0.get_static_pad("sink"))

qtdemux0.connect("pad-added", demux0_pad_added)

queue = Gst.ElementFactory.make("queue", "queue")
assert queue is not None

video_sink = Gst.ElementFactory.make("nxvideosink", "video_sink")
assert video_sink is not None

pipeline0 = Gst.Pipeline()
assert pipeline0 is not None
pipeline0.add(source0)
pipeline0.add(qtdemux0)
pipeline0.add(decoder0)
pipeline0.add(queue)
pipeline0.add(video_sink)

source0.link(qtdemux0)
# qtdemux0 -> decoder0: dynamic linking
decoder0.link(queue)
queue.link(video_sink)

######################################################

def cb_event(pad, info, *user_data):
    event = info.get_event()
    if event is not None and event.type == Gst.EventType.EOS:
        decoder0.seek(1.0,
                      Gst.Format.TIME,
                      Gst.SeekFlags.FLUSH,
                      Gst.SeekType.SET, 0,
                      Gst.SeekType.NONE, 0)
        return Gst.PadProbeReturn.DROP
    return Gst.PadProbeReturn.PASS

def main():
    dec0_src_pad = decoder0.get_static_pad("src")
    dec0_src_pad.add_probe(Gst.PadProbeType.BLOCK | Gst.PadProbeType.EVENT_DOWNSTREAM, cb_event)
    message_bus = pipeline0.get_bus()
    pipeline0.set_state(Gst.State.PLAYING)
    while True:
        # do nothing
        time.sleep(1)

if __name__ == "__main__":
    main()
After the first EOS event the playback just stalls. I've tried several different things: passing the EOS event through, dropping EOS and adding an offset to the decoder's source pad, sending the seek event to the pipeline itself, and others. But I can't get it to work.
In an effort to understand, I also tried enabling debug mode and wrote my own rough logger of pipeline activity using pad probes. Debug mode was not very useful; the log is very bulky and missing some details. My own log includes upstream/downstream events and the buffers' timing information. However, I still cannot understand what is wrong or how to get it to work.
Obviously I'm not just missing something; I don't understand some fundamental thing about how a GStreamer pipeline works.
So, the question is: what should I do with the second version of the code to get it to work? An additional question: are there tools or techniques to get a clear idea of what is happening inside the pipeline and its contained elements?
I would very much appreciate detailed answers. It is more important for me to understand what I am doing wrong than just to get the program working.
P.S. The program runs under GNU/Linux on the NanoPi S2 board. The video is stored in an MP4 container (without audio) and compressed with H.264. Please feel free to post code samples in any language, not necessarily Python.

Well, okay. I didn't get an answer, so I continued my research and finally found a solution.
Below I'll show two different approaches. First, a direct answer to the question with a working code sample. Second, a different approach, which seems more native to GStreamer and is definitely simpler. Both give the desired result: a seamless video loop.
Corrected code (the answer, but not the best approach)
Changes:
Added a video duration query. On every loop we should increase the time offset by the video duration value. This makes it possible to emulate an infinite contiguous stream.
The seek event emission was moved to a separate thread. According to this post, we cannot emit a seek event from the streaming thread. Also, have a look at this file (link from the mentioned post).
The event callback now drops FLUSH events (a contiguous stream should not contain FLUSH events).
The video decoder was changed from nxvideodec to avdec_h264. This is not relevant to the initial question and was done for a very specific reason.
Code:
import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst
import time
import threading

if not Gst.init_check()[0]:
    print("gstreamer initialization failed")

source0 = Gst.ElementFactory.make("filesrc", "source0")
assert source0 is not None
source0.set_property("location", "video0.mp4")

qtdemux0 = Gst.ElementFactory.make("qtdemux", "demux0")
assert qtdemux0 is not None

decoder0 = Gst.ElementFactory.make("avdec_h264", "video_decoder0")
assert decoder0 is not None

def demux0_pad_added(demux, pad):
    if pad.name == 'video_0':  # we expect exactly one video stream, the first one
        pad.link(decoder0.get_static_pad("sink"))

qtdemux0.connect("pad-added", demux0_pad_added)

queue = Gst.ElementFactory.make("queue", "queue")
assert queue is not None

video_sink = Gst.ElementFactory.make("nxvideosink", "video_sink")
assert video_sink is not None

pipeline0 = Gst.Pipeline()
assert pipeline0 is not None
pipeline0.add(source0)
pipeline0.add(qtdemux0)
pipeline0.add(decoder0)
pipeline0.add(queue)
pipeline0.add(video_sink)

source0.link(qtdemux0)
# qtdemux0 -> decoder0: dynamic linking
decoder0.link(queue)
queue.link(video_sink)

# UPD: get the video duration
pipeline0.set_state(Gst.State.PAUSED)
assert pipeline0.get_state(Gst.CLOCK_TIME_NONE).state == Gst.State.PAUSED
duration_ok, duration = pipeline0.query_duration(Gst.Format.TIME)
assert duration_ok

######################################################

seek_requested = threading.Event()

# UPD: seek thread. Wait for a seek request from the callback and generate the seek event
def seek_thread_func(queue_sink_pad):
    cumulative_offset = 0
    while True:
        seek_requested.wait()
        seek_requested.clear()
        decoder0.seek(1.0,
                      Gst.Format.TIME,
                      Gst.SeekFlags.FLUSH,
                      Gst.SeekType.SET, 0,
                      Gst.SeekType.NONE, 0)
        # Add the offset. This is the important step that makes downstream
        # elements 'see' an infinite contiguous stream.
        cumulative_offset += duration
        queue_sink_pad.set_offset(cumulative_offset)

def cb_event(pad, info):
    event = info.get_event()
    if event is not None:
        if event.type == Gst.EventType.EOS:  # UPD: set the 'seek_requested' flag
            seek_requested.set()
            return Gst.PadProbeReturn.DROP
        elif event.type == Gst.EventType.FLUSH_START or event.type == Gst.EventType.FLUSH_STOP:  # UPD: drop FLUSH
            return Gst.PadProbeReturn.DROP
    return Gst.PadProbeReturn.OK

def main():
    queue_sink_pad = queue.get_static_pad("sink")
    # UPD: create a separate 'seek thread'
    threading.Thread(target=seek_thread_func, daemon=True, args=(queue_sink_pad,)).start()
    dec0_src_pad = decoder0.get_static_pad("src")
    dec0_src_pad.add_probe(Gst.PadProbeType.EVENT_DOWNSTREAM | Gst.PadProbeType.EVENT_FLUSH,
                           cb_event)
    pipeline0.set_state(Gst.State.PLAYING)
    while True:
        # do nothing
        time.sleep(1)

if __name__ == "__main__":
    main()
This code works. The seek is effectively performed while buffers from the queue are still playing. However, I believe it may contain flaws or even bugs. For example, SEGMENT events are passed downstream with the RESET flag, which doesn't seem right. A much cleaner (and probably more correct/reliable) way to implement this approach would be to create a GStreamer plugin that manages the events and adjusts event and buffer timestamps.
But there is a simpler and more native solution:
Using segment seek and SEGMENT_DONE message
According to the documentation:
Segment seeking (using the GST_SEEK_FLAG_SEGMENT) will not emit an EOS at the end of the playback segment but will post a SEGMENT_DONE message on the bus. This message is posted by the element driving the playback in the pipeline, typically a demuxer. After receiving the message, the application can reconnect the pipeline or issue other seek events in the pipeline. Since the message is posted as early as possible in the pipeline, the application has some time to issue a new seek to make the transition seamless. Typically the allowed delay is defined by the buffer sizes of the sinks as well as the size of any queues in the pipeline.
The SEGMENT_DONE message is indeed posted before the queue becomes empty, which gives more than enough time to perform the next seek. So all we need to do is issue a segment seek at the very beginning of playback, then wait for the SEGMENT_DONE message and send the next non-flushing seek event.
Here is a working example:
import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst
import time

if not Gst.init_check()[0]:
    print("gstreamer initialization failed")

source0 = Gst.ElementFactory.make("filesrc", "source0")
assert source0 is not None
source0.set_property("location", "video0.mp4")

qtdemux0 = Gst.ElementFactory.make("qtdemux", "demux0")
assert qtdemux0 is not None

decoder0 = Gst.ElementFactory.make("nxvideodec", "video_decoder0")
assert decoder0 is not None

def demux0_pad_added(demux, pad):
    if pad.name == 'video_0':  # we expect exactly one video stream, the first one
        pad.link(decoder0.get_static_pad("sink"))

qtdemux0.connect("pad-added", demux0_pad_added)

queue = Gst.ElementFactory.make("queue", "queue")
assert queue is not None

video_sink = Gst.ElementFactory.make("nxvideosink", "video_sink")
assert video_sink is not None

pipeline0 = Gst.Pipeline()
assert pipeline0 is not None
pipeline0.add(source0)
pipeline0.add(qtdemux0)
pipeline0.add(decoder0)
pipeline0.add(queue)
pipeline0.add(video_sink)

source0.link(qtdemux0)
# qtdemux0 -> decoder0: dynamic linking
decoder0.link(queue)
queue.link(video_sink)

######################################################

def main():
    message_bus = pipeline0.get_bus()
    pipeline0.set_state(Gst.State.PLAYING)
    pipeline0.get_state(Gst.CLOCK_TIME_NONE)  # wait for the state change to complete
    # Initial non-flushing segment seek
    pipeline0.seek(1.0,
                   Gst.Format.TIME,
                   Gst.SeekFlags.SEGMENT,
                   Gst.SeekType.SET, 0,
                   Gst.SeekType.NONE, 0)
    while True:
        if message_bus.have_pending():  # working without a GLib main loop
            message = message_bus.pop()
            if message.type == Gst.MessageType.SEGMENT_DONE:
                # The next non-flushing segment seek keeps the stream contiguous
                pipeline0.seek(1.0,
                               Gst.Format.TIME,
                               Gst.SeekFlags.SEGMENT,
                               Gst.SeekType.SET, 0,
                               Gst.SeekType.NONE, 0)
            elif message.type == Gst.MessageType.ERROR:
                print("bus ERROR", message)
                break
        time.sleep(0.01)

if __name__ == "__main__":
    main()
With the default queue configuration, the SEGMENT_DONE message is posted approximately one second before the last video frame is played. The non-flushing seek ensures that no frames are lost. Together this gives a perfect result: a truly seamless video loop.
Note: I switch the pipeline to the PLAYING state and then perform the initial non-flushing seek. Alternatively, we can switch the pipeline to the PAUSED state, perform a flushing segment seek, and then switch the pipeline to PLAYING, as sketched below.
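A minimal sketch of that alternative start-up, reusing pipeline0 from the example above:

# Alternative start-up: flushing segment seek while PAUSED, then switch to PLAYING
pipeline0.set_state(Gst.State.PAUSED)
pipeline0.get_state(Gst.CLOCK_TIME_NONE)  # wait for the state change to complete
pipeline0.seek(1.0,
               Gst.Format.TIME,
               Gst.SeekFlags.FLUSH | Gst.SeekFlags.SEGMENT,
               Gst.SeekType.SET, 0,
               Gst.SeekType.NONE, 0)
pipeline0.set_state(Gst.State.PLAYING)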
Note 2: Different sources suggest slightly different solutions. See the links below.
Related topics and sources:
http://gstreamer-devel.966125.n4.nabble.com/Flushing-the-data-in-partial-pipeline-tp4681893p4681899.html
https://cgit.freedesktop.org/gstreamer/gst-editing-services/tree/plugins/nle/nlesource.c
http://gstreamer-devel.966125.n4.nabble.com/Loop-a-file-using-playbin-without-artefacts-td4671952.html
http://gstreamer-devel.966125.n4.nabble.com/attachment/4671975/0/gstseamlessloop.py
https://github.com/GStreamer/gst-plugins-good/blob/master/tests/icles/test-segment-seeks.c

I'm using the SEGMENT_DONE method:
import sys
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst, GLib

Gst.init(None)

pipeline = Gst.parse_launch('uridecodebin uri=file://%s name=d d. ! autovideosink d. ! autoaudiosink' % sys.argv[1])
bus = pipeline.get_bus()
bus.add_signal_watch()

def on_segment_done(bus, msg):
    pipeline.seek(1.0,
                  Gst.Format.TIME,
                  Gst.SeekFlags.SEGMENT,
                  Gst.SeekType.SET, 0,
                  Gst.SeekType.NONE, 0)
    return True

bus.connect('message::segment-done', on_segment_done)

pipeline.set_state(Gst.State.PLAYING)
pipeline.get_state(Gst.CLOCK_TIME_NONE)
pipeline.seek(1.0,
              Gst.Format.TIME,
              Gst.SeekFlags.SEGMENT,
              Gst.SeekType.SET, 0,
              Gst.SeekType.NONE, 0)

GLib.MainLoop().run()

I recommend looking at the gst-play-1.0 application as well as GStreamer's playbin element.
See here: https://github.com/GStreamer/gst-plugins-base/blob/master/tools/gst-play.c
It supports a --gapless option to play many files without gaps, making use of the about-to-finish signal of the playbin element.
This particular application does that with multiple files instead of the same one, but I guess you can give the same file multiple times to test whether it is really seamless or exhibits the same issue as your first approach.
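For a quick test of that idea, here is a minimal, untested sketch that feeds playbin the same URI again from the about-to-finish handler (the file path is an assumption):

import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst, GLib

Gst.init(None)
uri = "file:///path/to/video0.mp4"  # assumed path
playbin = Gst.ElementFactory.make("playbin", "playbin")
playbin.set_property("uri", uri)

def on_about_to_finish(pb):
    # Queue the same URI again; playbin pre-rolls it for a gapless transition
    pb.set_property("uri", uri)

playbin.connect("about-to-finish", on_about_to_finish)
playbin.set_state(Gst.State.PLAYING)
GLib.MainLoop().run()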
Basically I think that EOS simply arrives a bit too late for the first frame to be ready again in time, due to deinit/init/processing of the decoder and the flushing of the pipeline. Also, the flushing will effectively reset your stream: the pipeline prerolls again and syncs to a new clock. From the inside, it really is not a continuous stream.
Alternatively, perhaps GStreamer Editing Services can do this too. But it probably works with multiple tracks, which means it may try to instantiate multiple decoder instances at the same time for parallel processing, and that may be an issue on your board.
A last resort could be to demux the MP4 to a raw bitstream, loop this bitstream continuously into a socket, and decode from that. It will then appear as an infinite bitstream that is being played back.
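A rough, untested sketch of that idea, assuming the H.264 elementary stream has already been extracted to video0.h264 (for example with qtdemux and h264parse writing to a filesink); a pipe stands in for the socket here:

import subprocess

# gst-launch-1.0 reads an apparently endless bitstream from stdin via fdsrc
player = subprocess.Popen(
    ["gst-launch-1.0", "fdsrc", "!", "h264parse", "!",
     "avdec_h264", "!", "autovideosink"],
    stdin=subprocess.PIPE)

while True:  # feed the same elementary stream into the pipe forever
    with open("video0.h264", "rb") as f:
        while True:
            chunk = f.read(65536)
            if not chunk:
                break
            player.stdin.write(chunk)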
Edit:
Perhaps it is also worth a shot to try out multifilesrc with its loop property, to see whether it operates gaplessly or has to perform a flush between files as well.
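For example, an untested sketch using the loop property (assuming Gst is imported and initialized as in the snippets above; whether qtdemux tolerates a looping source this way may vary):

pipeline = Gst.parse_launch(
    "multifilesrc location=video0.mp4 loop=true ! qtdemux ! h264parse ! "
    "avdec_h264 ! videoconvert ! autovideosink")
pipeline.set_state(Gst.State.PLAYING)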

Related

Memory Error Exception using PIL to Process Video Stream on Raspberry Pi

I have written a Python script that runs on a Raspberry Pi and utilizes the PiCamera library to capture video, the Python Imaging Library (PIL) to extract individual frames, and then does some image processing on them using DIPLib and OpenCV. The goal of the program is to continuously capture frames of 3D printer filament and return the diameter value. The actual image processing portion works just fine; it's the frame capture that is causing me issues.
I am following the PiCamera Rapid Capture and Processing tutorial from PiCamera and using the Python threading library as they have done, to ideally utilize more of the Pi's processor for processing and avoid getting bottlenecked and falling behind.
The implementation of this code is built to drop frames when there are no threads available for processing. As I understand it, this should prevent the Pi from storing any extra frames in the buffer for processing, thus preventing a memory overflow (not sure if that's the correct terminology) from happening. Unfortunately, this is exactly what is happening.
I am running the PiCamera at about 3 frames per second, which gives the ~10 threads the ability to process all the incoming images, that is, until the memory starts to overflow. If I leave the script running for 5-10 minutes, the memory (as shown using htop) slowly compounds until it reaches maximum capacity, at which point the script basically drops all incoming frames.
UPDATE: here is the error it shows:
Exception has occurred: MemoryError
exception: no description
  File "/home/pi/Desktop/FilamentPuller_01/pi_camera_threading_03.py", line 45, in run
    img = np.array(Image.open(self.stream))
My theory is that the video recording functionality of PiCamera is holding a buffer of some sort, but I am not sure how to see it or how to stop it from doing that. I've been using VSCode on the Pi to debug, and each thread doesn't seem to be holding any more data at a time than it should; essentially there should be no reason for them to accumulate data from one cycle to the next, as all the variables are reused.
I have included my code below; please let me know what other information I can provide to help with solving this issue. Thank you for any insight you might have.
import io
import sys
import time
import threading
import cv2
import numpy as np
import os
import picamera
from PIL import Image
import csv
from diameter_detection import diameter_detection
from moving_average import MovingAverageFilter
from serial_write import serial_write

##### CAMERA SETTINGS ######
focalValue = 40  # focus
cameraResolution = (1080, 1000)
cameraZoom = (0.3, 0, 0.3, 0.8)
cameraFrameRate = 1

# create an array for storing filtered diameter values
filtered_dia_meas = []

# create moving average filter
ma5_filter = MovingAverageFilter(2)

class ImageProcessor(threading.Thread):
    def __init__(self, owner):
        super(ImageProcessor, self).__init__()
        self.stream = io.BytesIO()
        self.event = threading.Event()
        self.terminated = False
        self.owner = owner
        self.start()

    def run(self):
        # This method runs in a separate thread
        while not self.terminated:
            # Wait for an image to be written to the stream
            if self.event.wait(1):
                try:
                    self.stream.seek(0)
                    # Read the image and do some processing on it
                    img = np.array(Image.open(self.stream))
                    try:
                        diameter = diameter_detection(img)
                    except:
                        serial_write(0)
                        print('Could not read diameter, pausing and retrying...')
                        time.sleep(0.1)
                    # add the diameter to the filter
                    ma5_filter.step(diameter)
                    #filtered_dia_meas.append(ma5_filter.current_state())
                    # display the current filtered diameter in the terminal
                    print(ma5_filter.current_state())
                    try:
                        # attempt to send the diameter to the connected serial device
                        serial_write(ma5_filter.current_state())
                    except:
                        print('Serial write failed!')
                    # Set done to True if you want the script to terminate
                    # at some point
                    #self.owner.done = True
                finally:
                    # Reset the stream and event
                    self.stream.seek(0)
                    self.stream.truncate()
                    self.event.clear()
                    # Return ourselves to the available pool
                    with self.owner.lock:
                        self.owner.pool.append(self)

class ProcessOutput(object):
    def __init__(self):
        self.done = False
        # Construct a pool of 10 image processors along with a lock
        # to control access between threads
        self.lock = threading.Lock()
        self.pool = [ImageProcessor(self) for i in range(10)]
        print('Threaded processes created')
        self.processor = None

    def write(self, buf):
        if buf.startswith(b'\xff\xd8'):
            # New frame; set the current processor going and grab
            # a spare one
            if self.processor:
                self.processor.event.set()
            with self.lock:
                if self.pool:
                    self.processor = self.pool.pop()
                else:
                    # No processor is available, we'll have to skip this frame
                    print('Frame Skipped!')
                    self.processor = None
        if self.processor:
            self.processor.stream.write(buf)

    def flush(self):
        # When told to flush (end of recording), shut
        # down in an orderly fashion. First, add the current processor
        # back to the pool
        if self.processor:
            with self.lock:
                self.pool.append(self.processor)
                self.processor = None
        # Now, empty the pool, joining each thread as we go
        while True:
            with self.lock:
                try:
                    proc = self.pool.pop()
                except IndexError:
                    break  # pool is empty
            proc.terminated = True
            proc.join()

with picamera.PiCamera(resolution=cameraResolution) as camera:
    print('Successfully created camera object')
    camera.framerate = cameraFrameRate
    camera.zoom = (0.3, 0, 0.3, 0.8)
    # set focus motor
    os.system("i2cset -y 0 0x0c %d %d" % (focalValue, 0))
    print('Camera focus set')
    time.sleep(2)
    print('Starting recording...')
    output = ProcessOutput()
    camera.start_recording(output, format='mjpeg')
    while not output.done:
        camera.wait_recording(1)
    camera.stop_recording()

pyaudio stream.read() doesn't return data until stream stops

I'll be very detailed so there is no lack of information about what is happening.
I have a class named Stream, whose main job is to read the audio data output from any selected audio device connected to the PC and return it to another class (this part is not implemented in the code yet; it's just for context). Here is the Stream class:
Stream.py
import pyaudio
import struct
from threading import Thread

from LEDController.BaseClass import BaseClass
from LEDController.Device import Device

class Stream(BaseClass):
    def __init__(self, device: Device):
        super().__init__(__name__)
        self.__p = pyaudio.PyAudio()
        self.stream = None
        # Stream Settings
        self.defaultFrames = 1024 * 8
        self.format = pyaudio.paInt16
        self.channels = device.channels
        self.rate = device.defaultSampleRate
        self.input = True
        self.frames_per_buffer = self.defaultFrames
        self.input_device_index = device.index
        self.as_loopback = True
        self.__open()
        self.stop()

    def __open(self):
        try:
            self.logger.info('Trying to Open Stream')
            # Open Stream
            self.stream = self.__p.open(
                format=self.format,
                channels=self.channels,
                rate=self.rate,
                input=self.input,
                frames_per_buffer=self.defaultFrames,
                input_device_index=self.input_device_index,
                as_loopback=self.as_loopback,
                # wasapi_fill_silence=True
            )
        except Exception as e:
            self.logger.error(e)
        else:
            self.logger.info('Stream Opened')

    def start(self):
        try:
            self.stream.start_stream()
            self.read_thread = Thread(target=self.read)
            self.read_thread.start()
            self.logger.info("Stream Started")
            return True
        except Exception as e:
            return self.logger.error(e)

    def stop(self):
        try:
            self.stream.stop_stream()
            self.logger.info("Stream Stopped")
            return False
        except Exception as e:
            return self.logger.error(e)

    def is_stopped(self):
        return self.stream.is_stopped()

    def read(self):
        try:
            while not self.is_stopped():
                # Process Data
                data = self.stream.read(self.defaultFrames)  # This line doesn't return anything
                data_int = struct.unpack(str(2 * self.defaultFrames) + 'h', data)
                print(data_int)
                # TODO return data
        except Exception as e:
            self.logger.error(e)
What it's doing:
At first, it receives a Device object (the code is not that important; however, I can edit it in if needed).
The Device object has a few pieces of information about the currently selected device (such as the device's default sample rate, channels, etc., as you can see in Stream.__init__()). The Stream class uses those pieces of information to open a PyAudio stream with PyAudio.open(...), and right after that it stops the stream until the user (there is a GUI) starts it.
The Problem
The Stream.read() method is called by a Thread, so it keeps running, processing, and returning the data in real time. However, when I call Stream.start() and it starts the read method, it gets stuck on the following line:
data = self.stream.read(self.defaultFrames)
I've added a print statement to this method to see the data flowing. However, the statement only gets executed after stopping the stream, and no error is shown.
Output
2020-06-03 09:15:21,718 — LEDController.Stream.Stream - 57 — INFO — Stream Started
2020-06-03 09:15:24,371 — LEDController.Stream.Stream - 65 — INFO — Stream Stopped
(output of the print statement; it's quite long so I'll not put it here)
As you can see, no data is printed while the stream is open.
Looking at this output, and after a few tests, I've figured out that self.stream.read(...) is not returning any data while the stream is open; it only returns when the stream stops.
After that conclusion, I started looking for solutions and doing more tests, but I couldn't find anything. For the record, this problem was not happening before (it started yesterday), and I don't remember changing anything in my code.
Things that I've also tried, but haven't worked:
Removing the Thread and calling Stream.read() normally
Reinstalling PyAudio
If you need any additional information, just tell me in the comments. I've tried to be as specific as possible; sorry if I didn't make myself clear enough. Just let me know and I'll update the question. Thanks in advance.

Python Queues memory leaks when called inside thread

I have a Python TCP client and need to send a media (.mpg) file in a loop to a C TCP server.
In the following code, a separate thread reads 10K blocks of the file, sends them, and starts over again in a loop. I think the problem is in my use of the threading module or of the TCP send. I am using Queues to print the logs on my GUI (Tkinter), but after some time it runs out of memory.
UPDATE 1 - Added more code as requested
Thread class "Sendmpgthread" used to create a thread that sends the data:
.
.
    def __init__(self, otherparams, MainGUI):
        .
        .
        self.MainGUI = MainGUI
        self.lock = threading.Lock()
        Thread.__init__(self)

    # This is the one causing the leak; it is called inside the loop
    def pushlog(self, msg):
        self.MainGUI.queuelog.put(msg)

    def send(self, mysocket, block):
        size = len(block)
        pos = 0
        while size > 0:
            try:
                curpos = mysocket.send(block[pos:])
            except socket.timeout, msg:
                if self.over:
                    self.pushlog('Exit Send')
                    return False
            except socket.error, msg:
                print 'Exception'
                return False
            pos = pos + curpos
            size = size - curpos
        return True

    def run(self):
        media_file = None
        mysocket = None
        try:
            mysocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            mysocket.connect((self.ip, string.atoi(self.port)))
            media_file = open(self.file, 'rb')
            while not self.over:
                chunk = media_file.read(10000)
                if not chunk:  # EOF, reset it
                    print 'resetting stream'
                    media_file.seek(0, 0)
                    continue
                if not self.send(mysocket, chunk):  # if some error or the thread is killed
                    break
                # disabling this solves the issue
                self.pushlog('print how much data sent')
        except socket.error, msg:
            print 'print exception'
        except Exception, msg:
            print 'print exception'
        try:
            if media_file is not None:
                media_file.close()
                media_file = None
            if mysocket is not None:
                mysocket.close()
                mysocket = None
        finally:
            print 'some cleaning'

    def kill(self):
        self.over = True
I figured out that it is because of a wrong use of the Queue, as commenting out that piece resolves the issue.
UPDATE 2 - MainGUI class which is called from above Thread class
class MainGUI(Frame):
    def __init__(self, other args):
        # some code
        .
        .
        # from the above thread class, used to send data
        self.send_mpg_status = Sendmpgthread(params)
        self.send_mpg_status.start()
        self.after(100, self.updatelog)
        self.queuelog = Queue.Queue()

    def updatelog(self):
        try:
            msg = self.queuelog.get_nowait()
            while msg is not None:
                self.printlog(msg)
                msg = self.queuelog.get_nowait()
        except Queue.Empty:
            pass
        if self.send_mpg_status:  # only continue when sending
            self.after(100, self.updatelog)

    def printlog(self, msg):
        # print in GUI
Since printlog is adding to a tkinter text control, the memory occupied by that control will grow with each message (it has to store all the log messages in order to display them).
Unless storing all the logs is critical, a common solution is to limit the maximum number of log lines displayed.
A naive implementation is to eliminate extra lines from the beginning once the control reaches a maximum number of messages. Add a function to get the number of lines in the control and then, in printlog, something similar to:
while getnumlines(self.edit) > self.maxloglines:
    self.edit.delete('1.0', '1.end')
(above code not tested)
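For reference, a possible getnumlines helper for a Tkinter Text control (a sketch, also not tested):

def getnumlines(edit):
    # Text indexes are "line.column" strings; "end-1c" points at the last character
    return int(edit.index('end-1c').split('.')[0])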
update: some general guidelines
Keep in mind that what might look like a memory leak does not always mean that a function is wrong, or that the memory is no longer accessible. Many times there is missing cleanup code for a container that is accumulating elements.
A basic general approach for this kind of problems:
form an opinion on what part of the code might be causing the problem
check it by commenting that code out (or keep commenting code until you find a candidate)
look for containers in the responsible code, add code to print their size
decide what elements can be safely removed from that container, and when to do it
test the result
I can't see anything obviously wrong with your code snippet.
To reduce memory usage a bit under Python 2.7, I'd use buffer(block, pos) instead of block[pos:]. Also I'd use mysocket.sendall(block) instead of your send method.
If the ideas above don't solve your problem, then the bug is most probably elsewhere in your code. Could you please post the shortest possible version of the full Python script which still grows out of memory (http://sscce.org/)? That increases your chance of getting useful help.
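For illustration, the send method reduced to sendall(), under the same Python 2 assumptions as the question:

def send(self, mysocket, block):
    try:
        mysocket.sendall(block)  # loops internally until every byte is sent
        return True
    except socket.error:
        return False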
Out of memory errors are indicative of data being generated but not consumed or released. Looking through your code I would guess these two areas:
Messages are being pushed onto a Queue.Queue() instance in the pushlog method. Are they being consumed?
The MainGUI printlog method may be writing text somewhere, e.g. is it continually writing to some kind of GUI widget without any pruning of messages?
From the code you've posted, here's what I would try:
Put a print statement in updatelog. If this is not being continually called for some reason such as a failed after() call, then the queuelog will continue to grow without bound.
If updatelog is continually being called, then turn your focus to printlog. Comment the contents of this function to see if out of memory errors still occur. If they don't, then something in printlog may be holding on to the logged data, you'll need to dig deeper to find out what.
Apart from this, the code could be cleaned up a bit. self.queuelog is not created until after the thread is started, which gives rise to a race condition where the thread may try to write into the queue before it has been created. Creation of queuelog should be moved to somewhere before the thread is started, as sketched below.
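A sketch of that reordering in MainGUI.__init__, keeping the placeholder arguments from the question:

def __init__(self, other_args):
    # Create the queue before the sender thread starts, so the thread
    # can never see a MainGUI without queuelog (removes the race condition)
    self.queuelog = Queue.Queue()
    self.send_mpg_status = Sendmpgthread(params)
    self.send_mpg_status.start()
    self.after(100, self.updatelog)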
updatelog could also be refactored to remove redundancy:
def updatelog(self):
    try:
        while True:
            msg = self.queuelog.get_nowait()
            self.printlog(msg)
    except Queue.Empty:
        pass
And I assume the kill function is called from the GUI thread. To avoid thread race conditions, self.over should be a thread-safe variable such as a threading.Event object:
def __init__(...):
    self.over = threading.Event()

def kill(self):
    self.over.set()
There is no data piling up in your TCP sending loop.
The memory error is probably caused by the logging queue. As you have not posted the complete code, try using the following class for logging:
from threading import Thread, Event, Lock
from time import sleep, time as now

class LogRecord(object):
    __slots__ = ["txt", "params"]
    def __init__(self, txt, params):
        self.txt, self.params = txt, params

class AsyncLog(Thread):
    DEBUGGING_EMULATE_SLOW_IO = True

    def __init__(self, queue_max_size=15, queue_min_size=5):
        Thread.__init__(self)
        self.queue_max_size, self.queue_min_size = queue_max_size, queue_min_size
        self._queuelock = Lock()
        self._queue = []            # protected by _queuelock
        self._discarded_count = 0   # protected by _queuelock
        self._pushed_event = Event()
        self.setDaemon(True)
        self.start()

    def log(self, message, **params):
        with self._queuelock:
            self._queue.append(LogRecord(message, params))
            if len(self._queue) > self.queue_max_size:
                # empty the queue:
                self._discarded_count += len(self._queue) - self.queue_min_size
                del self._queue[self.queue_min_size:]  # empty the queue instead of creating a new list (= [])
            self._pushed_event.set()

    def run(self):
        while 1:  # no reason for an exit condition here
            logs, discarded_count = None, 0
            with self._queuelock:
                if len(self._queue) > 0:
                    # select buffered messages for printing, releasing the lock ASAP
                    logs = self._queue[:]
                    del self._queue[:]
                    self._pushed_event.clear()
                    discarded_count = self._discarded_count
                    self._discarded_count = 0
            if not logs:
                self._pushed_event.wait()
                self._pushed_event.clear()
                continue
            else:
                # print the logs
                if discarded_count:
                    print ".. {0} log records missing ..".format(discarded_count)
                for log_record in logs:
                    self.write_line(log_record)
                    if self.DEBUGGING_EMULATE_SLOW_IO:
                        sleep(0.5)

    def write_line(self, log_record):
        print log_record.txt, " ".join(["{0}={1}".format(name, value) for name, value in log_record.params.items()])

if __name__ == "__main__":
    class MainGUI:
        def __init__(self):
            self._async_log = AsyncLog()
            self.log = self._async_log.log  # stored as a bound method

        def do_this_test(self):
            print "I am about to log 100 times per sec, while text output frequency is 2Hz (twice per second)"
            def log_100_records_in_one_second(itteration_index):
                for i in xrange(100):
                    self.log("something happened", timestamp=now(), session=3.1415, itteration=itteration_index)
                    sleep(0.01)
            for iter_index in range(3):
                log_100_records_in_one_second(iter_index)

    test = MainGUI()
    test.do_this_test()
I have noticed that you do not sleep() anywhere in the sending loop; this means data is read and sent as fast as possible. Note that this is not desirable behavior when playing media files: container timestamps are there to dictate the data rate.
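For illustration only, a crude pacing sketch of that sending loop; the bitrate is an assumed placeholder, and a real player would derive its timing from the container timestamps (assumes time is imported):

CHUNK = 10000
ASSUMED_BITRATE = 2 * 1000 * 1000      # bits per second; a made-up value
delay = CHUNK * 8.0 / ASSUMED_BITRATE  # time one chunk should occupy on the wire

while not self.over:
    chunk = media_file.read(CHUNK)
    if not chunk:                      # EOF, reset the stream
        media_file.seek(0, 0)
        continue
    if not self.send(mysocket, chunk):
        break
    time.sleep(delay)                  # pace instead of sending as fast as possible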

Play Different Video Files Depending On Value of a Variable At Runtime

I have 4 video files (different scenes of a movie).
There's a starting scene that will be played when I run the player.
And before that scene ends, let's say the video player reads an int value (0-100) from an external file (this all happens at runtime), and depending on that int value, it has to determine which scene to play next.
pseudo example:
if (x > 0 && x < 30)
    videoSource = scene2
else if (x >= 30 && x < 60)
    videoSource = scene3
else if (x >= 60 && x <= 100)
    videoSource = scene4
How can I make it change video sources at runtime, depending on that variable?
I don't care about the format of the video file (AVI, MP4, ...); whatever works will be fine.
I don't know how to approach this problem. I've searched for something that has the potential to accomplish this, like pyglet or GStreamer, but I didn't find a clear solution.
EDIT: I have the basic window and video player set up with pyglet, and I was able to play the video (without depending on a variable) using this code:
import pyglet

vidPath = "sample.mpg"
window = pyglet.window.Window()
player = pyglet.media.Player()
source = pyglet.media.StreamingSource()
MediaLoad = pyglet.media.load(vidPath)

player.queue(MediaLoad)
player.play()

@window.event
def on_draw():
    window.clear()
    if player.source and player.source.video_format:
        player.get_texture().blit(0, 0)

pyglet.app.run()
How would I go about this? Guidance in the right direction and/or some sample code would be highly appreciated.
Thanks in advance.
Answer revised based on comments
If your goal is to constantly read a file that is receiving writes from the output of another process, there are a couple of aspects that need to be solved...
You either need to periodically re-read a file that is constantly being overwritten, or you need to tail the output of a file that is being appended to with new values.
Your script currently blocks when you start the pyglet event loop, so this file check will have to happen in a different thread, and then you will have to communicate the update event.
I can't fully comment on step 2 because I have never used pyglet and I am not familiar with how it uses events or signals. But I can at least suggest half of it with a thread.
Here is a super basic example of using a thread to read a file and report when a line is found:
import time
from threading import Thread

class Monitor(object):
    def __init__(self):
        self._stop = False

    def run(self, inputFile, secs=3):
        self._stop = False
        with open(inputFile) as monitor:
            while True:
                line = monitor.readline().strip()
                if line.isdigit():
                    # this is where you would notify somehow
                    print int(line)
                time.sleep(secs)
                if self._stop:
                    return

    def stop(self):
        self._stop = True

if __name__ == "__main__":
    inputFile = "write.txt"
    monitor = Monitor()
    monitorThread = Thread(target=monitor.run, args=(inputFile, 1))
    monitorThread.start()
    try:
        while True:
            time.sleep(.25)
    except:
        monitor.stop()
The sleep loop at the end of the code is just a way to emulate your event loop and block.
Here is a test to show how it would work. First I open a python shell and open a new file:
>>> f = open("write.txt", 'w+', 10)
Then you can start this script. And back in the shell you can start writing lines:
>>> f.write('50\n'); f.flush()
In your script terminal you will see it read and print the lines.
The other way: if the process writing to this file constantly overwrites it, you would instead just re-read the file by calling monitor.seek(0) before readline().
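That variant might look like this (same Monitor class as above, untested):

def run_overwrite(self, inputFile, secs=3):
    # For a file that is overwritten in place rather than appended to
    self._stop = False
    with open(inputFile) as monitor:
        while not self._stop:
            monitor.seek(0)            # jump back and re-read the first line
            line = monitor.readline().strip()
            if line.isdigit():
                print int(line)        # notify here instead of printing
            time.sleep(secs)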
Again, this is a really simple example to get you started; there are more advanced ways of solving this, I am sure. The next step would be to figure out how you can signal the pyglet event loop to call a method that will change your video source.
Update
You should review this section of the pyglet docs on how to create your own event dispatcher: http://pyglet.org/doc/programming_guide/creating_your_own_event_dispatcher.html
Again, without much knowledge of pyglet, here is what it might look like:
class VideoNotifier(pyglet.event.EventDispatcher):
    def updateIndex(self, value):
        self.dispatch_event('on_update_index', value)

VideoNotifier.register_event_type('on_update_index')

videoNotifier = VideoNotifier()

@videoNotifier.event
def on_update_index(newIndex):
    # thread has notified of an update
    # Change the video here
    pass
And for your thread class, you would pass in the dispatcher instance and call updateIndex() to notify it:
class Monitor(object):
    def __init__(self, dispatcher):
        self._stop = False
        self._dispatcher = dispatcher

    def run(self, inputFile, secs=3):
        ...
        ...
        # should notify the video player of the new value
        line = int(line_from_file)
        self._dispatcher.updateIndex(line)
        ...
        ...
Hope that gets you started!

How to capture frames from Apple iSight using Python and PyObjC?

I am trying to capture a single frame from the Apple iSight camera built into a MacBook Pro using Python (version 2.7 or 2.6) and PyObjC (version 2.2).
As a starting point, I used this old StackOverflow question. To verify that it makes sense, I cross-referenced it against Apple's MyRecorder example that it seems to be based on. Unfortunately, my script does not work.
My big questions are:
Am I initializing the camera correctly?
Am I starting the event loop correctly?
Was there any other setup I was supposed to do?
In the example script pasted below, the intended operation is that after calling startImageCapture(), I should start printing "Got a frame..." messages from the CaptureDelegate. However, the camera's light never turns on and the delegate's callback is never executed.
Also, there are no failures during startImageCapture(): all functions claim to succeed, and it successfully finds the iSight device. Analyzing the session object in pdb shows that it has valid input and output objects, the output has a delegate assigned, the device is not in use by another process, and the session is marked as running after startRunning() is called.
Here's the code:
#!/usr/bin/env python2.7

import sys
import os
import time
import objc
import QTKit
import AppKit
from Foundation import NSObject
from Foundation import NSTimer
from PyObjCTools import AppHelper

objc.setVerbose(True)

class CaptureDelegate(NSObject):
    def captureOutput_didOutputVideoFrame_withSampleBuffer_fromConnection_(self, captureOutput,
                                                                           videoFrame, sampleBuffer,
                                                                           connection):
        # This should get called for every captured frame
        print "Got a frame: %s" % videoFrame

class QuitClass(NSObject):
    def quitMainLoop_(self, aTimer):
        # Just stop the main loop.
        print "Quitting main loop."
        AppHelper.stopEventLoop()

def startImageCapture():
    error = None

    # Create a QT Capture session
    session = QTKit.QTCaptureSession.alloc().init()

    # Find the iSight device and open it
    dev = QTKit.QTCaptureDevice.defaultInputDeviceWithMediaType_(QTKit.QTMediaTypeVideo)
    print "Device: %s" % dev
    if not dev.open_(error):
        print "Couldn't open capture device."
        return

    # Create an input instance with the device we found and add it to the session
    input = QTKit.QTCaptureDeviceInput.alloc().initWithDevice_(dev)
    if not session.addInput_error_(input, error):
        print "Couldn't add input device."
        return

    # Create an output instance with a delegate for callbacks and add it to the session
    output = QTKit.QTCaptureDecompressedVideoOutput.alloc().init()
    delegate = CaptureDelegate.alloc().init()
    output.setDelegate_(delegate)
    if not session.addOutput_error_(output, error):
        print "Failed to add output delegate."
        return

    # Start the capture
    print "Initiating capture..."
    session.startRunning()

def main():
    # Open the camera and start capturing frames
    startImageCapture()

    # Set up a timer to quit in 10 seconds (hack for now)
    quitInst = QuitClass.alloc().init()
    NSTimer.scheduledTimerWithTimeInterval_target_selector_userInfo_repeats_(10.0,
                                                                             quitInst,
                                                                             'quitMainLoop:',
                                                                             None,
                                                                             False)
    # Start Cocoa's main event loop
    AppHelper.runConsoleEventLoop(installInterrupt=True)
    print "After event loop"

if __name__ == "__main__":
    main()
Thanks for any help you can provide!
OK, I spent a day diving through the depths of PyObjC and got it working.
For the record, the reason the code in the question did not work: variable scope and garbage collection. The session variable was deleted when it fell out of scope, which happened before the event processor ran. Something must be done to retain it so it is not freed before it has time to run.
Moving everything into a class and making session a class variable made the callbacks start working. Additionally, the code below demonstrates getting the frame's pixel data into bitmap format and saving it via Cocoa calls, and also how to copy it back into Python's world-view as a buffer or string.
The script below will capture a single frame:
#!/usr/bin/env python2.7
#
# camera.py -- by Trevor Bentley (02/04/2011)
#
# This work is licensed under a Creative Commons Attribution 3.0 Unported License.
#
# Run from the command line on an Apple laptop running OS X 10.6, this script will
# take a single frame capture using the built-in iSight camera and save it to disk
# using three methods.
#

import sys
import os
import time
import objc
import QTKit
from AppKit import *
from Foundation import NSObject
from Foundation import NSTimer
from PyObjCTools import AppHelper

class NSImageTest(NSObject):
    def init(self):
        self = super(NSImageTest, self).init()
        if self is None:
            return None
        self.session = None
        self.running = True
        return self

    def captureOutput_didOutputVideoFrame_withSampleBuffer_fromConnection_(self, captureOutput,
                                                                           videoFrame, sampleBuffer,
                                                                           connection):
        self.session.stopRunning()  # I just want one frame

        # Get a bitmap representation of the frame using CoreImage and Cocoa calls
        ciimage = CIImage.imageWithCVImageBuffer_(videoFrame)
        rep = NSCIImageRep.imageRepWithCIImage_(ciimage)
        bitrep = NSBitmapImageRep.alloc().initWithCIImage_(ciimage)
        bitdata = bitrep.representationUsingType_properties_(NSBMPFileType, objc.NULL)

        # Save the image to disk using Cocoa
        t0 = time.time()
        bitdata.writeToFile_atomically_("grab.bmp", False)
        t1 = time.time()
        print "Cocoa saved in %.5f seconds" % (t1 - t0)

        # Save a read-only buffer of the image to disk using Python
        t0 = time.time()
        bitbuf = bitdata.bytes()
        f = open("python.bmp", "w")
        f.write(bitbuf)
        f.close()
        t1 = time.time()
        print "Python saved buffer in %.5f seconds" % (t1 - t0)

        # Save a string copy of the buffer to disk using Python
        t0 = time.time()
        bitbufstr = str(bitbuf)
        f = open("python2.bmp", "w")
        f.write(bitbufstr)
        f.close()
        t1 = time.time()
        print "Python saved string in %.5f seconds" % (t1 - t0)

        # Will exit on the next execution of quitMainLoop_()
        self.running = False

    def quitMainLoop_(self, aTimer):
        # Stop the main loop after one frame is captured. Called rapidly by the timer.
        if not self.running:
            AppHelper.stopEventLoop()

    def startImageCapture(self, aTimer):
        error = None
        print "Finding camera"

        # Create a QT Capture session
        self.session = QTKit.QTCaptureSession.alloc().init()

        # Find the iSight device and open it
        dev = QTKit.QTCaptureDevice.defaultInputDeviceWithMediaType_(QTKit.QTMediaTypeVideo)
        print "Device: %s" % dev
        if not dev.open_(error):
            print "Couldn't open capture device."
            return

        # Create an input instance with the device we found and add it to the session
        input = QTKit.QTCaptureDeviceInput.alloc().initWithDevice_(dev)
        if not self.session.addInput_error_(input, error):
            print "Couldn't add input device."
            return

        # Create an output instance with a delegate for callbacks and add it to the session
        output = QTKit.QTCaptureDecompressedVideoOutput.alloc().init()
        output.setDelegate_(self)
        if not self.session.addOutput_error_(output, error):
            print "Failed to add output delegate."
            return

        # Start the capture
        print "Initiating capture..."
        self.session.startRunning()

    def main(self):
        # Callback that quits after a frame is captured
        NSTimer.scheduledTimerWithTimeInterval_target_selector_userInfo_repeats_(0.1,
                                                                                 self,
                                                                                 'quitMainLoop:',
                                                                                 None,
                                                                                 True)
        # Turn on the camera and start the capture
        self.startImageCapture(None)

        # Start Cocoa's main event loop
        AppHelper.runConsoleEventLoop(installInterrupt=True)
        print "Frame capture completed."

if __name__ == "__main__":
    test = NSImageTest.alloc().init()
    test.main()
QTKit is deprecated and PyObjC is a big dependency (and seems to be tricky to build if you want it in Homebrew). Plus, PyObjC did not have most of AVFoundation, so I created a simple camera extension for Python that uses AVFoundation to record a video or snap a picture. It requires no dependencies (Cython intermediate files are committed to avoid the need for Cython for most users).
It should be possible to build it like this:
pip install -e git+https://github.com/dashesy/pyavfcam.git
Then we can use it to take a picture:
import pyavfcam

# Open the default video source
cam = pyavfcam.AVFCam(sinks='image')
frame = cam.snap_picture('test.jpg')  # frame is a memory buffer; np.asarray(frame) can retrieve it
Not related to this question, but if the AVFCam class is subclassed, the overridden methods will be called with the result.
