I want to add playlist functionality to my music player. The first track in the list plays, and typing "next" in the console and hitting return should start the next track, but instead the current song just stops and nothing else happens.
Setting the state to GST_STATE_READY instead of GST_STATE_NULL before changing the "location" also does not work.
Can someone correct my code and tell me where I am wrong?
import pygst, gobject, time, sys
pygst.require("0.10")
import gst

class AudioPlayer:
    def __init__(self):
        self.songs = ['Holy Diver.mp3', 'Paranoid.mp3', 'Fast as a Shark.mp3']

        # create a new gstreamer pipeline
        self.pipeline = gst.Pipeline("mypipeline")

        # add a file source to the pipeline
        self.filesrc = gst.element_factory_make("filesrc", "source")
        self.pipeline.add(self.filesrc)

        # add a generic decoder to the pipeline and link it to the source
        self.decode = gst.element_factory_make("decodebin", "decode")
        self.decode.connect("new-decoded-pad", self.decode_link)
        self.pipeline.add(self.decode)
        self.filesrc.link(self.decode)

        # add a converter to the pipeline
        self.convert = gst.element_factory_make("audioconvert", "convert")
        self.pipeline.add(self.convert)

        # add an alsa sink to the pipeline and link it to the converter
        self.sink = gst.element_factory_make("alsasink", "sink")
        self.pipeline.add(self.sink)
        self.convert.link(self.sink)

        # start playing
        self.filesrc.set_property("location", self.songs.pop(0))
        self.pipeline.set_state(gst.STATE_PLAYING)

    def decode_link(self, dbin, pad, islast):
        pad.link(self.convert.get_pad("sink"))

    def next(self):
        self.convert.unlink(self.sink)
        self.filesrc.set_state(gst.STATE_NULL)
        self.filesrc.set_property("location", self.songs.pop(0))
        self.convert.link(self.sink)
        self.pipeline.set_state(gst.STATE_PLAYING)
        return True

player = AudioPlayer()
loop = gobject.MainLoop()
gobject.threads_init()
context = loop.get_context()

while 1:
    value = sys.stdin.readline()
    if value == "next\n":
        player.next()
    context.iteration(True)
In next you are setting the wrong thing to NULL:
self.filesrc.set_state(gst.STATE_NULL)
should be
self.pipeline.set_state(gst.STATE_NULL)
That is the crux of your problem: you aren't stopping the pipeline. As a side issue, you also do not need to unlink and relink self.convert.
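For reference, a minimal sketch of next() with that fix applied (assuming the rest of the class stays as above; decodebin should re-emit new-decoded-pad after the NULL transition, so decode_link relinks the converter automatically):

    def next(self):
        self.pipeline.set_state(gst.STATE_NULL)
        self.filesrc.set_property("location", self.songs.pop(0))
        self.pipeline.set_state(gst.STATE_PLAYING)
        return True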
I'm trying to loop video playback using gstreamer and its Python bindings. My first attempt was to hook the EOS message and generate a seek event for the pipeline:
import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst
import time

if not Gst.init_check()[0]:
    print("gstreamer initialization failed")

source0 = Gst.ElementFactory.make("filesrc", "source0")
assert source0 is not None
source0.set_property("location", "video0.mp4")

qtdemux0 = Gst.ElementFactory.make("qtdemux", "demux0")
assert qtdemux0 is not None

decoder0 = Gst.ElementFactory.make("nxvideodec", "video_decoder0")
assert decoder0 is not None

def demux0_pad_added(demux, pad):
    if pad.name == 'video_0':  # We expect exactly one video stream (the first)
        pad.link(decoder0.get_static_pad("sink"))
qtdemux0.connect("pad-added", demux0_pad_added)

video_sink = Gst.ElementFactory.make("nxvideosink", "video_sink")
assert video_sink is not None

pipeline0 = Gst.Pipeline()
assert pipeline0 is not None
pipeline0.add(source0)
pipeline0.add(qtdemux0)
pipeline0.add(decoder0)
pipeline0.add(video_sink)

source0.link(qtdemux0)
"""qtdemux0 -> decoder0 dynamic linking"""
decoder0.link(video_sink)

######################################################

def main():
    message_bus = pipeline0.get_bus()
    pipeline0.set_state(Gst.State.PLAYING)
    while True:
        if message_bus.have_pending():  # Working without glib mainloop
            message = message_bus.pop()
            if message.type == Gst.MessageType.EOS:  # End-Of-Stream: loop the video, seek to beginning
                pipeline0.seek(1.0,
                               Gst.Format.TIME,
                               Gst.SeekFlags.FLUSH,
                               Gst.SeekType.SET, 0,
                               Gst.SeekType.NONE, 0)
            elif message.type == Gst.MessageType.ERROR:
                print("ERROR", message)
                break
        time.sleep(0.01)  # Tried 0.001 - same result

if __name__ == "__main__":
    main()
And it actually works quite well, except for one thing: the seek to the beginning is not really seamless; I can see a tiny glitch. Because the video is an infinite animation, this tiny glitch is quite noticeable. My second attempt was to use a queue for the decoded frames and hook the EOS event:
import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst
import time

if not Gst.init_check()[0]:
    print("gstreamer initialization failed")

source0 = Gst.ElementFactory.make("filesrc", "source0")
assert source0 is not None
source0.set_property("location", "video0.mp4")

qtdemux0 = Gst.ElementFactory.make("qtdemux", "demux0")
assert qtdemux0 is not None

decoder0 = Gst.ElementFactory.make("nxvideodec", "video_decoder0")
assert decoder0 is not None

def demux0_pad_added(demux, pad):
    if pad.name == 'video_0':  # We expect exactly one video stream (the first)
        pad.link(decoder0.get_static_pad("sink"))
qtdemux0.connect("pad-added", demux0_pad_added)

queue = Gst.ElementFactory.make("queue", "queue")
assert queue is not None

video_sink = Gst.ElementFactory.make("nxvideosink", "video_sink")
assert video_sink is not None

pipeline0 = Gst.Pipeline()
assert pipeline0 is not None
pipeline0.add(source0)
pipeline0.add(qtdemux0)
pipeline0.add(decoder0)
pipeline0.add(queue)
pipeline0.add(video_sink)

source0.link(qtdemux0)
"""qtdemux0 -> decoder0 dynamic linking"""
decoder0.link(queue)
queue.link(video_sink)

######################################################

def cb_event(pad, info, *user_data):
    event = info.get_event()
    if event is not None and event.type == Gst.EventType.EOS:
        decoder0.seek(1.0,
                      Gst.Format.TIME,
                      Gst.SeekFlags.FLUSH,
                      Gst.SeekType.SET, 0,
                      Gst.SeekType.NONE, 0)
        return Gst.PadProbeReturn.DROP
    return Gst.PadProbeReturn.PASS

def main():
    dec0_src_pad = decoder0.get_static_pad("src")
    dec0_src_pad.add_probe(Gst.PadProbeType.BLOCK | Gst.PadProbeType.EVENT_DOWNSTREAM, cb_event)

    message_bus = pipeline0.get_bus()
    pipeline0.set_state(Gst.State.PLAYING)
    while True:
        # do nothing
        time.sleep(1)

if __name__ == "__main__":
    main()
After the first EOS event, playback just stalls. I've tried several different things: passing the EOS event through, dropping EOS and adding an offset to the decoder's source pad, sending the seek event to the pipeline itself, and others. But I can't get it to work.
In an effort to understand, I also enabled debug mode and wrote my own rough logger of pipeline activity using pad probes. Debug mode was not very useful: the log is very bulky and missing some details. My own log includes upstream/downstream events and buffer timing information. However, I still cannot understand what is wrong and how to get it to work.
Obviously I am not just missing something; I do not understand something fundamental about how a gstreamer pipeline works.
So, the question is: what should I do with the second version of the code to get it to work? An additional question: are there tools or techniques for getting a clear picture of what is happening inside the pipeline and its elements?
I would very much appreciate detailed answers. It is more important for me to understand what I am doing wrong than to just get the program working.
P.S. The program runs under GNU/Linux on a NanoPi S2 board. The video is stored in an MP4 container (without audio) and encoded with H.264. Please feel free to post code samples in any language, not necessarily Python.
Well, okay. I didn't get an answer, so I continued researching and finally found a solution.
Below I'll show two different approaches. The first is a direct answer to the question, with a working code sample. The second is a different approach, which seems more native to gstreamer and is definitely simpler. Both give the desired result: a seamless video loop.
Corrected code (the answer, but not the best approach)
Changes:
Added a video duration query. On every loop we increase the time offset by the video's duration, which makes it possible to emulate an infinite contiguous stream.
The seek event emission moved to a separate thread. According to this post, we cannot emit a seek event from the streaming thread. Also, have a look at this file (link from the mentioned post).
The event callback now drops FLUSH events (a contiguous stream should not contain FLUSH events).
The video decoder changed from nxvideodec to avdec_h264. This is not relevant to the initial question and was done for a very specific reason.
Code:
import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst
import time
import threading

if not Gst.init_check()[0]:
    print("gstreamer initialization failed")

source0 = Gst.ElementFactory.make("filesrc", "source0")
assert source0 is not None
source0.set_property("location", "video0.mp4")

qtdemux0 = Gst.ElementFactory.make("qtdemux", "demux0")
assert qtdemux0 is not None

decoder0 = Gst.ElementFactory.make("avdec_h264", "video_decoder0")
assert decoder0 is not None

def demux0_pad_added(demux, pad):
    if pad.name == 'video_0':  # We expect exactly one video stream (the first)
        pad.link(decoder0.get_static_pad("sink"))
qtdemux0.connect("pad-added", demux0_pad_added)

queue = Gst.ElementFactory.make("queue", "queue")
assert queue is not None

video_sink = Gst.ElementFactory.make("nxvideosink", "video_sink")
assert video_sink is not None

pipeline0 = Gst.Pipeline()
assert pipeline0 is not None
pipeline0.add(source0)
pipeline0.add(qtdemux0)
pipeline0.add(decoder0)
pipeline0.add(queue)
pipeline0.add(video_sink)

source0.link(qtdemux0)
"""qtdemux0 -> decoder0 dynamic linking"""
decoder0.link(queue)
queue.link(video_sink)

# UPD: Get video duration
pipeline0.set_state(Gst.State.PAUSED)
assert pipeline0.get_state(Gst.CLOCK_TIME_NONE).state == Gst.State.PAUSED
duration_ok, duration = pipeline0.query_duration(Gst.Format.TIME)
assert duration_ok

######################################################

seek_requested = threading.Event()

# UPD: Seek thread. Wait for a seek request from the callback and generate the seek event
def seek_thread_func(queue_sink_pad):
    cumulative_offset = 0
    while True:
        seek_requested.wait()
        seek_requested.clear()

        decoder0.seek(1.0,
                      Gst.Format.TIME,
                      Gst.SeekFlags.FLUSH,
                      Gst.SeekType.SET, 0,
                      Gst.SeekType.NONE, 0)

        # Add offset. This is an important step to ensure that downstream
        # elements will 'see' an infinite contiguous stream
        cumulative_offset += duration
        queue_sink_pad.set_offset(cumulative_offset)

def cb_event(pad, info):
    event = info.get_event()
    if event is not None:
        if event.type == Gst.EventType.EOS:  # UPD: Set 'seek_requested' flag
            seek_requested.set()
            return Gst.PadProbeReturn.DROP
        elif event.type == Gst.EventType.FLUSH_START or event.type == Gst.EventType.FLUSH_STOP:  # UPD: Drop FLUSH
            return Gst.PadProbeReturn.DROP
    return Gst.PadProbeReturn.OK

def main():
    queue_sink_pad = queue.get_static_pad("sink")

    # UPD: Create separate 'seek thread'
    threading.Thread(target=seek_thread_func, daemon=True, args=(queue_sink_pad,)).start()

    dec0_src_pad = decoder0.get_static_pad("src")
    dec0_src_pad.add_probe(Gst.PadProbeType.EVENT_DOWNSTREAM | Gst.PadProbeType.EVENT_FLUSH,
                           cb_event)

    pipeline0.set_state(Gst.State.PLAYING)
    while True:
        # do nothing
        time.sleep(1)

if __name__ == "__main__":
    main()
This code works: the seek is effectively performed while buffers from the queue are still playing. However, I believe it may contain flaws or even bugs. For example, SEGMENT events are passed downstream with the RESET flag, which doesn't seem right. A much cleaner (and probably more correct and reliable) way to implement this approach would be a gstreamer plugin that manages the events and adjusts event and buffer timestamps.
But there is a simpler and more native solution:
Using segment seek and SEGMENT_DONE message
According to the documentation:
Segment seeking (using the GST_SEEK_FLAG_SEGMENT) will not emit an EOS
at the end of the playback segment but will post a SEGMENT_DONE
message on the bus. This message is posted by the element driving the
playback in the pipeline, typically a demuxer. After receiving the
message, the application can reconnect the pipeline or issue other
seek events in the pipeline. Since the message is posted as early as
possible in the pipeline, the application has some time to issue a new
seek to make the transition seamless. Typically the allowed delay is
defined by the buffer sizes of the sinks as well as the size of any
queues in the pipeline.
The SEGMENT_DONE message is indeed posted before the queue runs empty, which leaves more than enough time to perform the next seek. So all we need to do is issue a segment seek at the very beginning of playback, then wait for the SEGMENT_DONE message and send the next non-flushing seek event.
Here is a working example:
import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst
import time

if not Gst.init_check()[0]:
    print("gstreamer initialization failed")

source0 = Gst.ElementFactory.make("filesrc", "source0")
assert source0 is not None
source0.set_property("location", "video0.mp4")

qtdemux0 = Gst.ElementFactory.make("qtdemux", "demux0")
assert qtdemux0 is not None

decoder0 = Gst.ElementFactory.make("nxvideodec", "video_decoder0")
assert decoder0 is not None

def demux0_pad_added(demux, pad):
    if pad.name == 'video_0':  # We expect exactly one video stream (the first)
        pad.link(decoder0.get_static_pad("sink"))
qtdemux0.connect("pad-added", demux0_pad_added)

queue = Gst.ElementFactory.make("queue", "queue")
assert queue is not None

video_sink = Gst.ElementFactory.make("nxvideosink", "video_sink")
assert video_sink is not None

pipeline0 = Gst.Pipeline()
assert pipeline0 is not None
pipeline0.add(source0)
pipeline0.add(qtdemux0)
pipeline0.add(decoder0)
pipeline0.add(queue)
pipeline0.add(video_sink)

source0.link(qtdemux0)
"""qtdemux0 -> decoder0 dynamic linking"""
decoder0.link(queue)
queue.link(video_sink)

######################################################

def main():
    message_bus = pipeline0.get_bus()
    pipeline0.set_state(Gst.State.PLAYING)
    pipeline0.get_state(Gst.CLOCK_TIME_NONE)
    pipeline0.seek(1.0,
                   Gst.Format.TIME,
                   Gst.SeekFlags.SEGMENT,
                   Gst.SeekType.SET, 0,
                   Gst.SeekType.NONE, 0)
    while True:
        if message_bus.have_pending():  # Working without glib mainloop
            message = message_bus.pop()
            if message.type == Gst.MessageType.SEGMENT_DONE:
                pipeline0.seek(1.0,
                               Gst.Format.TIME,
                               Gst.SeekFlags.SEGMENT,
                               Gst.SeekType.SET, 0,
                               Gst.SeekType.NONE, 0)
            elif message.type == Gst.MessageType.ERROR:
                print("bus ERROR", message)
                break
        time.sleep(0.01)

if __name__ == "__main__":
    main()
With the default queue configuration, the SEGMENT_DONE message is posted approximately 1 second before the last video frame is played. The non-flushing seek ensures that no frames are lost. Together this gives a perfect result: a truly seamless video loop.
Note: I switch the pipeline to the PLAYING state and then perform the initial non-flushing seek. Alternatively, we can switch the pipeline to the PAUSED state, perform a flushing segment seek, and then switch the pipeline to the PLAYING state; see the sketch below.
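For completeness, a sketch of that alternative startup sequence (same pipeline0 as above; this variant is described but not tested here):

pipeline0.set_state(Gst.State.PAUSED)
pipeline0.get_state(Gst.CLOCK_TIME_NONE)  # wait for preroll to complete
pipeline0.seek(1.0,
               Gst.Format.TIME,
               Gst.SeekFlags.FLUSH | Gst.SeekFlags.SEGMENT,
               Gst.SeekType.SET, 0,
               Gst.SeekType.NONE, 0)
pipeline0.set_state(Gst.State.PLAYING)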
Note 2: Different sources suggest slightly different solutions. See the links below.
Related topics and sources:
http://gstreamer-devel.966125.n4.nabble.com/Flushing-the-data-in-partial-pipeline-tp4681893p4681899.html
https://cgit.freedesktop.org/gstreamer/gst-editing-services/tree/plugins/nle/nlesource.c
http://gstreamer-devel.966125.n4.nabble.com/Loop-a-file-using-playbin-without-artefacts-td4671952.html
http://gstreamer-devel.966125.n4.nabble.com/attachment/4671975/0/gstseamlessloop.py
https://github.com/GStreamer/gst-plugins-good/blob/master/tests/icles/test-segment-seeks.c
I'm using the SEGMENT_DONE method:
import sys
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst, GLib

Gst.init(None)

pipeline = Gst.parse_launch('uridecodebin uri=file://%s name=d d. ! autovideosink d. ! autoaudiosink' % sys.argv[1])
bus = pipeline.get_bus()
bus.add_signal_watch()

def on_segment_done(bus, msg):
    pipeline.seek(1.0,
                  Gst.Format.TIME,
                  Gst.SeekFlags.SEGMENT,
                  Gst.SeekType.SET, 0,
                  Gst.SeekType.NONE, 0)
    return True
bus.connect('message::segment-done', on_segment_done)

pipeline.set_state(Gst.State.PLAYING)
pipeline.get_state(Gst.CLOCK_TIME_NONE)
pipeline.seek(1.0,
              Gst.Format.TIME,
              Gst.SeekFlags.SEGMENT,
              Gst.SeekType.SET, 0,
              Gst.SeekType.NONE, 0)

GLib.MainLoop().run()
I recommend looking at the gst-play-1.0 application as well as GStreamer's playbin element.
See here: https://github.com/GStreamer/gst-plugins-base/blob/master/tools/gst-play.c
It supports a --gapless option to play many files without gaps, making use of the about-to-finish signal of the playbin element.
That application does it with multiple files instead of the same one, but you can try supplying the same file multiple times to test whether it is really seamless or exhibits the same issue as your first approach.
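If you want to try that quickly, a minimal sketch of the same-file gapless idea with playbin might look like this (the file path is an assumption; the about-to-finish callback simply queues the same URI again):

import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst, GLib

Gst.init(None)
uri = "file:///path/to/video0.mp4"  # assumption: adjust to your file

playbin = Gst.ElementFactory.make("playbin", None)
playbin.set_property("uri", uri)

def on_about_to_finish(pb):
    # Queue the same URI again so playbin can pre-roll it for a gapless transition
    pb.set_property("uri", uri)

playbin.connect("about-to-finish", on_about_to_finish)
playbin.set_state(Gst.State.PLAYING)
GLib.MainLoop().run()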
Basically, I think EOS just arrives a bit too late for the first frame to be ready again in time, due to the deinit/init/processing of the decoder and the flushing of the pipeline. Flushing also effectively resets your stream: the pipeline goes into prerolling again and syncs to a new clock. From the inside it really is not a continuous stream.
Alternatively, perhaps GStreamer Editing Services could do this too. But it probably works with multiple tracks, which means it may try to instantiate multiple decoder instances at the same time for parallel processing - which may be an issue on your board.
A last resort could be to demux the MP4 to a raw bitstream, loop that bitstream continuously into a socket, and decode from it. It would then appear as an infinite bitstream being played back.
Edit:
Perhaps it is also worth a shot to try multifilesrc with its loop property, to see whether it operates gaplessly or also has to perform a flush between files.
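A rough, untested sketch of that experiment (whether qtdemux tolerates the looped byte stream is exactly what you would be testing; the decode chain after multifilesrc is just one plausible assumption):

import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst, GLib

Gst.init(None)
pipeline = Gst.parse_launch(
    "multifilesrc location=video0.mp4 loop=true ! "
    "qtdemux ! h264parse ! avdec_h264 ! videoconvert ! autovideosink")
pipeline.set_state(Gst.State.PLAYING)
GLib.MainLoop().run()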
I'm trying to create a basic media player using libvlc, controlled through dbus. I'm using the gtk and libvlc bindings for Python. The code is based on the official example from the vlc website.
The only thing I modified is adding the dbus interface to the vlc instance:
# Create a single vlc.Instance() to be shared by (possible) multiple players.
instance = vlc.Instance()
print vlc.libvlc_add_intf(instance, "dbus")  # this is what I added; returns 0, which is OK
All is well: the demo works and plays any video file. But for some reason the dbus control module doesn't work (I can't believe I just said the dreaded "doesn't work" words).
I already have working client dbus code which binds to the MPRIS 2 interface. I can control a normal instance of the VLC media player just fine, but with the above example nothing happens. The dbus control module is loaded properly, since libvlc_add_intf doesn't return an error and I can see the MPRIS 2 service in D-Feet (org.mpris.MediaPlayer2.vlc).
Even in D-Feet, calling any of the methods of the dbus vlc object returns no error, but nothing happens.
Do I need to configure something else in order to make the dbus module control the libvlc player?
Thanks
UPDATE
It seems that creating the vlc instance with higher verbosity shows that the DBus calls are received, but they have no effect whatsoever on the player itself.
Also, adding the RC interface to the instance instead of DBus has problems too: when I run the example from the command line it drops me into the RC console where I can type control commands, but the behaviour is the same as with DBus - nothing happens, no error, nada, absolutely nothing. It ignores the commands completely.
Any thoughts?
UPDATE 2
Here is the code that uses libvlc to create a basic player:
from dbus.mainloop.glib import DBusGMainLoop
import gtk
import gobject
import sys
import vlc
from gettext import gettext as _

# Create a single vlc.Instance() to be shared by (possible) multiple players.
instance = vlc.Instance("--one-instance --verbose 2")

class VLCWidget(gtk.DrawingArea):
    """Simple VLC widget.

    Its player can be controlled through the 'player' attribute, which
    is a vlc.MediaPlayer() instance.
    """
    def __init__(self, *p):
        gtk.DrawingArea.__init__(self)
        self.player = instance.media_player_new()
        def handle_embed(*args):
            if sys.platform == 'win32':
                self.player.set_hwnd(self.window.handle)
            else:
                self.player.set_xwindow(self.window.xid)
            return True
        self.connect("map", handle_embed)
        self.set_size_request(640, 480)

class VideoPlayer:
    """Example simple video player.
    """
    def __init__(self):
        self.vlc = VLCWidget()

    def main(self, fname):
        self.vlc.player.set_media(instance.media_new(fname))
        w = gtk.Window()
        w.add(self.vlc)
        w.show_all()
        w.connect("destroy", gtk.main_quit)
        self.vlc.player.play()
        DBusGMainLoop(set_as_default = True)
        gtk.gdk.threads_init()
        gobject.MainLoop().run()

if __name__ == '__main__':
    if not sys.argv[1:]:
        print "You must provide at least 1 movie filename"
        sys.exit(1)
    if len(sys.argv[1:]) == 1:
        # Only 1 file. Simple interface
        p = VideoPlayer()
        p.main(sys.argv[1])
The script can be run from the command line like this:
python example_vlc.py file.avi
The client code which connects to the vlc dbus object is too long to post, so instead assume I'm using D-Feet to get the bus connection and post messages to it.
Once the example is running, I can see the player's dbus interface in D-Feet, but I am unable to control it. Is there anything else I should add to the code above to make it work?
I can't see the implementation of your event loop, so it's hard to tell what might be causing commands to go unrecognized or be dropped. Is it possible your threads are losing the stack trace information and are actually throwing exceptions?
You might get more responses if you added a pseudo-code version of your event loop and DBus command parsing, or a simplified version of them.
The working programs found on nullege.com use ctypes. One, which acted as a server, used rpyc; I'm ignoring that one.
The advantage of ctypes over dbus is a huge speed gain (calling the C library code directly rather than interacting through Python), as well as not requiring the library to implement the dbus interface.
I didn't find any examples using gtk or dbus ;-(
Notable examples
PyNuvo vlc.py
Milonga Tango DJing program
Using dbus / gtk
dbus uses the gobject mainloop, not the gtk mainloop. Totally different beasts. Don't cross the streams! Some fixes:
You don't need this; threads are evil:
gtk.gdk.threads_init()
gtk.main_quit() won't work when using the gobject MainLoop, and the gobject mainloop can't live within your class:
if __name__ == '__main__':
    loop = gobject.MainLoop()
    loop.run()
Pass the loop into your class, then call this to quit the app:
loop.quit()
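A minimal sketch of that structure (the class and method names here are hypothetical):

import gobject

class App(object):
    def __init__(self, loop):
        self.loop = loop  # the mainloop is created outside and passed in

    def shutdown(self):
        self.loop.quit()  # instead of gtk.main_quit()

if __name__ == '__main__':
    loop = gobject.MainLoop()
    app = App(loop)
    loop.run()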
dbus (notify) / gtk working example
I'm not going to write your vlc app for you, but here is a working example of using dbus with gtk. Just adapt it to vlc. I assume you took my advice on gtk above. As you know, any instance of DesktopNotify must be used while the gobject MainLoop is running, but you can place it anywhere within your main class.
desktop_notify.py
from __future__ import print_function

import gobject
import time, dbus
from dbus.exceptions import DBusException
from dbus.mainloop.glib import DBusGMainLoop

class DesktopNotify(object):
    """ Notify-OSD ubuntu's implementation has a 20 message limit. You've been warned.
    When the queue is full, delete an old message before adding new ones."""

    # Static variables
    dbus_loop = None
    dbus_proxy = None
    dbus_interface = None
    loop = None

    @property
    def dbus_name(self):
        return "org.freedesktop.Notifications"

    @property
    def dbus_path(self):
        return "/org/freedesktop/Notifications"

    def __init__(self, strInit="initializing passive notification messaging"):
        strProxyInterface = "<class 'dbus.proxies.Interface'>"
        """ Reinitializing dbus when making a 2nd class instance would be bad"""
        if str(type(DesktopNotify.dbus_interface)) != strProxyInterface:
            DesktopNotify.dbus_loop = DBusGMainLoop(set_as_default=True)
            bus = dbus.SessionBus(mainloop=DesktopNotify.dbus_loop)
            DesktopNotify.dbus_proxy = bus.get_object(self.dbus_name, self.dbus_path)
            DesktopNotify.dbus_interface = dbus.Interface(DesktopNotify.dbus_proxy, self.dbus_name)
            DesktopNotify.dbus_proxy.connect_to_signal("NotificationClosed", self.handle_closed)

    def handle_closed(self, *arg, **kwargs):
        """ Notification closed by user or by code. Print message or not"""
        lngNotificationId = int(arg[0])
        lngReason = int(arg[1])

    def pop(self, lngID):
        """ ID stored in database, but I'm going to skip this and keep it simple"""
        try:
            DesktopNotify.dbus_interface.CloseNotification(lngID)
        except DBusException as why:
            print(self.__class__.__name__ + ".pop probably no message with id", lngID, why)
        finally:
            pass

    def push(self, strMsgTitle, strMsg, dictField):
        """ Create a new passive notification (took out retrying and handling full queues)"""
        now = time.localtime(time.time())
        strMsgTime = strMsg + " " + time.asctime(now)
        del now
        strMsgTime = strMsgTime % dictField
        app_name = "[your app name]"
        app_icon = ''
        actions = []   # the Notify spec expects an array here
        hint = {}      # and a dict of hints here
        expire_timeout = 10000  # Use seconds * 1000
        summary = strMsgTitle
        body = strMsgTime
        lngNotificationID = None
        try:
            lngNotificationID = DesktopNotify.dbus_interface.Notify(app_name, 0, app_icon, summary, body, actions, hint, expire_timeout)
        except DBusException as why:
            # Excellent spot to delete the oldest notification and then retry
            print(self.__class__.__name__ + ".push Being lazy. Posting passive notification was unsuccessful.", why)
        finally:
            # Excellent spot to add to database upon success
            pass
Here is the code sample:
class RunGui(QtGui.QMainWindow):
    def __init__(self, parent=None):
        ...
        QtCore.QObject.connect(self.ui.actionNew, QtCore.SIGNAL("triggered()"), self.new_select)
        ...

    def normal_output_written(self, qprocess):
        self.ui.text_edit.append("caught outputReady signal")  # works
        self.ui.text_edit.append(str(qprocess.readAllStandardOutput()))  # doesn't work

    def new_select(self):
        ...
        dialog_np = NewProjectDialog()
        dialog_np.exec_()
        if dialog_np.is_OK:
            section = dialog_np.get_section()
            project = dialog_np.get_project()
            ...
            np = NewProject()
            np.outputReady.connect(lambda: self.normal_output_written(np.qprocess))
            np.errorReady.connect(lambda: self.error_output_written(np.qprocess))
            np.inputNeeded.connect(lambda: self.input_from_line_edit(np.qprocess))
            np.params = partial(np.create_new_project, section, project, otherargs)
            np.start()

class NewProject(QtCore.QThread):
    outputReady = QtCore.pyqtSignal(object)
    errorReady = QtCore.pyqtSignal(object)
    inputNeeded = QtCore.pyqtSignal(object)
    params = None
    message = ""

    def __init__(self):
        super(NewProject, self).__init__()
        self.qprocess = QtCore.QProcess()
        self.qprocess.moveToThread(self)
        self._inputQueue = Queue()

    def run(self):
        self.params()

    def create_new_project(self, section, project, otherargs):
        ...
        # PyDev for some reason skips the breakpoints inside the thread
        self.qprocess.start(command)
        self.qprocess.waitForReadyRead()
        self.outputReady.emit(self.qprocess)  # works - I'm getting the signal in RunGui.normal_output_written()
        print(str(self.qprocess.readAllStandardOutput()))  # prints an empty line
        ...  # other actions inside the method requiring "command" to finish properly
The idea is beaten to death: get the GUI to run scripts and communicate with the processes. The challenge in this particular example is that the script started in QProcess as command runs an app that requires user input (confirmation) along the way. Therefore I have to be able to start the script, get all the output and parse it, wait for the question to appear in the output, communicate back the answer, let it finish, and only then proceed with the other actions inside create_new_project().
I don't know if this will fix your overall issue, but there are a few design issues I see here.
You are passing around the qprocess between threads instead of just emitting your custom signals with the results of the qprocess
You are using class-level attributes that should probably be instance attributes
Technically you don't even need the QProcess, since you are running it in your thread and actively using blocking calls; it could easily be a subprocess.Popen. But anyway, I might suggest changes like this:
class RunGui(QtGui.QMainWindow):
    ...
    def normal_output_written(self, msg):
        self.ui.text_edit.append(msg)

    def new_select(self):
        ...
        np = NewProject()
        np.outputReady.connect(self.normal_output_written)
        np.params = partial(np.create_new_project, section, project, otherargs)
        np.start()

class NewProject(QtCore.QThread):
    outputReady = QtCore.pyqtSignal(object)
    errorReady = QtCore.pyqtSignal(object)
    inputNeeded = QtCore.pyqtSignal(object)

    def __init__(self):
        super(NewProject, self).__init__()
        self._inputQueue = Queue()
        self.params = None

    def run(self):
        self.params()

    def create_new_project(self, section, project, otherargs):
        ...
        qprocess = QtCore.QProcess()
        qprocess.start(command)
        if not qprocess.waitForStarted():
            # handle a failed command here
            return
        if not qprocess.waitForReadyRead():
            # handle a timeout or error here
            return
        msg = str(qprocess.readAllStandardOutput())
        self.outputReady.emit(msg)
Don't pass around the QProcess; just emit the data, and create the process inside the thread's method so that it is automatically owned by that thread. Your outside classes should really have no knowledge of that QProcess object. It doesn't even need to be a member attribute, since it is only needed during the operation.
Also make sure you are properly checking that your command both successfully started and is running and producing output.
Update
To clarify some problems you might be having (per the comments), I wanted to suggest that QProcess might not be the best option if you need interactive control over processes that expect periodic user input. It should work fine for running scripts that just produce output from start to finish, though really using subprocess would be much easier. For scripts that need user input over time, your best bet may be pexpect. It allows you to spawn a process and then watch for patterns that you know indicate the need for input:
foo.py
import time
i = raw_input("Please enter something: ")
print "Output:", i
time.sleep(.1)
print "Another line"
time.sleep(.1)
print "Done"
test.py
import pexpect
import time

child = pexpect.spawn("python foo.py")
child.setecho(False)

ret = -1
while ret < 0:
    time.sleep(.05)
    ret = child.expect("Please enter something: ")

child.sendline('FOO')

while True:
    line = child.readline()
    if not line:
        break
    print line.strip()

# Output: FOO
# Another line
# Done
I have 4 video files (different scenes of a movie).
There's a starting scene that plays when I run the player.
Before that scene ends, the video player reads an int value (0-100) from an external file (all at runtime), and depending on that int value it has to determine which scene to play next.
pseudo example:
if (x > 0 && x < 30)
    videoSource = scene2
else if (x >= 30 && x < 60)
    videoSource = scene3
else if (x >= 60 && x <= 100)
    videoSource = scene4
How can I make it change video sources at runtime, depending on that variable?
I don't care about the video file format (AVI, MP4...); whatever works is fine.
I don't know how to approach this problem. I've searched for something with the potential to accomplish this, like pyglet or GStreamer, but I didn't find a clear solution.
EDIT: I have a basic window and video player with pyglet, and I was able to play the video (without depending on a variable) using this code:
import pyglet

vidPath = "sample.mpg"
window = pyglet.window.Window()

player = pyglet.media.Player()
source = pyglet.media.StreamingSource()
MediaLoad = pyglet.media.load(vidPath)

player.queue(MediaLoad)
player.play()

@window.event
def on_draw():
    window.clear()
    if player.source and player.source.video_format:
        player.get_texture().blit(0, 0)

pyglet.app.run()
How would I go about this? Guidance in the right direction and/or some sample code would be highly appreciated.
Thanks in advance.
Answer revised based on comments
If your goal is to constantly read a file that is receiving writes from the output of another process, there are a couple of aspects that need to be solved:
You either need to periodically re-read a file that is constantly being overwritten, or you need to tail the output of a file that is being appended to with new values.
Your script currently blocks when you start the pyglet event loop, so this file check will have to happen in a different thread, and you would then have to communicate the update event.
I can't fully comment on step 2, because I have never used pyglet and am not familiar with how it uses events or signals, but I can at least suggest half of it with a thread.
Here is a super basic example of using a thread to read a file and report when a line is found:
import time
from threading import Thread

class Monitor(object):
    def __init__(self):
        self._stop = False

    def run(self, inputFile, secs=3):
        self._stop = False
        with open(inputFile) as monitor:
            while True:
                line = monitor.readline().strip()
                if line.isdigit():
                    # this is where you would notify somehow
                    print int(line)
                time.sleep(secs)
                if self._stop:
                    return

    def stop(self):
        self._stop = True

if __name__ == "__main__":
    inputFile = "write.txt"
    monitor = Monitor()
    monitorThread = Thread(target=monitor.run, args=(inputFile, 1))
    monitorThread.start()
    try:
        while True:
            time.sleep(.25)
    except:
        monitor.stop()
The sleep loop at the end of the code is just a way to emulate your event loop and block.
Here is a test to show how it would work. First I open a python shell and open a new file:
>>> f = open("write.txt", 'w+', 10)
Then you can start this script. And back in the shell you can start writing lines:
>>> f.write('50\n'); f.flush()
In your script terminal you will see it read and print the lines.
The other way, if the process writing to this file is constantly overwriting it, would be to re-read the file on each pass by seeking back with monitor.seek(0) and calling readline() again, as sketched below.
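A variant of the run() method for that overwrite case might look like this (same Monitor class as above, just seeking back instead of tailing):

    def run(self, inputFile, secs=3):
        self._stop = False
        with open(inputFile) as monitor:
            while not self._stop:
                monitor.seek(0)  # re-read from the start each pass
                line = monitor.readline().strip()
                if line.isdigit():
                    print int(line)
                time.sleep(secs)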
Again, this is a really simple example to get you started; I am sure there are more advanced ways of solving it. The next step would be to figure out how to signal the pyglet event loop to call a method that changes your video source.
Update
You should review this section of the pyglet docs on how to create your own event dispatcher: http://pyglet.org/doc/programming_guide/creating_your_own_event_dispatcher.html
Again, without much knowledge of pyglet, here is what it might look like:
class VideoNotifier(pyglet.event.EventDispatcher):
    def updateIndex(self, value):
        self.dispatch_event('on_update_index', value)

VideoNotifier.register_event_type('on_update_index')

videoNotifier = VideoNotifier()

@videoNotifier.event
def on_update_index(newIndex):
    # thread has notified of an update
    # Change the video here
    pass
And for your thread class, you would pass in the dispatcher instance, and use the updateIndex() event to notify:
class Monitor(object):
    def __init__(self, dispatcher):
        self._stop = False
        self._dispatcher = dispatcher

    def run(self, inputFile, secs=3):
        ...
        ...
        # should notify the video player of the new value
        line = int(line_from_file)
        self._dispatcher.updateIndex(line)
        ...
        ...
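Wiring the two sketches together might then look roughly like this (names assumed from the fragments above; note the handler runs in the monitor thread, so keep it small):

videoNotifier = VideoNotifier()
monitor = Monitor(videoNotifier)
monitorThread = Thread(target=monitor.run, args=("write.txt", 1))
monitorThread.start()
pyglet.app.run()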
Hope that gets you started!
I am trying to capture a single frame from the Apple iSight camera built into a MacBook Pro, using Python (version 2.7 or 2.6) and PyObjC (version 2.2).
As a starting point, I used this old StackOverflow question. To verify that it makes sense, I cross-referenced it against Apple's MyRecorder example that it seems to be based on. Unfortunately, my script does not work.
My big questions are:
Am I initializing the camera correctly?
Am I starting the event loop correctly?
Was there any other setup I was supposed to do?
In the example script pasted below, the intended operation is that after calling startImageCapture(), I should start seeing "Got a frame..." messages printed by the CaptureDelegate. However, the camera's light never turns on and the delegate's callback never executes.
Also, there are no failures during startImageCapture(): all functions claim to succeed, and it successfully finds the iSight device. Analyzing the session object in pdb shows that it has valid input and output objects, the output has a delegate assigned, the device is not in use by another process, and the session is marked as running after startRunning() is called.
Here's the code:
#!/usr/bin/env python2.7

import sys
import os
import time
import objc
import QTKit
import AppKit
from Foundation import NSObject
from Foundation import NSTimer
from PyObjCTools import AppHelper

objc.setVerbose(True)

class CaptureDelegate(NSObject):
    def captureOutput_didOutputVideoFrame_withSampleBuffer_fromConnection_(self, captureOutput,
                                                                           videoFrame, sampleBuffer,
                                                                           connection):
        # This should get called for every captured frame
        print "Got a frame: %s" % videoFrame

class QuitClass(NSObject):
    def quitMainLoop_(self, aTimer):
        # Just stop the main loop.
        print "Quitting main loop."
        AppHelper.stopEventLoop()

def startImageCapture():
    error = None

    # Create a QT Capture session
    session = QTKit.QTCaptureSession.alloc().init()

    # Find iSight device and open it
    dev = QTKit.QTCaptureDevice.defaultInputDeviceWithMediaType_(QTKit.QTMediaTypeVideo)
    print "Device: %s" % dev
    if not dev.open_(error):
        print "Couldn't open capture device."
        return

    # Create an input instance with the device we found and add to session
    input = QTKit.QTCaptureDeviceInput.alloc().initWithDevice_(dev)
    if not session.addInput_error_(input, error):
        print "Couldn't add input device."
        return

    # Create an output instance with a delegate for callbacks and add to session
    output = QTKit.QTCaptureDecompressedVideoOutput.alloc().init()
    delegate = CaptureDelegate.alloc().init()
    output.setDelegate_(delegate)
    if not session.addOutput_error_(output, error):
        print "Failed to add output delegate."
        return

    # Start the capture
    print "Initiating capture..."
    session.startRunning()

def main():
    # Open camera and start capturing frames
    startImageCapture()

    # Setup a timer to quit in 10 seconds (hack for now)
    quitInst = QuitClass.alloc().init()
    NSTimer.scheduledTimerWithTimeInterval_target_selector_userInfo_repeats_(10.0,
                                                                             quitInst,
                                                                             'quitMainLoop:',
                                                                             None,
                                                                             False)
    # Start Cocoa's main event loop
    AppHelper.runConsoleEventLoop(installInterrupt=True)
    print "After event loop"

if __name__ == "__main__":
    main()
Thanks for any help you can provide!
OK, I spent a day diving through the depths of PyObjC and got it working.
For future record: the reason the code in the question did not work is variable scope and garbage collection. The session variable was deleted when it fell out of scope, which happened before the event loop ran; something must be done to retain it so it is not freed before it has time to run.
Moving everything into a class and making session a class variable made the callbacks start working. Additionally, the code below demonstrates getting the frame's pixel data into bitmap format and saving it via Cocoa calls, and also how to copy it back into Python's world view as a buffer or string.
The script below will capture a single frame:
#!/usr/bin/env python2.7
#
# camera.py -- by Trevor Bentley (02/04/2011)
#
# This work is licensed under a Creative Commons Attribution 3.0 Unported License.
#
# Run from the command line on an Apple laptop running OS X 10.6, this script will
# take a single frame capture using the built-in iSight camera and save it to disk
# using three methods.
#

import sys
import os
import time
import objc
import QTKit
from AppKit import *
from Foundation import NSObject
from Foundation import NSTimer
from PyObjCTools import AppHelper

class NSImageTest(NSObject):
    def init(self):
        self = super(NSImageTest, self).init()
        if self is None:
            return None

        self.session = None
        self.running = True
        return self

    def captureOutput_didOutputVideoFrame_withSampleBuffer_fromConnection_(self, captureOutput,
                                                                           videoFrame, sampleBuffer,
                                                                           connection):
        self.session.stopRunning()  # I just want one frame

        # Get a bitmap representation of the frame using CoreImage and Cocoa calls
        ciimage = CIImage.imageWithCVImageBuffer_(videoFrame)
        rep = NSCIImageRep.imageRepWithCIImage_(ciimage)
        bitrep = NSBitmapImageRep.alloc().initWithCIImage_(ciimage)
        bitdata = bitrep.representationUsingType_properties_(NSBMPFileType, objc.NULL)

        # Save image to disk using Cocoa
        t0 = time.time()
        bitdata.writeToFile_atomically_("grab.bmp", False)
        t1 = time.time()
        print "Cocoa saved in %.5f seconds" % (t1-t0)

        # Save a read-only buffer of image to disk using Python
        t0 = time.time()
        bitbuf = bitdata.bytes()
        f = open("python.bmp", "w")
        f.write(bitbuf)
        f.close()
        t1 = time.time()
        print "Python saved buffer in %.5f seconds" % (t1-t0)

        # Save a string-copy of the buffer to disk using Python
        t0 = time.time()
        bitbufstr = str(bitbuf)
        f = open("python2.bmp", "w")
        f.write(bitbufstr)
        f.close()
        t1 = time.time()
        print "Python saved string in %.5f seconds" % (t1-t0)

        # Will exit on next execution of quitMainLoop_()
        self.running = False

    def quitMainLoop_(self, aTimer):
        # Stop the main loop after one frame is captured. Call rapidly from timer.
        if not self.running:
            AppHelper.stopEventLoop()

    def startImageCapture(self, aTimer):
        error = None
        print "Finding camera"

        # Create a QT Capture session
        self.session = QTKit.QTCaptureSession.alloc().init()

        # Find iSight device and open it
        dev = QTKit.QTCaptureDevice.defaultInputDeviceWithMediaType_(QTKit.QTMediaTypeVideo)
        print "Device: %s" % dev
        if not dev.open_(error):
            print "Couldn't open capture device."
            return

        # Create an input instance with the device we found and add to session
        input = QTKit.QTCaptureDeviceInput.alloc().initWithDevice_(dev)
        if not self.session.addInput_error_(input, error):
            print "Couldn't add input device."
            return

        # Create an output instance with a delegate for callbacks and add to session
        output = QTKit.QTCaptureDecompressedVideoOutput.alloc().init()
        output.setDelegate_(self)
        if not self.session.addOutput_error_(output, error):
            print "Failed to add output delegate."
            return

        # Start the capture
        print "Initiating capture..."
        self.session.startRunning()

    def main(self):
        # Callback that quits after a frame is captured
        NSTimer.scheduledTimerWithTimeInterval_target_selector_userInfo_repeats_(0.1,
                                                                                 self,
                                                                                 'quitMainLoop:',
                                                                                 None,
                                                                                 True)
        # Turn on the camera and start the capture
        self.startImageCapture(None)

        # Start Cocoa's main event loop
        AppHelper.runConsoleEventLoop(installInterrupt=True)
        print "Frame capture completed."

if __name__ == "__main__":
    test = NSImageTest.alloc().init()
    test.main()
QTKit is deprecated and PyObjC is a big dependency (and seems tricky to build if you want it in Homebrew). Plus, PyObjC lacked most of AVFoundation, so I created a simple camera extension for Python that uses AVFoundation to record a video or snap a picture. It requires no dependencies (Cython intermediate files are committed so that most users don't need Cython).
It should be possible to build it like this:
pip install -e git+https://github.com/dashesy/pyavfcam.git
Then we can use it to take a picture:
import pyavfcam

# Open the default video source
cam = pyavfcam.AVFCam(sinks='image')
frame = cam.snap_picture('test.jpg')  # frame is a memory buffer that np.asarray(frame) can retrieve
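As that comment suggests, the frame should be convertible to a numpy array (assuming numpy is installed; the exact shape and dtype depend on the capture format):

import numpy as np
img = np.asarray(frame)  # view the snapped frame as an array
print(img.shape)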
Not related to this question, but if the AVFCam class is subclassed, the overridden methods will be called with the result.