Python threading question - returning control to parent

Basically, I have a Python program which listens for DeviceAdded DBus events (e.g. when someone plugs in a USB drive), and when an event occurs, I want to create a thread which collects metadata on that newly connected device. However, I want to do this asynchronously - that is, allow one thread to keep collecting metadata on the device while returning control to the parent, which can keep listening for these events. At the moment, my thread blocks until the collection is finished. Here is a sample of my code:
class DeviceAddedListener:
    def __init__(self):
        self.bus = dbus.SystemBus()
        self.hal_manager_obj = self.bus.get_object("org.freedesktop.Hal", "/org/freedesktop/Hal/Manager")
        self.hal_manager = dbus.Interface(self.hal_manager_obj, "org.freedesktop.Hal.Manager")
        self.hal_manager.connect_to_signal("DeviceAdded", self._filter)

    def _filter(self, udi):
        device_obj = self.bus.get_object("org.freedesktop.Hal", udi)
        device = dbus.Interface(device_obj, "org.freedesktop.Hal.Device")
        if device.QueryCapability("volume"):
            return self.capture(device)

    def capture(self, volume):
        self.device_file = volume.GetProperty("block.device")
        self.label = volume.GetProperty("volume.label")
        self.fstype = volume.GetProperty("volume.fstype")
        self.mounted = volume.GetProperty("volume.is_mounted")
        self.mount_point = volume.GetProperty("volume.mount_point")
        try:
            self.size = volume.GetProperty("volume.size")
        except:
            self.size = 0
        print "New storage device detected:"
        print " device_file: %s" % self.device_file
        print " label: %s" % self.label
        print " fstype: %s" % self.fstype
        if self.mounted:
            print " mount_point: %s" % self.mount_point
        response = raw_input("\nWould you like to acquire %s [y/N]? " % self.device_file)
        if (response == "y"):
            self.get_meta()
            # Note: this evaluates DoSomething(self.device_file) right here, in the
            # current thread, and only then hands the result to start_new_thread,
            # which is why this blocks; start_new_thread expects a callable plus an
            # args tuple: thread.start_new_thread(DoSomething, (self.device_file,))
            thread.start_new_thread(DoSomething(self.device_file))
        else:
            print "Returning to idle"

if __name__ == '__main__':
    from dbus.mainloop.glib import DBusGMainLoop
    DBusGMainLoop(set_as_default=True)
    loop = gobject.MainLoop()
    DeviceAddedListener()
    loop.run()
Any thoughts would be greatly appreciated :) I have excluded the import list to save space

Try spawning a thread just for the capture work, by changing the following lines in your _filter() function:
if device.QueryCapability("volume"):
    threading.Thread(target=self.capture, args=(device,)).start()
This is assuming that the bulk of the work is happening in the capture() function. If not, then just spawn the thread a little earlier, possibly on the whole _filter() function.
This should then spawn a new thread for every filtered device detected. Bear in mind that I haven't done any dbus stuff and can't really test this, but it's an idea.
Also, you're trying to get user input from the capture function, which, with the app structured as it is, isn't really a nice thing to do from threads. What if a second device is connected while the first prompt is still on screen? It might not play nicely.
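If you do keep the per-device prompt, a minimal sketch of one way to stop prompts from interleaving (prompt_lock and ask_user are illustrative names, not part of your code):
import threading

prompt_lock = threading.Lock()  # only one prompt on screen at a time

def ask_user(device_file):
    with prompt_lock:
        return raw_input("\nWould you like to acquire %s [y/N]? " % device_file)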
The design of this thing might be exactly the way you want it for specific reasons, but I can't help feeling like it could be a lot slicker. It's not really designed with threads in mind from what I can tell.


Python Queues memory leaks when called inside thread

I have a Python TCP client and need to send a media (.mpg) file in a loop to a C TCP server.
I have the following code, where a separate thread reads the file in 10K blocks and sends them, looping back to the start of the file at EOF. I am using a Queue to print the logs in my GUI (Tkinter), but after some time it runs out of memory. I think the cause is my use of the thread module, or the TCP send.
UPDATE 1 - Added more code as requested
Thread class "Sendmpgthread" used to create a thread to send data:
.
.
def __init__(self, otherparams, MainGUI):
    .
    .
    self.MainGUI = MainGUI
    self.lock = threading.Lock()
    Thread.__init__(self)

# This is the one causing the leak; it is called inside the loop
def pushlog(self, msg):
    self.MainGUI.queuelog.put(msg)

def send(self, mysocket, block):
    size = len(block)
    pos = 0
    while size > 0:
        try:
            curpos = mysocket.send(block[pos:])
        except socket.timeout, msg:
            if self.over:
                self.pushlog('Exit Send')
                return False
            continue  # nothing was sent; retry the same slice
        except socket.error, msg:
            print 'Exception'
            return False
        pos = pos + curpos
        size = size - curpos
    return True

def run(self):
    media_file = None
    mysocket = None
    try:
        mysocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        mysocket.connect((self.ip, string.atoi(self.port)))
        media_file = open(self.file, 'rb')
        while not self.over:
            chunk = media_file.read(10000)
            if not chunk:  # EOF; reset to the start of the file
                print 'resetting stream'
                media_file.seek(0, 0)
                continue
            if not self.send(mysocket, chunk):  # some error, or the thread was killed
                break
            # disabling this solves the issue
            self.pushlog('print how much data sent')
    except socket.error, msg:
        print 'print exception'
    except Exception, msg:
        print 'print exception'
    try:
        if media_file is not None:
            media_file.close()
            media_file = None
        if mysocket is not None:
            mysocket.close()
            mysocket = None
    finally:
        print 'some cleaning'

def kill(self):
    self.over = True
I figured out that it is caused by my wrong use of the Queue, as commenting out that piece resolves the issue.
UPDATE 2 - MainGUI class, which is called from the above thread class
class MainGUI(Frame):
    def __init__(self, otherargs):
        # some code
        .
        .
        # from the above thread class used to send data
        self.send_mpg_status = Sendmpgthread(params)
        self.send_mpg_status.start()
        self.after(100, self.updatelog)
        self.queuelog = Queue.Queue()

    def updatelog(self):
        try:
            msg = self.queuelog.get_nowait()
            while msg is not None:
                self.printlog(msg)
                msg = self.queuelog.get_nowait()
        except Queue.Empty:
            pass
        if self.send_mpg_status:  # only continue when sending
            self.after(100, self.updatelog)

    def printlog(self, msg):
        # print in GUI
Since printlog is adding to a tkinter text control, the memory occupied by that control will grow with each message (it has to store all the log messages in order to display them).
Unless storing all the logs is critical, a common solution is to limit the maximum number of log lines displayed.
A naive implementation is to eliminate extra lines from the beginning once the control reaches a maximum number of messages. Add a function to get the number of lines in the control and then, in printlog, do something similar to:
while getnumlines(self.edit) > self.maxloglines:
    self.edit.delete('1.0', '2.0')  # delete the whole first line, including its newline
(above code not tested)
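For completeness, a minimal sketch of the line-counting helper assumed above, for a Tkinter Text widget (the 'end-1c' index names the last character, and its line component is the line count):
def getnumlines(edit):
    return int(edit.index('end-1c').split('.')[0])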
update: some general guidelines
Keep in mind that what might look like a memory leak does not always mean that a function is wrong, or that the memory is no longer accessible. Many times there is missing cleanup code for a container that is accumulating elements.
A basic general approach for this kind of problem:
form an opinion on what part of the code might be causing the problem
check it by commenting that code out (or keep commenting code until you find a candidate)
look for containers in the responsible code, add code to print their size
decide what elements can be safely removed from that container, and when to do it
test the result
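As a minimal sketch of the third step, assuming the suspect container is the queuelog from the question:
print "queuelog size:", self.queuelog.qsize()  # steady growth points to the leak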
I can't see anything obviously wrong with your code snippet.
To reduce memory usage a bit under Python 2.7, I'd use buffer(block, pos) instead of block[pos:]. Also I'd use mysocket.sendall(block) instead of your send method.
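For example, a sketch of the question's retry loop rewritten with buffer() (Python 2 only; block and mysocket as in the question):
pos = 0
size = len(block)
while size > 0:
    sent = mysocket.send(buffer(block, pos))  # read-only view, no copy of the tail
    pos += sent
    size -= sent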
If the ideas above don't solve your problem, then the bug is most probably elsewhere in your code. Could you please post the shortest possible version of the full Python script which still grows out of memory (http://sscce.org/)? That increases your chance of getting useful help.
Out of memory errors are indicative of data being generated but not consumed or released. Looking through your code I would guess these two areas:
Messages are being pushed onto a Queue.Queue() instance in the pushlog method. Are they being consumed?
The MainGUI printlog method may be writing text somewhere. E.g. is it continually writing to some kind of GUI widget without any pruning of messages?
From the code you've posted, here's what I would try:
Put a print statement in updatelog. If this is not being continually called for some reason such as a failed after() call, then the queuelog will continue to grow without bound.
If updatelog is continually being called, then turn your focus to printlog. Comment the contents of this function to see if out of memory errors still occur. If they don't, then something in printlog may be holding on to the logged data, you'll need to dig deeper to find out what.
Apart from this, the code could be cleaned up a bit. self.queuelog is not created until after the thread is started, which gives rise to a race condition where the thread may try to write into the queue before it has been created. Creation of queuelog should be moved to somewhere before the thread is started, as sketched below.
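A sketch of that reordering in MainGUI.__init__, using the names from the question:
self.queuelog = Queue.Queue()  # create the queue first...
self.send_mpg_status = Sendmpgthread(params)
self.send_mpg_status.start()   # ...then start the thread that writes to it
self.after(100, self.updatelog)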
updatelog could also be refactored to remove redundancy:
def updatelog(self):
    try:
        while True:
            msg = self.queuelog.get_nowait()
            self.printlog(msg)
    except Queue.Empty:
        pass
And I assume the kill function is called from the GUI thread. To avoid race conditions between threads, self.over should be a thread-safe object such as a threading.Event:
def __init__(...):
    self.over = threading.Event()

def kill(self):
    self.over.set()
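The sending loop would then poll the event instead of a plain attribute; a sketch using the loop from the question:
while not self.over.is_set():
    chunk = media_file.read(10000)
    if not chunk:
        media_file.seek(0, 0)
        continue
    if not self.send(mysocket, chunk):
        break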
There is no data piling up in your TCP sending loop.
The memory error is probably caused by the logging queue. Since you have not posted the complete code, try using the following class for logging:
from threading import Thread, Event, Lock
from time import sleep, time as now

class LogRecord(object):
    __slots__ = ["txt", "params"]
    def __init__(self, txt, params):
        self.txt, self.params = txt, params

class AsyncLog(Thread):
    DEBUGGING_EMULATE_SLOW_IO = True

    def __init__(self, queue_max_size=15, queue_min_size=5):
        Thread.__init__(self)
        self.queue_max_size, self.queue_min_size = queue_max_size, queue_min_size
        self._queuelock = Lock()
        self._queue = []  # protected by _queuelock
        self._discarded_count = 0  # protected by _queuelock
        self._pushed_event = Event()
        self.setDaemon(True)
        self.start()

    def log(self, message, **params):
        with self._queuelock:
            self._queue.append(LogRecord(message, params))
            if len(self._queue) > self.queue_max_size:
                # trim the queue down to queue_min_size entries:
                self._discarded_count += len(self._queue) - self.queue_min_size
                del self._queue[self.queue_min_size:]  # trim in place instead of creating a new list (= [])
            self._pushed_event.set()

    def run(self):
        while 1:  # no reason for an exit condition here
            logs, discarded_count = None, 0
            with self._queuelock:
                if len(self._queue) > 0:
                    # grab buffered messages for printing, releasing the lock ASAP
                    logs = self._queue[:]
                    del self._queue[:]
                    self._pushed_event.clear()
                    discarded_count = self._discarded_count
                    self._discarded_count = 0
            if not logs:
                self._pushed_event.wait()
                self._pushed_event.clear()
                continue
            else:
                # print logs
                if discarded_count:
                    print ".. {0} log records missing ..".format(discarded_count)
                for log_record in logs:
                    self.write_line(log_record)
                if self.DEBUGGING_EMULATE_SLOW_IO:
                    sleep(0.5)

    def write_line(self, log_record):
        print log_record.txt, " ".join(["{0}={1}".format(name, value) for name, value in log_record.params.items()])

if __name__ == "__main__":
    class MainGUI:
        def __init__(self):
            self._async_log = AsyncLog()
            self.log = self._async_log.log  # stored as a bound method

        def do_this_test(self):
            print "I am about to log 100 times per sec, while text output frequency is 2Hz (twice per second)"
            def log_100_records_in_one_second(iteration_index):
                for i in xrange(100):
                    self.log("something happened", timestamp=now(), session=3.1415, iteration=iteration_index)
                    sleep(0.01)
            for iter_index in range(3):
                log_100_records_in_one_second(iter_index)

    test = MainGUI()
    test.do_this_test()
I have noticed that you do not sleep() anywhere in the sending loop, which means data is read and sent as fast as possible. Note that this is not desirable behavior when playing media files: container timestamps are there to dictate the data rate.
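A sketch of one way to pace the loop (TARGET_BYTES_PER_SEC is an assumed constant; real pacing should follow the container's timestamps):
from time import sleep, time

TARGET_BYTES_PER_SEC = 500 * 1024  # assumed nominal rate
next_send = time()
while not self.over:
    chunk = media_file.read(10000)
    if not chunk:
        media_file.seek(0, 0)
        continue
    if not self.send(mysocket, chunk):
        break
    next_send += len(chunk) / float(TARGET_BYTES_PER_SEC)
    delay = next_send - time()
    if delay > 0:
        sleep(delay)  # hold back so the average rate stays near the target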

QProcess.readAllStandardOutput() doesn't seem to read anything - PyQt

Here is the code sample:
class RunGui(QtGui.QMainWindow):
    def __init__(self, parent=None):
        ...
        QtCore.QObject.connect(self.ui.actionNew, QtCore.SIGNAL("triggered()"), self.new_select)
        ...

    def normal_output_written(self, qprocess):
        self.ui.text_edit.append("caught outputReady signal")  # works
        self.ui.text_edit.append(str(qprocess.readAllStandardOutput()))  # doesn't work

    def new_select(self):
        ...
        dialog_np = NewProjectDialog()
        dialog_np.exec_()
        if dialog_np.is_OK:
            section = dialog_np.get_section()
            project = dialog_np.get_project()
            ...
            np = NewProject()
            np.outputReady.connect(lambda: self.normal_output_written(np.qprocess))
            np.errorReady.connect(lambda: self.error_output_written(np.qprocess))
            np.inputNeeded.connect(lambda: self.input_from_line_edit(np.qprocess))
            np.params = partial(np.create_new_project, section, project, otherargs)
            np.start()

class NewProject(QtCore.QThread):
    outputReady = QtCore.pyqtSignal(object)
    errorReady = QtCore.pyqtSignal(object)
    inputNeeded = QtCore.pyqtSignal(object)
    params = None
    message = ""

    def __init__(self):
        super(NewProject, self).__init__()
        self.qprocess = QtCore.QProcess()
        self.qprocess.moveToThread(self)
        self._inputQueue = Queue()

    def run(self):
        self.params()

    def create_new_project(self, section, project, otherargs):
        ...
        # PyDev for some reason skips the breakpoints inside the thread
        self.qprocess.start(command)
        self.qprocess.waitForReadyRead()
        self.outputReady.emit(self.qprocess)  # works - I'm getting the signal in RunGui.normal_output_written()
        print(str(self.qprocess.readAllStandardOutput()))  # prints an empty line
        ...  # other actions inside the method requiring "command" to finish properly
The idea is beaten to death - get the GUI to run scripts and communicate with the processes. The challenge in this particular example is that the script started in QProcess as command runs an app that requires user input (confirmation) along the way. Therefore I have to be able to start the script, get all output and parse it, wait for the question to appear in the output, communicate back the answer, allow it to finish, and only then proceed with the other actions inside create_new_project().
I don't know if this will fix your overall issue, but there are a few design issues I see here.
You are passing the qprocess around between threads instead of just emitting your custom signals with the results of the qprocess.
You are using class-level attributes that should probably be instance attributes.
Technically you don't even need the QProcess, since you are running it in your thread and actively using blocking calls. It could easily be a subprocess.Popen... but anyway, I might suggest changes like this:
class RunGui(QtGui.QMainWindow):
    ...
    def normal_output_written(self, msg):
        self.ui.text_edit.append(msg)

    def new_select(self):
        ...
        np = NewProject()
        np.outputReady.connect(self.normal_output_written)
        np.params = partial(np.create_new_project, section, project, otherargs)
        np.start()

class NewProject(QtCore.QThread):
    outputReady = QtCore.pyqtSignal(object)
    errorReady = QtCore.pyqtSignal(object)
    inputNeeded = QtCore.pyqtSignal(object)

    def __init__(self):
        super(NewProject, self).__init__()
        self._inputQueue = Queue()
        self.params = None

    def run(self):
        self.params()

    def create_new_project(self, section, project, otherargs):
        ...
        qprocess = QtCore.QProcess()
        qprocess.start(command)
        if not qprocess.waitForStarted():
            # handle a failed command here
            return
        if not qprocess.waitForReadyRead():
            # handle a timeout or error here
            return
        msg = str(qprocess.readAllStandardOutput())  # the local qprocess, not self.qprocess
        self.outputReady.emit(msg)
Don't pass around the QProcess. Just emit the data. And create it from within the thread's method so that it is automatically owned by that thread. Your outside classes should really not have any knowledge of that QProcess object. It doesn't even need to be a member attribute, since it's only needed during the operation.
Also make sure you are properly checking that your command both successfully started and is running and outputting data.
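As an aside, the subprocess route mentioned earlier could look roughly like this (a sketch only; it assumes command is a plain shell string and that blocking this worker thread until the process exits is acceptable):
import subprocess

def create_new_project(self, section, project, otherargs):
    proc = subprocess.Popen(command, shell=True,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()  # blocks the worker thread, not the GUI
    if err:
        self.errorReady.emit(err)
    self.outputReady.emit(out)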
Update
To clarify some problems you might be having (per the comments), I wanted to suggest that QProcess might not be the best option if you need interactive control over processes that expect periodic user input. It should work fine for running scripts that just produce output from start to finish, though really, using subprocess would be much easier. For scripts that need user input over time, your best bet may be to use pexpect. It allows you to spawn a process and then watch for various patterns that you know indicate the need for input:
foo.py
import time
i = raw_input("Please enter something: ")
print "Output:", i
time.sleep(.1)
print "Another line"
time.sleep(.1)
print "Done"
test.py
import pexpect
import time

child = pexpect.spawn("python foo.py")
child.setecho(False)
ret = -1
while ret < 0:
    time.sleep(.05)
    ret = child.expect("Please enter something: ")
child.sendline('FOO')
while True:
    line = child.readline()
    if not line:
        break
    print line.strip()
# Output: FOO
# Another line
# Done

Using python multiprocessing pipes

I am trying to write a class that calculates checksums using multiple processes, thereby taking advantage of multiple cores. I have a quite simple class for this, and it works great for a simple case. But whenever I create two or more instances of the class, the worker never exits. It seems like it never gets the message that the pipe has been closed by the parent.
All the code can be found below. I first calculate the md5 and sha1 checksums separately, which works, and then I try to perform the calculation in parallel, and then the program locks up when it is time to close the pipe.
What is going on here? Why aren't the pipes working as I expect? I guess I could do a workaround by sending a "Stop" message on the queue and make the child quit that way, but I'd really like to know why this isn't working as it is.
import multiprocessing
import hashlib

class ChecksumPipe(multiprocessing.Process):
    def __init__(self, csname):
        multiprocessing.Process.__init__(self, name=csname)
        self.summer = eval("hashlib.%s()" % csname)
        self.child_conn, self.parent_conn = multiprocessing.Pipe(duplex=False)
        self.result_queue = multiprocessing.Queue(1)
        self.daemon = True
        self.start()
        self.child_conn.close()  # This is the parent. Close the unused end.

    def run(self):
        self.parent_conn.close()  # This is the child. Close the unused end.
        while True:
            try:
                print "Waiting for more data...", self
                block = self.child_conn.recv_bytes()
                print "Got some data...", self
            except EOFError:
                print "Finished work", self
                break
            self.summer.update(block)
        self.result_queue.put(self.summer.hexdigest())
        self.result_queue.close()
        self.child_conn.close()

    def update(self, block):
        self.parent_conn.send_bytes(block)

    def hexdigest(self):
        self.parent_conn.close()
        return self.result_queue.get()

def main():
    # Calculating the first checksum works
    md5 = ChecksumPipe("md5")
    md5.update("hello")
    print "md5 is", md5.hexdigest()

    # Calculating the second checksum works
    sha1 = ChecksumPipe("sha1")
    sha1.update("hello")
    print "sha1 is", sha1.hexdigest()

    # Calculating both checksums in parallel causes a lockup!
    md5, sha1 = ChecksumPipe("md5"), ChecksumPipe("sha1")
    md5.update("hello")
    sha1.update("hello")
    print "md5 and sha1 is", md5.hexdigest(), sha1.hexdigest()  # Lockup here!

main()
PS. This problem has been solved. Here is a working version of the above code, if anyone is interested:
import multiprocessing
import hashlib

class ChecksumPipe(multiprocessing.Process):
    all_open_parent_conns = []

    def __init__(self, csname):
        multiprocessing.Process.__init__(self, name=csname)
        self.summer = eval("hashlib.%s()" % csname)
        self.child_conn, self.parent_conn = multiprocessing.Pipe(duplex=False)
        ChecksumPipe.all_open_parent_conns.append(self.parent_conn)
        self.result_queue = multiprocessing.Queue(1)
        self.daemon = True
        self.start()
        self.child_conn.close()  # This is the parent. Close the unused end.

    def run(self):
        for conn in ChecksumPipe.all_open_parent_conns:
            conn.close()  # This is the child. Close all inherited parent ends.
        while True:
            try:
                print "Waiting for more data...", self
                block = self.child_conn.recv_bytes()
                print "Got some data...", self
            except EOFError:
                print "Finished work", self
                break
            self.summer.update(block)
        self.result_queue.put(self.summer.hexdigest())
        self.result_queue.close()
        self.child_conn.close()

    def update(self, block):
        self.parent_conn.send_bytes(block)

    def hexdigest(self):
        self.parent_conn.close()
        return self.result_queue.get()

def main():
    # Calculating the first checksum works
    md5 = ChecksumPipe("md5")
    md5.update("hello")
    print "md5 is", md5.hexdigest()

    # Calculating the second checksum works
    sha1 = ChecksumPipe("sha1")
    sha1.update("hello")
    print "sha1 is", sha1.hexdigest()

    # Calculating both checksums also works fine now
    md5, sha1 = ChecksumPipe("md5"), ChecksumPipe("sha1")
    md5.update("hello")
    sha1.update("hello")
    print "md5 and sha1 is", md5.hexdigest(), sha1.hexdigest()

main()
Yep, that is surprising behaviour indeed.
However, if you look at the output of lsof for the two parallel child processes, it is easy to notice that the second child process has more file descriptors open.
What happens is that when the two parallel child processes are started, the second child inherits the pipe ends of the parent. So when the parent calls self.parent_conn.close(), the second child still has that pipe file descriptor open, which means the pipe's file description does not get closed in the kernel (its reference count stays above 0). The effect is that self.child_conn.recv_bytes() in the first parallel child process never read()s EOF, and EOFError is never raised.
You may need to send an explicit shutdown message rather than just closing file descriptors, because there seems to be little control over which file descriptors get shared between which processes (there is no close-on-fork file descriptor flag).
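A minimal sketch of that shutdown-message idea, using the names from the question (finish and the empty-block marker are illustrative assumptions; it presumes real data blocks are never empty):
STOP = ""  # empty block used as an explicit stop marker

def finish(self):  # parent side: send the marker instead of closing the pipe
    self.parent_conn.send_bytes(STOP)

def run(self):  # child side: exit the loop when the marker arrives
    while True:
        block = self.child_conn.recv_bytes()
        if block == STOP:
            break
        self.summer.update(block)
    self.result_queue.put(self.summer.hexdigest())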

How to continuously monitor rhythmbox for track change using python

I want to monitor track changes in Rhythmbox using Python: continuously check for a change of track and execute a set of functions when the track changes. I have written a piece of code which gets hold of the Rhythmbox interfaces from dbus and gets the current track details, but this program has to be run manually to check for any change.
I am new to this, and I would like to know how to create a background process which continuously runs and checks Rhythmbox.
I don't want to write a Rhythmbox plugin (which would rather make my work simpler), as I will be extending the application to listen to multiple music players.
Please suggest what exactly I would have to do to achieve this functionality.
The Rhythmbox player object (/org/gnome/Rhythmbox/Player) sends a playingUriChanged signal whenever the current song changes. Connect a function to the signal to have it run whenever the signal is received. Here's an example that prints the title of the song whenever a new song starts, using the GLib main loop to process DBus messages:
#! /usr/bin/env python
import dbus
import dbus.mainloop.glib
import glib

# This gets called whenever Rhythmbox sends the playingUriChanged signal
def playing_song_changed(uri):
    global shell
    if uri != "":
        song = shell.getSongProperties(uri)
        print "Now playing: {0}".format(song["title"])
    else:
        print "Not playing anything"

dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
bus = dbus.SessionBus()

proxy = bus.get_object("org.gnome.Rhythmbox", "/org/gnome/Rhythmbox/Player")
player = dbus.Interface(proxy, "org.gnome.Rhythmbox.Player")
player.connect_to_signal("playingUriChanged", playing_song_changed)

proxy = bus.get_object("org.gnome.Rhythmbox", "/org/gnome/Rhythmbox/Shell")
shell = dbus.Interface(proxy, "org.gnome.Rhythmbox.Shell")

# Run the GLib event loop to process DBus signals as they arrive
mainloop = glib.MainLoop()
mainloop.run()
Take a look at the Conky script here:
https://launchpad.net/~conkyhardcore/+archive/ppa/+files/conkyrhythmbox_2.12.tar.gz
That uses dbus to talk to rhythmbox, like so:
bus = dbus.SessionBus()
remote_object_shell = bus.get_object('org.gnome.Rhythmbox', '/org/gnome/Rhythmbox/Shell')
iface_shell = dbus.Interface(remote_object_shell, 'org.gnome.Rhythmbox.Shell')
remote_object_player = bus.get_object('org.gnome.Rhythmbox', '/org/gnome/Rhythmbox/Player')
iface_player = dbus.Interface(remote_object_player, 'org.gnome.Rhythmbox.Player')
You can call a number of functions on iface_player to get the required information. It looks like you'll have to poll from this example, though. If you want to receive a message from dbus on track change, you'll have to do that in a different way. This thread discusses some avenues to explore:
http://ubuntuforums.org/showthread.php?t=156706
I am using Ubuntu 14.04.1, where the above script is deprecated for Rhythmbox 3. I am using this script to write the current song to ~/.now_playing for BUTT to read, but you can adapt it to your needs. Rhythmbox now uses MPRIS, and you can get info here:
http://specifications.freedesktop.org/mpris-spec/latest/index.html
#!/usr/bin/python
import dbus
import dbus.mainloop.glib
import glib

# This gets called whenever Rhythmbox sends the PropertiesChanged signal
def playing_song_changed(Player, two, three):
    global iface
    global track
    global home
    track2 = iface.Get(Player, "Metadata").get(dbus.String(u'xesam:artist'))[0] + " - " + iface.Get(Player, "Metadata").get(dbus.String(u'xesam:title'))
    if track != track2:
        track = track2
        f = open(home + '/.now_playing', 'w')
        f.write(track + '\n')
        f.close()

dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
bus = dbus.SessionBus()

from os.path import expanduser
home = expanduser("~")

player = bus.get_object("org.mpris.MediaPlayer2.rhythmbox", "/org/mpris/MediaPlayer2")
iface = dbus.Interface(player, "org.freedesktop.DBus.Properties")
track = iface.Get("org.mpris.MediaPlayer2.Player", "Metadata").get(dbus.String(u'xesam:artist'))[0] + " - " + iface.Get("org.mpris.MediaPlayer2.Player", "Metadata").get(dbus.String(u'xesam:title'))
f = open(home + "/.now_playing", 'w')
f.write(track + '\n')
f.close()

iface.connect_to_signal("PropertiesChanged", playing_song_changed)

# Run the GLib event loop to process DBus signals as they arrive
mainloop = glib.MainLoop()
mainloop.run()
Something like:
from time import sleep

execute = True
while execute:
    your_function_call()
    sleep(30)  # in seconds; prevents busy polling
Should work just fine. If that were hooked up to something that listened for signals (import signal), so that you could set execute to False when someone presses Ctrl-C, that would be basically what you're after; a sketch follows.
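A minimal sketch of that signal hookup (your_function_call is assumed from the snippet above):
import signal
from time import sleep

execute = True

def stop(signum, frame):
    global execute
    execute = False

signal.signal(signal.SIGINT, stop)  # Ctrl-C now ends the loop cleanly
while execute:
    your_function_call()
    sleep(30)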
Otherwise, have a Google for daemonisation (which involves forking the process a couple of times); from memory, there's even a decent Python library now (requiring the 2.5/2.6 with statement) which would help make that side of things easier :).

How do I get all instances of VLC on dbus quickly?

Basically the problem is that the only way to get all instances of VLC is to search all unnamed bus connections for the org.freedesktop.MediaPlayer Identity method and call it.
(Alternatively I could use the introspection API, but this wouldn't seem to solve my problem.)
Unfortunately, many programs simply do not respond once a dbus call has been sent, causing a long and costly timeout.
When this happens multiple times it can add up.
Basically, the built-in timeout is excessively long.
If I can decrease the dbus timeout somehow, that will solve my problem, but the ideal solution would be a way to avoid waiting on these timeouts at all.
I got the idea that I could put each call to Identity inside a thread and kill threads that take too long, but this seems to be discouraged. Also, adding multithreading greatly increases the CPU load while not increasing the speed of the program all that much.
Here is the code that I am trying to get to run quickly (more or less), which is currently painfully slow:
import dbus

bus = dbus.SessionBus()
dbus_proxy = bus.get_object('org.freedesktop.DBus', '/org/freedesktop/DBus')
names = dbus_proxy.ListNames()
for name in names:
    if name.startswith(':'):
        try:
            proxy = bus.get_object(name, '/')
            ident_method = proxy.get_dbus_method("Identity",
                                                 dbus_interface="org.freedesktop.MediaPlayer")
            print ident_method()
        except dbus.exceptions.DBusException:
            pass
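(On the timeout point above: dbus-python proxy method calls accept a timeout keyword argument, in seconds, so each probe can be made to fail fast rather than waiting out the much longer default. A one-line sketch:)
print ident_method(timeout=0.5)  # fail fast on unresponsive peers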
Easier than spawning a bunch of threads would be to make the calls to the different services asynchronously, providing a callback handler for when a result comes back or a D-Bus error occurs. All of the calls effectively happen in parallel, and your program can proceed as soon as it gets some positive results.
Here's a quick-and-dirty program that prints a list of all the services it finds. Note how quickly it gets all the positive results without having to wait for any timeouts from anything. In a real program you'd probably assign a do-nothing function to the error handler, since your goal here is to ignore the services that don't respond, but this example waits until it's heard from everything before quitting.
#! /usr/bin/env python
import dbus
import dbus.mainloop.glib
import functools
import glib

class VlcFinder(object):
    def __init__(self, mainloop):
        self.outstanding = 0
        self.mainloop = mainloop
        bus = dbus.SessionBus()
        dbus_proxy = bus.get_object("org.freedesktop.DBus", "/org/freedesktop/DBus")
        names = dbus_proxy.ListNames()
        for name in names:
            if name.startswith(":"):
                proxy = bus.get_object(name, "/")
                iface = dbus.Interface(proxy, "org.freedesktop.MediaPlayer")
                iface.Identity(reply_handler=functools.partial(self.reply_cb, name),
                               error_handler=functools.partial(self.error_cb, name))
                self.outstanding += 1

    def reply_cb(self, name, ver):
        print "Found {0}: {1}".format(name, ver)
        self.received_result()

    def error_cb(self, name, msg):
        self.received_result()

    def received_result(self):
        self.outstanding -= 1
        if self.outstanding == 0:
            self.mainloop.quit()

if __name__ == "__main__":
    dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
    mainloop = glib.MainLoop()
    finder = VlcFinder(mainloop)
    mainloop.run()
