Passing a function to a thread in Python

How is it possible to pass a function to a thread like a standard variable?
When I pass the function in args, I get a "not iterable" error.
Here is a summary of my code:
from CallbackThread import CallbackThread
import time
import threading

class foo:
    def runSomthing(self, callback):
        # do things
        callthread = threading.Thread(target=self.newthread, args=(callback))
        callthread.start()

    def newthread(self, callback):
        print("running")
        while True:
            # if we receive data (udp)
            data = 0
            # run mycallbacktest
            callback(data)

def mycallbacktest(i):
    print("hello world", i)

myobj = foo()
myobj.runSomthing(mycallbacktest)
I have seen things like this on similar topics:
https://gist.github.com/amirasaran/e91c7253c03518b8f7b7955df0e954bb
and I have tried something based on it, but I'm not sure what it is doing; as far as I can tell it just calls a callback when the thread has finished:
class BaseThread(threading.Thread):
    def __init__(self, callback=None, callback_args=None, *args, **kwargs):
        target = kwargs.pop('target')
        super(BaseThread, self).__init__(target=self.target_with_callback, *args, **kwargs)
        self.callback = callback
        self.method = target
        self.callback_args = callback_args

    def target_with_callback(self):
        self.method()
        if self.callback is not None:
            self.callback(*self.callback_args)
but this doesn't solve what I'm trying to do.

As suggested by MisterMiyagi, the answer was a missing comma:
it should be args=(callback,) instead of args=(callback).
(This answer is posted so the question can be marked as closed.)
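
For reference, a minimal sketch of the corrected call; the trailing comma is what makes args a one-element tuple, since (callback) is just callback with parentheses around it:

import threading

def mycallbacktest(i):
    print("hello world", i)

def worker(callback):
    # stand-in for the UDP receive loop in the question
    callback(0)

# args must be an iterable of arguments; (mycallbacktest,) is a 1-tuple
t = threading.Thread(target=worker, args=(mycallbacktest,))
t.start()
t.join()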

Related

Count expected arguments to be passed to a function in Python

The Problem
I need to count the number of arguments a function object expects to be passed. My commands are stored via their objects, so I can reference each one individually. I already tried inspect, and while it seems perfect, for some reason it cannot distinguish between 0 arguments and 1 argument: both output 1. It seems I will have to link the actual code, as it must be an error in my personal script. Here is the code:
# ============== Imports ==============
from shlex import split
from inspect import getfullargspec

# ============== Main ==============
class Command:
    def __init__(self, fn, name=None, aliases=None):
        self.name = name or fn.__name__
        self.aliases = aliases or []
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @property
    def all_names(self):
        return (self.name, *self.aliases)

# ============= Storage =============
class Commands(dict):
    def __init__(self, not_found: str='{} was not recognized.', argmismatch: str='{} takes {} arguments but {} were given.'):
        self.flag = [not_found, argmismatch]
        super().__init__()

    def _add_command(self, command):
        for cmd_name in command.all_names:
            if cmd_name in self:
                raise ValueError(f'Name or alias assigned to function {command.fn.__name__} is duplicate: {cmd_name}')
            self[cmd_name] = command

    def add_command(self, name=None, aliases=None):
        def inner_fn(fn):
            self._add_command(Command(fn, name, aliases))
            return fn
        return inner_fn

    def execute(self, user_input):
        self.command, *args = self.parse(user_input or 'no_input')
        if self.command.lower() in self:
            # THIS IS WHERE THE ISSUE HAPPENS
            print(f'Passed: {len(args)}')
            print(f'Expected: {len(getfullargspec(self[self.command])[0])}')
            if not (len(args) == len(getfullargspec(self[self.command])[0])):
                return (False, self.flag[1].format(self.command, len(getfullargspec(self[self.command])[0]), len(args)))
            self[self.command](*args)
            return (True, 'Command found!')
        else:
            return (False, self.flag[0].format(self.command))

    @staticmethod
    def parse(string):
        if (string == 'no_input'):
            return ['', '']
        return split(string)

cmd = Commands()

@cmd.add_command(name='foo')
def foo():
    print('bar')

cmd.execute('foo bar')
Not the Problem
I have already tried checking whether the list was filled with a null value; it is not.
It does this across platforms (VSCode, Repl.it, PyDroid), so it is not a weird OS issue.
I'm not sure if it could be len() treating it weirdly, but as far as I can tell a normal empty list such as [] is being counted as one here.
I'm using this for the error message, and possibly for auto-tips on what might have been wrong with the command. I would love any help and will update my post with new info and possible, correct, and incorrect solutions.
Similar Posts
This post is similar in question but the solution provided does not match what I require.
You don't need getfullargspec if you only access the zeroth element.
my_funct.__code__.co_argcount
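
For what it's worth, the always-1 result is consistent with inspecting the Command wrapper instead of the wrapped function: on a callable instance, getfullargspec falls back to __call__(self, *args, **kwargs), whose only named argument is self. A minimal sketch, assuming you reach through to the stored function via the .fn attribute:

from inspect import getfullargspec

class Command:
    def __init__(self, fn):
        self.fn = fn
    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

def no_args():
    pass

cmd = Command(no_args)
print(len(getfullargspec(cmd)[0]))     # 1: inspects Command.__call__ and counts 'self'
print(len(getfullargspec(cmd.fn)[0]))  # 0: inspects the wrapped function
print(cmd.fn.__code__.co_argcount)     # 0: the shortcut above, no inspect needed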

Using pexpect to detect and handle notifications from Bluetooth LE

I've been writing Python code to read values on a Raspberry Pi 3 Model B received over Bluetooth LE.
I can read the correct values with:
child.sendline("char-read-hnd handle")
child.expect("Characteristic value/descripto: ", timeout=5)
What I am trying to do now is check for notifications at any time, so I have a thread that searches for the expected pattern "Notification handle=" like this:
def run():
    patterns = ['Notification handle=', 'Indication handle=']
    while True:
        try:
            matched_pattern_index = child.expect(patterns, timeout=1)
            if matched_pattern_index in {0, 1}:
                print("Received Notification")
                handleNotification()
        except pexpect.TIMEOUT:
            pass
Now, throughout my main code I am always doing child.sendline calls to check for new values, as well as child.expect. The problem is that the thread's pexpect.expect call above blocks the other pexpect.expect calls in my code.
I already tried creating a second child, similar to the first one, to work inside the thread, but the result is the same.
Does anyone have an idea how I can achieve this?
Any help would be appreciated.
Thanks in advance.
I was thinking of subclassing pexpect.spawn and providing an expect_before(p, c) method that saves a list of patterns p and callback functions c, then overriding expect(p2) to prefix the list p onto the list p2 of that call before calling the real spawn.expect function.
If the real function then returns a match index i that is within the size of the list p, we can call function c[i] and loop again. When the index is beyond that list, we adjust it to be an index into list p2 and return from the call. Here's an approximation:
#!/usr/bin/python
# https://stackoverflow.com/q/51622025/5008284
from __future__ import print_function
import sys, pexpect

class Myspawn(pexpect.spawn):
    def __init__(self, *args, **kwargs):
        pexpect.spawn.__init__(self, *args, **kwargs)

    def expect_before(self, patterns, callbacks):
        self.eb_pat = patterns
        self.eb_cb = callbacks

    def expect(self, patlist, *args, **kwargs):
        numbefore = len(self.eb_pat)
        while True:
            rc = pexpect.spawn.expect(self, self.eb_pat + patlist, *args, **kwargs)
            if rc >= numbefore:
                break
            self.eb_cb[rc]()  # call callback
        return rc - numbefore

def test():
    child = Myspawn("sudo bluetoothctl", logfile=sys.stdout)
    patterns = ['Device FC:A8:9A:..:..:..', 'Device C8:FD:41:..:..:..']
    callbacks = [lambda: handler1(), lambda: handler2()]
    child.expect_before(patterns, callbacks)
    child.sendline('scan on')
    while True:
        child.expect(['[bluetooth].*?#'], timeout=5)
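
handler1 and handler2 are not defined in the approximation above; a hypothetical pair and an entry point, just to make the sketch self-contained, might look like:

def handler1():
    # placeholder: react to the first device pattern
    print("saw device 1")

def handler2():
    # placeholder: react to the second device pattern
    print("saw device 2")

if __name__ == '__main__':
    test()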

Python Running Multiple Locks across Multiple Threads

So the situation is that I have multiple methods which might be threaded simultaneously, but each needs its own lock against being re-threaded until it has run. They are established by initialising a class with some data-processing options:
class InfrequentDataDaemon(object): pass
class FrequentDataDaemon(object): pass

def addMethod(name):
    def wrapper(f):
        setattr(processor, f.__name__, staticmethod(f))
        return f
    return wrapper

class DataProcessors(object):
    lock = threading.Lock()

    def __init__(self, options):
        self.common_settings = options['common_settings']
        self.data_processing_configurations = options['data_processing_configurations']  # configs for each processing method
        self.data_processing_types = options['data_processing_types']
        self.Data_Processing_Functions = {}

        # I __init__ each processing method as a separate function so that it can be locked
        for type in options['data_processing_types']:
            def bindFunction1(name):
                def func1(self, data=None, lock=None):
                    config = self.data_processing_configurations[data['type']]  # get the right config for the data type
                    with lock:
                        FetchDataBaseStuff(data['type'])
                        # I don't want this to run more than once at a time per data-processing type,
                        # but it's fine if multiple DoSomethings run at once, as long as each data type is different!
                        DoSomething(data, config)
                        WriteToDataBase(data['type'])
                func1.__name__ = "Processing_for_{}".format(type)
                self.Data_Processing_Functions[func1.__name__] = func1  # add this function to the dictionary
            bindFunction1(type)

        # Then I add some methods to a daemon that will check whether our DataProcessors need to be called
        def fast_process_types(data):
            if not example_condition is True: return
            if not data['type'] in self.data_processing_types: return  # check that we handle this type of data
            threading.Thread(target=self.Data_Processing_Functions["Processing_for_{}".format(data['type'])],
                             args=(self, data, lock)).start()

        def slow_process_types(data):
            if not some_other_condition is True: return
            if not data['type'] in self.data_processing_types: return  # check that we handle this type of data
            threading.Thread(target=self.Data_Processing_Functions["Processing_for_{}".format(data['type'])],
                             args=(self, data, lock)).start()

        addMethod(InfrequentDataDaemon)(slow_process_types)
        addMethod(FrequentDataDaemon)(fast_process_types)
The idea is to lock each method in DataProcessors.Data_Processing_Functions so that each method is only accessed by one thread at a time (and the remaining threads for the same method are queued). How does locking need to be set up to achieve this effect?
I'm not sure I completely follow what you're trying to do here, but could you just create a separate threading.Lock object for each type?
class DataProcessors(object):
    def __init__(self, options):
        self.common_settings = options['common_settings']
        self.data_processing_configurations = options['data_processing_configurations']  # configs for each processing method
        self.data_processing_types = options['data_processing_types']
        self.Data_Processing_Functions = {}
        self.locks = {}

        # I __init__ each processing method as a separate function so that it can be locked
        for type in options['data_processing_types']:
            self.locks[type] = threading.Lock()
            def bindFunction1(name):
                def func1(self, data=None):
                    config = self.data_processing_configurations[data['type']]  # get the right config for the data type
                    with self.locks[data['type']]:
                        FetchDataBaseStuff(data['type'])
                        DoSomething(data, config)
                        WriteToDataBase(data['type'])
                func1.__name__ = "Processing_for_{}".format(type)
                self.Data_Processing_Functions[func1.__name__] = func1  # add this function to the dictionary
            bindFunction1(type)

        # Then I add some methods to a daemon that will check whether our DataProcessors need to be called
        def fast_process_types(data):
            if not example_condition is True: return
            if not data['type'] in self.data_processing_types: return  # check that we handle this type of data
            threading.Thread(target=self.Data_Processing_Functions["Processing_for_{}".format(data['type'])],
                             args=(self, data)).start()

        def slow_process_types(data):
            if not some_other_condition is True: return
            if not data['type'] in self.data_processing_types: return  # check that we handle this type of data
            threading.Thread(target=self.Data_Processing_Functions["Processing_for_{}".format(data['type'])],
                             args=(self, data)).start()

        addMethod(InfrequentDataDaemon)(slow_process_types)
        addMethod(FrequentDataDaemon)(fast_process_types)
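
A stripped-down sketch of the same per-type locking idea, outside the asker's class (all names here are illustrative): each data type gets its own Lock, so calls for the same type serialise while different types run concurrently.

import threading
import time

locks = {t: threading.Lock() for t in ('fast', 'slow')}  # one lock per data type

def process(data_type, n):
    with locks[data_type]:      # serialises work within one type only
        print(data_type, n, 'start')
        time.sleep(0.1)         # stand-in for the database work
        print(data_type, n, 'done')

threads = [threading.Thread(target=process, args=(t, n))
           for t in ('fast', 'slow') for n in range(2)]
for th in threads:
    th.start()
for th in threads:
    th.join()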

Python observer/observable library [duplicate]

Are there any exemplary examples of the GoF Observer implemented in Python? I have a bit of code which currently has bits of debugging code laced through the key class (currently generating messages to stderr if a magic env is set). Additionally, the class has an interface for incrementally returning results as well as storing them (in memory) for post-processing. (The class itself is a job manager for concurrently executing commands on remote machines over ssh.)
Currently the usage of the class looks something like:
job = SSHJobMan(hostlist, cmd)
job.start()
while not job.done():
    for each in job.poll():
        incrementally_process(job.results[each])
    time.sleep(0.2)  # or other more useful work
post_process(job.results)
An alternative usage model is:
job = SSHJobMan(hostlist, cmd)
job.wait() # implicitly performs a start()
process(job.results)
This all works fine for the current utility. However, it does lack flexibility. For example, I currently support a brief output format or a progress bar as incremental results, and I also support brief, complete, and "merged message" outputs for the post_process() function.
However, I'd like to support multiple results/output streams (progress bar to the terminal, debugging and warnings to a log file, outputs from successful jobs to one file/directory, error messages and other results from non-successful jobs to another, etc).
This sounds like a situation that calls for Observer ... have instances of my class accept registration from other objects and call them back with specific types of events as they occur.
I'm looking at PyPubSub since I saw several references to that in SO related questions. I'm not sure I'm ready to add the external dependency to my utility but I could see value in using their interface as a model for mine if that's going to make it easier for others to use. (The project is intended as both a standalone command line utility and a class for writing other scripts/utilities).
In short I know how to do what I want ... but there are numerous ways to accomplish it. I want suggestions on what's most likely to work for other users of the code in the long run.
The code itself is at: classh.
However it does lack flexibility.
Well... actually, this looks like a good design to me if an asynchronous API is what you want. It usually is. Maybe all you need is to switch from stderr to Python's logging module, which has a sort of publish/subscribe model of its own, what with Logger.addHandler() and so on.
If you do want to support observers, my advice is to keep it simple. You really only need a few lines of code.
class Event(object):
    pass

class Observable(object):
    def __init__(self):
        self.callbacks = []

    def subscribe(self, callback):
        self.callbacks.append(callback)

    def fire(self, **attrs):
        e = Event()
        e.source = self
        for k, v in attrs.items():
            setattr(e, k, v)
        for fn in self.callbacks:
            fn(e)
Your Job class can subclass Observable. When something of interest happens, call self.fire(type="progress", percent=50) or the like.
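
A quick sketch of that usage (the Job subclass and event names here are illustrative, not from the answer):

class Job(Observable):
    def run(self):
        # ... do some work, then notify subscribers
        self.fire(type="progress", percent=50)
        self.fire(type="finished")

job = Job()
job.subscribe(lambda e: print(e.type, getattr(e, "percent", "")))
job.run()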
I think people in the other answers overdo it. You can easily achieve events in Python with less than 15 lines of code.
You simply have two classes: Event and Observer. Any class that wants to listen for an event needs to inherit from Observer and register to listen (observe) for a specific event. When an Event is instantiated and fired, all observers listening for that event will run the specified callback functions.
class Observer():
    _observers = []

    def __init__(self):
        self._observers.append(self)
        self._observables = {}

    def observe(self, event_name, callback):
        self._observables[event_name] = callback

class Event():
    def __init__(self, name, data, autofire=True):
        self.name = name
        self.data = data
        if autofire:
            self.fire()

    def fire(self):
        for observer in Observer._observers:
            if self.name in observer._observables:
                observer._observables[self.name](self.data)
Example:
class Room(Observer):
    def __init__(self):
        print("Room is ready.")
        Observer.__init__(self)  # Observer's init needs to be called

    def someone_arrived(self, who):
        print(who + " has arrived!")

room = Room()
room.observe('someone arrived', room.someone_arrived)
Event('someone arrived', 'Lenard')
Output:
Room is ready.
Lenard has arrived!
A few more approaches...
Example: the logging module
Maybe all you need is to switch from stderr to Python's logging module, which has a powerful publish/subscribe model.
It's easy to get started producing log records.
# producer
import logging

log = logging.getLogger("myjobs")  # that's all the setup you need

class MyJob(object):
    def run(self):
        log.info("starting job")
        n = 10
        for i in range(n):
            log.info("%.1f%% done" % (100.0 * i / n))
        log.info("work complete")
On the consumer side there's a bit more work. Unfortunately configuring logger output takes, like, 7 whole lines of code to do. ;)
# consumer
import myjobs, sys, logging

if user_wants_log_output:
    ch = logging.StreamHandler(sys.stderr)
    ch.setLevel(logging.INFO)
    formatter = logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    ch.setFormatter(formatter)
    myjobs.log.addHandler(ch)
    myjobs.log.setLevel(logging.INFO)

myjobs.MyJob().run()
On the other hand there's an amazing amount of stuff in the logging package. If you ever need to send log data to a rotating set of files, an email address, and the Windows Event Log, you're covered.
Example: simplest possible observer
But you don't need to use any library at all. An extremely simple way to support observers is to call a method that does nothing.
# producer
class MyJob(object):
    def on_progress(self, pct):
        """Called when progress is made. pct is the percent complete.
        By default this does nothing. The user may override this method
        or even just assign to it."""
        pass

    def run(self):
        n = 10
        for i in range(n):
            self.on_progress(100.0 * i / n)
        self.on_progress(100.0)
# consumer
import sys, myjobs

job = myjobs.MyJob()
job.on_progress = lambda pct: sys.stdout.write("%.1f%% done\n" % pct)
job.run()
Sometimes instead of writing a lambda, you can just say job.on_progress = progressBar.update, which is nice.
This is about as simple as it gets. One drawback is that it doesn't naturally support multiple listeners subscribing to the same events.
Example: C#-like events
With a bit of support code, you can get C#-like events in Python. Here's the code:
# glue code
class event(object):
    def __init__(self, func):
        self.__doc__ = func.__doc__
        self._key = ' ' + func.__name__

    def __get__(self, obj, cls):
        try:
            return obj.__dict__[self._key]
        except KeyError:
            be = obj.__dict__[self._key] = boundevent()
            return be

class boundevent(object):
    def __init__(self):
        self._fns = []

    def __iadd__(self, fn):
        self._fns.append(fn)
        return self

    def __isub__(self, fn):
        self._fns.remove(fn)
        return self

    def __call__(self, *args, **kwargs):
        for f in self._fns[:]:
            f(*args, **kwargs)
The producer declares the event using a decorator:
# producer
class MyJob(object):
    @event
    def progress(pct):
        """Called when progress is made. pct is the percent complete."""

    def run(self):
        n = 10
        for i in range(n + 1):
            self.progress(100.0 * i / n)

# consumer
import sys, myjobs

job = myjobs.MyJob()
job.progress += lambda pct: sys.stdout.write("%.1f%% done\n" % pct)
job.run()
This works exactly like the "simple observer" code above, but you can add as many listeners as you like using +=. (Unlike C#, there are no event handler types, you don't have to new EventHandler(foo.bar) when subscribing to an event, and you don't have to check for null before firing the event. Like C#, events do not squelch exceptions.)
How to choose
If logging does everything you need, use that. Otherwise do the simplest thing that works for you. The key thing to note is that you don't need to take on a big external dependency.
How about an implementation where objects aren't kept alive just because they're observing something? Below please find an implementation of the observer pattern with the following features:
Usage is pythonic. To add an observer to a bound method .bar of instance foo, just do foo.bar.addObserver(observer).
Observers are not kept alive by virtue of being observers. In other words, the observer code uses no strong references.
No sub-classing necessary (descriptors ftw).
Can be used with unhashable types.
Can be used as many times as you want in a single class.
(bonus) As of today the code exists in a proper downloadable, installable package on github.
Here's the code (the github package or PyPI package have the most up to date implementation):
import weakref
import functools

class ObservableMethod(object):
    """
    A proxy for a bound method which can be observed.

    I behave like a bound method, but other bound methods can subscribe to be
    called whenever I am called.
    """

    def __init__(self, obj, func):
        self.func = func
        functools.update_wrapper(self, func)
        self.objectWeakRef = weakref.ref(obj)
        self.callbacks = {}  # observing object ID -> (weak ref, method names)

    def addObserver(self, boundMethod):
        """
        Register a bound method to observe this ObservableMethod.

        The observing method will be called whenever this ObservableMethod is
        called, and with the same arguments and keyword arguments. If a
        boundMethod has already been registered as a callback, trying to add
        it again does nothing. In other words, there is no way to sign up an
        observer to be called back multiple times.
        """
        obj = boundMethod.__self__
        ID = id(obj)
        if ID in self.callbacks:
            s = self.callbacks[ID][1]
        else:
            wr = weakref.ref(obj, Cleanup(ID, self.callbacks))
            s = set()
            self.callbacks[ID] = (wr, s)
        s.add(boundMethod.__name__)

    def discardObserver(self, boundMethod):
        """
        Un-register a bound method.
        """
        obj = boundMethod.__self__
        if id(obj) in self.callbacks:
            self.callbacks[id(obj)][1].discard(boundMethod.__name__)

    def __call__(self, *arg, **kw):
        """
        Invoke the method which I proxy, and all of its callbacks.

        The callbacks are called with the same *arg and **kw as the main
        method.
        """
        result = self.func(self.objectWeakRef(), *arg, **kw)
        for ID in self.callbacks:
            wr, methodNames = self.callbacks[ID]
            obj = wr()
            for methodName in methodNames:
                getattr(obj, methodName)(*arg, **kw)
        return result

    @property
    def __self__(self):
        """
        Get a strong reference to the object owning this ObservableMethod.

        This is needed so that ObservableMethod instances can observe other
        ObservableMethod instances.
        """
        return self.objectWeakRef()

class ObservableMethodDescriptor(object):
    def __init__(self, func):
        """
        To each instance of the class using this descriptor, I associate an
        ObservableMethod.
        """
        self.instances = {}  # instance id -> (weak ref, ObservableMethod)
        self._func = func

    def __get__(self, inst, cls):
        if inst is None:
            return self
        ID = id(inst)
        if ID in self.instances:
            wr, om = self.instances[ID]
            if not wr():
                msg = "Object id %d should have been cleaned up" % (ID,)
                raise RuntimeError(msg)
        else:
            wr = weakref.ref(inst, Cleanup(ID, self.instances))
            om = ObservableMethod(inst, self._func)
            self.instances[ID] = (wr, om)
        return om

    def __set__(self, inst, val):
        raise RuntimeError("Assigning to ObservableMethod not supported")

def event(func):
    return ObservableMethodDescriptor(func)

class Cleanup(object):
    """
    I remove elements from a dict whenever I'm called.

    Use me as a weakref.ref callback to remove an object's id from a dict
    when that object is garbage collected.
    """
    def __init__(self, key, d):
        self.key = key
        self.d = d

    def __call__(self, wr):
        del self.d[self.key]
To use this we just decorate methods we want to make observable with @event. Here's an example:
class Foo(object):
    def __init__(self, name):
        self.name = name

    @event
    def bar(self):
        print("%s called bar" % (self.name,))

    def baz(self):
        print("%s called baz" % (self.name,))

a = Foo('a')
b = Foo('b')
a.bar.addObserver(b.bar)
a.bar()
From wikipedia:
from collections import defaultdict

class Observable(defaultdict):
    def __init__(self):
        defaultdict.__init__(self, object)

    def emit(self, *args):
        '''Pass parameters to all observers and update states.'''
        for subscriber in self:
            response = subscriber(*args)
            self[subscriber] = response

    def subscribe(self, subscriber):
        '''Add a new subscriber to self.'''
        self[subscriber]

    def stat(self):
        '''Return a tuple containing the state of each observer.'''
        return tuple(self.values())
The Observable is used like this.
myObservable = Observable()

# subscribe some inlined functions.
# myObservable[lambda x, y: x * y] would also work here.
myObservable.subscribe(lambda x, y: x * y)
myObservable.subscribe(lambda x, y: float(x) / y)
myObservable.subscribe(lambda x, y: x + y)
myObservable.subscribe(lambda x, y: x - y)

# emit parameters to each observer
myObservable.emit(6, 2)

# get updated values
myObservable.stat()  # returns (12, 3.0, 8, 4) on Python 3.7+, where dicts keep insertion order
Based on Jason's answer, I implemented the C#-like events example as a fully-fledged python module including documentation and tests. I love fancy pythonic stuff :)
So, if you want some ready-to-use solution, you can just use the code on github.
Example: twisted log observers
To register an observer yourCallable() (a callable that accepts a dictionary) to receive all log events (in addition to any other observers):
twisted.python.log.addObserver(yourCallable)
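An observer here is just a callable that accepts the event dictionary; a minimal sketch, assuming Twisted's legacy log API:

from twisted.python import log

def yourCallable(event_dict):
    # each log event arrives as a dict; 'message' holds the text tuple
    print("observed:", event_dict.get('message'))

log.addObserver(yourCallable)
log.msg("hello observers")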
Example: complete producer/consumer example
From Twisted-Python mailing list:
#!/usr/bin/env python
"""Serve as a sample implementation of a twisted producer/consumer
system, with a simple TCP server which asks the user how many random
integers they want, and it sends the result set back to the user, one
result per line."""

import random

from zope.interface import implements
from twisted.internet import interfaces, reactor
from twisted.internet.protocol import Factory
from twisted.protocols.basic import LineReceiver

class Producer:
    """Send back the requested number of random integers to the client."""

    implements(interfaces.IPushProducer)

    def __init__(self, proto, cnt):
        self._proto = proto
        self._goal = cnt
        self._produced = 0
        self._paused = False

    def pauseProducing(self):
        """When we've produced data too fast, pauseProducing() will be
        called (reentrantly from within resumeProducing's transport.write
        method, most likely), so set a flag that causes production to pause
        temporarily."""
        self._paused = True
        print('pausing connection from %s' % (self._proto.transport.getPeer()))

    def resumeProducing(self):
        self._paused = False
        while not self._paused and self._produced < self._goal:
            next_int = random.randint(0, 10000)
            self._proto.transport.write('%d\r\n' % (next_int))
            self._produced += 1
        if self._produced == self._goal:
            self._proto.transport.unregisterProducer()
            self._proto.transport.loseConnection()

    def stopProducing(self):
        pass

class ServeRandom(LineReceiver):
    """Serve up random data."""

    def connectionMade(self):
        print('connection made from %s' % (self.transport.getPeer()))
        self.transport.write('how many random integers do you want?\r\n')

    def lineReceived(self, line):
        cnt = int(line.strip())
        producer = Producer(self, cnt)
        self.transport.registerProducer(producer, True)
        producer.resumeProducing()

    def connectionLost(self, reason):
        print('connection lost from %s' % (self.transport.getPeer()))

factory = Factory()
factory.protocol = ServeRandom
reactor.listenTCP(1234, factory)
print('listening on 1234...')
reactor.run()
OP asks "Are there any exemplary examples of the GoF Observer implemented in Python?"
This is an example in Python 3.7. This Observable class meets the requirement of creating a relationship between one observable and many observers while remaining independent of their structure.
from functools import partial
from dataclasses import dataclass, field
import sys
from typing import List, Callable

@dataclass
class Observable:
    observers: List[Callable] = field(default_factory=list)

    def register(self, observer: Callable):
        self.observers.append(observer)

    def deregister(self, observer: Callable):
        self.observers.remove(observer)

    def notify(self, *args, **kwargs):
        for observer in self.observers:
            observer(*args, **kwargs)

def usage_demo():
    observable = Observable()

    # Register two anonymous observers using lambda.
    observable.register(
        lambda *args, **kwargs: print(f'Observer 1 called with args={args}, kwargs={kwargs}'))
    observable.register(
        lambda *args, **kwargs: print(f'Observer 2 called with args={args}, kwargs={kwargs}'))

    # Create an observer function, register it, then deregister it.
    def callable_3():
        print('Observer 3 NOT called.')

    observable.register(callable_3)
    observable.deregister(callable_3)

    # Create a general purpose observer function and register four observers.
    def callable_x(*args, **kwargs):
        print(f'{args[0]} observer called with args={args}, kwargs={kwargs}')

    for gui_field in ['Form field 4', 'Form field 5', 'Form field 6', 'Form field 7']:
        observable.register(partial(callable_x, gui_field))

    observable.notify('test')

if __name__ == '__main__':
    sys.exit(usage_demo())
A functional approach to observer design:
from functools import wraps

def add_listener(obj, method_name, listener):
    # Get any existing listeners
    listener_attr = method_name + '_listeners'
    listeners = getattr(obj, listener_attr, None)

    # If this is the first listener, then set up the method wrapper
    if not listeners:
        listeners = [listener]
        setattr(obj, listener_attr, listeners)

        # Get the object's method
        method = getattr(obj, method_name)

        @wraps(method)
        def method_wrapper(*args, **kwargs):
            method(*args, **kwargs)
            for l in listeners:
                l(obj, *args, **kwargs)  # listener also receives the object as an argument

        # Replace the original method with the wrapper
        setattr(obj, method_name, method_wrapper)
    else:
        # Event is already set up, so just add another listener
        listeners.append(listener)

def remove_listener(obj, method_name, listener):
    # Get any existing listeners
    listener_attr = method_name + '_listeners'
    listeners = getattr(obj, listener_attr, None)

    if listeners:
        # Remove the listener
        next((listeners.pop(i)
              for i, l in enumerate(listeners)
              if l == listener),
             None)

        # If this was the last listener, then remove the method wrapper
        if not listeners:
            method = getattr(obj, method_name)
            delattr(obj, listener_attr)
            setattr(obj, method_name, method.__wrapped__)
These methods can then be used to add a listener to any class method. For example:
class MyClass(object):
    def __init__(self, prop):
        self.prop = prop

    def some_method(self, num, string):
        print('method:', num, string)

def listener_method(obj, num, string):
    print('listener:', num, string, obj.prop)

my = MyClass('my_prop')

add_listener(my, 'some_method', listener_method)
my.some_method(42, 'with listener')

remove_listener(my, 'some_method', listener_method)
my.some_method(42, 'without listener')
And the output is:
method: 42 with listener
listener: 42 with listener my_prop
method: 42 without listener

Updating a TKinter GUI from a multiprocessing calculation

I'm creating a GUI for a python simulator. The GUI provides tools to set up the simulation and run it. While the simulation is running I want to pass progress information to the GUI and have it displayed on a Label in my simulation_frame. Because the simulations need to be run with multiprocessing, I'm using a Queue to pass the updated information back to the GUI.
The way I have it set up, running the simulations blocks the Tk mainloop since I need to be able to close my Pool at the end of the call. I'm calling update_idletasks() to force the GUI to update the progress information.
This seems to me like an inelegant and potentially risky solution. Moreover, while it works in Ubuntu, it does not seem to work in Windows XP: the window goes blank after a second or so of running. I may be able to make it work in Windows by calling update() rather than update_idletasks(), but that seems even worse to me.
Is there a better solution?
The relevant code:
# (excerpt from the GUI class; assumes e.g. import multiprocessing as mp,
#  Tkinter's StringVar/Label/W/N, and Queue.Empty are in scope)
sims = []
queues = []
svars = []
names = []
i = 0
manager = mp.Manager()
for config in self.configs:
    name, file, num = config.get()
    j = 0
    for _ in range(num):
        # progress monitor label
        q = manager.Queue()
        s_var = StringVar()
        label = Label(self.sim_frame, textvariable=s_var, bg="white")
        s_var.set("%d: Not Started" % i)
        label.grid(row=i, column=0, sticky=W+N)
        self.sim_labels.append(label)
        queues.append(q)
        svars.append(s_var)
        names.append("%s-%d" % (name, j))
        sims.append(("%s-%d" % (name, j), file, data, verbose, q))
        i += 1
        j += 1
self.update()

# The progress tracking is pretty hacky.
pool = mp.Pool(parallel)
num_sims = len(sims)

# start simulating
tracker = pool.map_async(run_1_sim, sims)
while not tracker.ready():
    for i in range(num_sims):
        q = queues[i]
        try:
            gen = q.get(timeout=.001)
            # if the sim has updated, update the label
            svars[i].set(gen)
            self.update()
        except Empty:
            pass

# The results of the map, if necessary
tracker.get()

def update(self):
    """
    Redraws everything
    """
    self.master.update_idletasks()

def run_1_sim(args):
    """
    Runs one simulation with the specified args, output updates to the supplied
    pipe every generation
    """
    name, config, data, verbose, q = args
    sim = Simulation(config, name=name, data=data)
    generation = 0
    q.put(sim.name + ": 0")
    try:
        while sim.run(verbose=verbose, log=True, generations=sim_step):
            generation += sim_step
            q.put(sim.name + ": " + str(generation))
    except Exception as err:
        print(err)
def update(self):
"""
Redraws everything
"""
self.master.update_idletasks()
def run_1_sim(args):
"""
Runs one simulation with the specified args, output updates to the supplied
pipe every generation
"""
name,config,data, verbose, q = args
sim = Simulation(config, name=name, data = data)
generation = 0
q.put(sim.name + ": 0")
try:
while sim.run(verbose=verbose, log=True, generations = sim_step):
generation += sim_step
q.put(sim.name + ": " + str(generation))
except Exception as err:
print err
This may or may not be helpful to you, but it is possible to make tkinter thread-safe by ensuring that its code and methods are executed on the particular thread the root was instantiated on. One project that experimented with the concept can be found over on the Python Cookbook as recipe 577633 (Directory Pruner 2). The code below comes from lines 76 - 253 and is fairly easy to extend with widgets.
Primary Thread-safety Support
# Import several GUI libraries.
import tkinter.ttk
import tkinter.filedialog
import tkinter.messagebox
# Import other needed modules.
import queue
import _thread
import operator

################################################################################

class AffinityLoop:

    "Restricts code execution to thread that instance was created on."

    __slots__ = '__action', '__thread'

    def __init__(self):
        "Initialize AffinityLoop with job queue and thread identity."
        self.__action = queue.Queue()
        self.__thread = _thread.get_ident()

    def run(self, func, *args, **keywords):
        "Run function on creating thread and return result."
        if _thread.get_ident() == self.__thread:
            self.__run_jobs()
            return func(*args, **keywords)
        else:
            job = self.__Job(func, args, keywords)
            self.__action.put_nowait(job)
            return job.result

    def __run_jobs(self):
        "Run all pending jobs currently in the job queue."
        while not self.__action.empty():
            job = self.__action.get_nowait()
            job.execute()

    ########################################################################

    class __Job:

        "Store information to run a job at a later time."

        __slots__ = ('__func', '__args', '__keywords',
                     '__error', '__mutex', '__value')

        def __init__(self, func, args, keywords):
            "Initialize the job's info and ready for execution."
            self.__func = func
            self.__args = args
            self.__keywords = keywords
            self.__error = False
            self.__mutex = _thread.allocate_lock()
            self.__mutex.acquire()

        def execute(self):
            "Run the job, store any error, and return to sender."
            try:
                self.__value = self.__func(*self.__args, **self.__keywords)
            except Exception as error:
                self.__error = True
                self.__value = error
            self.__mutex.release()

        @property
        def result(self):
            "Return execution result or raise an error."
            self.__mutex.acquire()
            if self.__error:
                raise self.__value
            return self.__value

################################################################################

class _ThreadSafe:

    "Create a thread-safe GUI class for safe cross-threaded calls."

    ROOT = tkinter.Tk

    def __init__(self, master=None, *args, **keywords):
        "Initialize a thread-safe wrapper around a GUI base class."
        if master is None:
            if self.BASE is not self.ROOT:
                raise ValueError('Widget must have a master!')
            self.__job = AffinityLoop()  # Use Affinity() if it does not break.
            self.__schedule(self.__initialize, *args, **keywords)
        else:
            self.master = master
            self.__job = master.__job
            self.__schedule(self.__initialize, master, *args, **keywords)

    def __initialize(self, *args, **keywords):
        "Delegate instance creation to later time if necessary."
        self.__obj = self.BASE(*args, **keywords)

    ########################################################################

    # Provide a framework for delaying method execution when needed.

    def __schedule(self, *args, **keywords):
        "Schedule execution of a method till later if necessary."
        return self.__job.run(self.__run, *args, **keywords)

    @classmethod
    def __run(cls, func, *args, **keywords):
        "Execute the function after converting the arguments."
        args = tuple(cls.unwrap(i) for i in args)
        keywords = dict((k, cls.unwrap(v)) for k, v in keywords.items())
        return func(*args, **keywords)

    @staticmethod
    def unwrap(obj):
        "Unpack inner objects wrapped by _ThreadSafe instances."
        return obj.__obj if isinstance(obj, _ThreadSafe) else obj

    ########################################################################

    # Allow access to and manipulation of wrapped instance's settings.

    def __getitem__(self, key):
        "Get a configuration option from the underlying object."
        return self.__schedule(operator.getitem, self, key)

    def __setitem__(self, key, value):
        "Set a configuration option on the underlying object."
        return self.__schedule(operator.setitem, self, key, value)

    ########################################################################

    # Create attribute proxies for methods and allow their execution.

    def __getattr__(self, name):
        "Create a requested attribute and return cached result."
        attr = self.__Attr(self.__callback, (name,))
        setattr(self, name, attr)
        return attr

    def __callback(self, path, *args, **keywords):
        "Schedule execution of named method from attribute proxy."
        return self.__schedule(self.__method, path, *args, **keywords)

    def __method(self, path, *args, **keywords):
        "Extract a method and run it with the provided arguments."
        method = self.__obj
        for name in path:
            method = getattr(method, name)
        return method(*args, **keywords)

    ########################################################################

    class __Attr:

        "Save an attribute's name and wait for execution."

        __slots__ = '__callback', '__path'

        def __init__(self, callback, path):
            "Initialize proxy with callback and method path."
            self.__callback = callback
            self.__path = path

        def __call__(self, *args, **keywords):
            "Run a known method with the given arguments."
            return self.__callback(self.__path, *args, **keywords)

        def __getattr__(self, name):
            "Generate a proxy object for a sub-attribute."
            if name in {'__func__', '__name__'}:
                # Hack for the "tkinter.__init__.Misc._register" method.
                raise AttributeError('This is not a real method!')
            return self.__class__(self.__callback, self.__path + (name,))

################################################################################

# Provide thread-safe classes to be used from tkinter.

class Tk(_ThreadSafe): BASE = tkinter.Tk
class Frame(_ThreadSafe): BASE = tkinter.ttk.Frame
class Button(_ThreadSafe): BASE = tkinter.ttk.Button
class Entry(_ThreadSafe): BASE = tkinter.ttk.Entry
class Progressbar(_ThreadSafe): BASE = tkinter.ttk.Progressbar
class Treeview(_ThreadSafe): BASE = tkinter.ttk.Treeview
class Scrollbar(_ThreadSafe): BASE = tkinter.ttk.Scrollbar
class Sizegrip(_ThreadSafe): BASE = tkinter.ttk.Sizegrip
class Menu(_ThreadSafe): BASE = tkinter.Menu
class Directory(_ThreadSafe): BASE = tkinter.filedialog.Directory
class Message(_ThreadSafe): BASE = tkinter.messagebox.Message
If you read the rest of the application, you will find that it is built with the widgets defined as _ThreadSafe variants that you are used to seeing in other tkinter applications. As method calls come in from various threads, they are automatically held until it becomes possible to execute those calls on the creating thread. Note how the mainloop is replaced by way of lines 291 - 298 and 326 - 336.
Notice the NoDefaultRoot & main_loop Calls
@classmethod
def main(cls):
    "Create an application containing a single TrimDirView widget."
    tkinter.NoDefaultRoot()
    root = cls.create_application_root()
    cls.attach_window_icon(root, ICON)
    view = cls.setup_class_instance(root)
    cls.main_loop(root)
main_loop Allows Threads To Execute
@staticmethod
def main_loop(root):
    "Process all GUI events according to tkinter's settings."
    # Note: time.clock() was removed in Python 3.8; time.perf_counter()
    # is the modern equivalent.
    target = time.clock()
    while True:
        try:
            root.update()
        except tkinter.TclError:
            break
        target += tkinter._tkinter.getbusywaitinterval() / 1000
        time.sleep(max(target - time.clock(), 0))
