Updating a Tkinter GUI from a multiprocessing calculation - python

I'm creating a GUI for a python simulator. The GUI provides tools to set up the simulation and run it. While the simulation is running I want to pass progress information to the GUI and have it displayed on a Label in my simulation_frame. Because the simulations need to be run with multiprocessing, I'm using a Queue to pass the updated information back to the GUI.
The way I have it set up, running the simulations blocks the Tk mainloop since I need to be able to close my Pool at the end of the call. I'm calling update_idletasks() to force the GUI to update the progress information.
This seems to me like an inelegant and potentially risky solution. Moreover, while it works in Ubuntu, it does not seem to work in Windows XP--the window goes blank after a second or so of running. I may be able to make it work in Windows by calling update() rather than update_idletasks(), but that seems even worse to me.
Is there a better solution?
The relevant code:
sims = []
queues = []
svars = []
names = []
i = 0
manager = mp.Manager()
for config in self.configs:
    name, file, num = config.get()
    j = 0
    for _ in range(num):
        # progress monitor label
        q = manager.Queue()
        s_var = StringVar()
        label = Label(self.sim_frame, textvariable = s_var, bg = "white")
        s_var.set("%d: Not Started"%i)
        label.grid(row = i, column = 0, sticky = W+N)
        self.sim_labels.append(label)
        queues.append(q)
        svars.append(s_var)
        names.append("%s-%d"%(name, j))
        sims.append(("%s-%d"%(name, j), file, data, verbose, q))
        i += 1
        j += 1
self.update()

# The progress tracking is pretty hacky.
pool = mp.Pool(parallel)
num_sims = len(sims)

# start simulating
tracker = pool.map_async(run_1_sim, sims)
while not tracker.ready():
    for i in range(num_sims):
        q = queues[i]
        try:
            gen = q.get(timeout = .001)
            # if the sim has updated, update the label
            #print gen
            svars[i].set(gen)
            self.update()
        except Empty:
            pass

# The results of the map, if necessary
tracker.get()
def update(self):
    """
    Redraws everything
    """
    self.master.update_idletasks()

def run_1_sim(args):
    """
    Runs one simulation with the specified args, output updates to the supplied
    pipe every generation
    """
    name, config, data, verbose, q = args
    sim = Simulation(config, name=name, data = data)
    generation = 0
    q.put(sim.name + ": 0")
    try:
        while sim.run(verbose=verbose, log=True, generations = sim_step):
            generation += sim_step
            q.put(sim.name + ": " + str(generation))
    except Exception as err:
        print err
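For reference, the usual way to avoid blocking the mainloop is to let Tk poll the queues itself with after() and close the pool only once the workers finish. A minimal sketch, reusing the names from the snippet above (svars, queues, tracker, pool, Empty) rather than a drop-in implementation:
def poll_progress(self):
    """Poll each simulation queue once, update its label, and reschedule."""
    for s_var, q in zip(svars, queues):
        try:
            s_var.set(q.get_nowait())
        except Empty:
            pass
    if tracker.ready():
        pool.close()
        pool.join()
    else:
        self.master.after(100, self.poll_progress)  # poll again in 100 ms
# Call self.poll_progress() once, right after pool.map_async(...), and
# return to the mainloop instead of waiting on the pool.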

This may or may not be helpful to you, but it is possible to make tkinter thread-safe by ensuring that its code and methods are executed on the particular thread the root was instantiated on. One project that experimented with the concept can be found over on the Python Cookbook as recipe 577633 (Directory Pruner 2). The code below comes from lines 76 - 253 and is fairly easy to extend with widgets.
Primary Thread-safety Support
# Import several GUI libraries.
import tkinter.ttk
import tkinter.filedialog
import tkinter.messagebox
# Import other needed modules.
import queue
import _thread
import operator

################################################################################

class AffinityLoop:

    "Restricts code execution to thread that instance was created on."

    __slots__ = '__action', '__thread'

    def __init__(self):
        "Initialize AffinityLoop with job queue and thread identity."
        self.__action = queue.Queue()
        self.__thread = _thread.get_ident()

    def run(self, func, *args, **keywords):
        "Run function on creating thread and return result."
        if _thread.get_ident() == self.__thread:
            self.__run_jobs()
            return func(*args, **keywords)
        else:
            job = self.__Job(func, args, keywords)
            self.__action.put_nowait(job)
            return job.result

    def __run_jobs(self):
        "Run all pending jobs currently in the job queue."
        while not self.__action.empty():
            job = self.__action.get_nowait()
            job.execute()

    ########################################################################

    class __Job:

        "Store information to run a job at a later time."

        __slots__ = ('__func', '__args', '__keywords',
                     '__error', '__mutex', '__value')

        def __init__(self, func, args, keywords):
            "Initialize the job's info and ready for execution."
            self.__func = func
            self.__args = args
            self.__keywords = keywords
            self.__error = False
            self.__mutex = _thread.allocate_lock()
            self.__mutex.acquire()

        def execute(self):
            "Run the job, store any error, and return to sender."
            try:
                self.__value = self.__func(*self.__args, **self.__keywords)
            except Exception as error:
                self.__error = True
                self.__value = error
            self.__mutex.release()

        @property
        def result(self):
            "Return execution result or raise an error."
            self.__mutex.acquire()
            if self.__error:
                raise self.__value
            return self.__value

################################################################################

class _ThreadSafe:

    "Create a thread-safe GUI class for safe cross-threaded calls."

    ROOT = tkinter.Tk

    def __init__(self, master=None, *args, **keywords):
        "Initialize a thread-safe wrapper around a GUI base class."
        if master is None:
            if self.BASE is not self.ROOT:
                raise ValueError('Widget must have a master!')
            self.__job = AffinityLoop()  # Use Affinity() if it does not break.
            self.__schedule(self.__initialize, *args, **keywords)
        else:
            self.master = master
            self.__job = master.__job
            self.__schedule(self.__initialize, master, *args, **keywords)

    def __initialize(self, *args, **keywords):
        "Delegate instance creation to later time if necessary."
        self.__obj = self.BASE(*args, **keywords)

    ########################################################################

    # Provide a framework for delaying method execution when needed.

    def __schedule(self, *args, **keywords):
        "Schedule execution of a method till later if necessary."
        return self.__job.run(self.__run, *args, **keywords)

    @classmethod
    def __run(cls, func, *args, **keywords):
        "Execute the function after converting the arguments."
        args = tuple(cls.unwrap(i) for i in args)
        keywords = dict((k, cls.unwrap(v)) for k, v in keywords.items())
        return func(*args, **keywords)

    @staticmethod
    def unwrap(obj):
        "Unpack inner objects wrapped by _ThreadSafe instances."
        return obj.__obj if isinstance(obj, _ThreadSafe) else obj

    ########################################################################

    # Allow access to and manipulation of wrapped instance's settings.

    def __getitem__(self, key):
        "Get a configuration option from the underlying object."
        return self.__schedule(operator.getitem, self, key)

    def __setitem__(self, key, value):
        "Set a configuration option on the underlying object."
        return self.__schedule(operator.setitem, self, key, value)

    ########################################################################

    # Create attribute proxies for methods and allow their execution.

    def __getattr__(self, name):
        "Create a requested attribute and return cached result."
        attr = self.__Attr(self.__callback, (name,))
        setattr(self, name, attr)
        return attr

    def __callback(self, path, *args, **keywords):
        "Schedule execution of named method from attribute proxy."
        return self.__schedule(self.__method, path, *args, **keywords)

    def __method(self, path, *args, **keywords):
        "Extract a method and run it with the provided arguments."
        method = self.__obj
        for name in path:
            method = getattr(method, name)
        return method(*args, **keywords)

    ########################################################################

    class __Attr:

        "Save an attribute's name and wait for execution."

        __slots__ = '__callback', '__path'

        def __init__(self, callback, path):
            "Initialize proxy with callback and method path."
            self.__callback = callback
            self.__path = path

        def __call__(self, *args, **keywords):
            "Run a known method with the given arguments."
            return self.__callback(self.__path, *args, **keywords)

        def __getattr__(self, name):
            "Generate a proxy object for a sub-attribute."
            if name in {'__func__', '__name__'}:
                # Hack for the "tkinter.__init__.Misc._register" method.
                raise AttributeError('This is not a real method!')
            return self.__class__(self.__callback, self.__path + (name,))

################################################################################

# Provide thread-safe classes to be used from tkinter.

class Tk(_ThreadSafe): BASE = tkinter.Tk
class Frame(_ThreadSafe): BASE = tkinter.ttk.Frame
class Button(_ThreadSafe): BASE = tkinter.ttk.Button
class Entry(_ThreadSafe): BASE = tkinter.ttk.Entry
class Progressbar(_ThreadSafe): BASE = tkinter.ttk.Progressbar
class Treeview(_ThreadSafe): BASE = tkinter.ttk.Treeview
class Scrollbar(_ThreadSafe): BASE = tkinter.ttk.Scrollbar
class Sizegrip(_ThreadSafe): BASE = tkinter.ttk.Sizegrip
class Menu(_ThreadSafe): BASE = tkinter.Menu
class Directory(_ThreadSafe): BASE = tkinter.filedialog.Directory
class Message(_ThreadSafe): BASE = tkinter.messagebox.Message
If you read the rest of the application, you will find that it is built with the widgets defined as _ThreadSafe variants that you are used to seeing in other tkinter applications. As method calls come in from various threads, they are automatically held until it becomes possible to execute those calls on the creating thread. Note how the mainloop is replaced by way of lines 291 - 298 and 326 - 336.
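As a purely illustrative sketch (not taken from the recipe), the pattern looks like this: build the GUI with the _ThreadSafe classes on the creating thread, then touch widgets freely from a worker thread; each cross-thread call is queued and only runs once the GUI thread pumps events, for example via the main_loop shown below.
import time
import _thread

root = Tk()                           # the _ThreadSafe wrapper around tkinter.Tk
bar = Progressbar(root, maximum=100)
bar.grid()

def worker():
    for pct in range(0, 101, 10):
        bar['value'] = pct            # proxied __setitem__ executes on the GUI thread
        time.sleep(0.1)

_thread.start_new_thread(worker, ())
# Something like main_loop(root), shown below, must keep calling root.update()
# so that the queued cross-thread jobs actually get executed.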
Notice the NoDefaultRoot & main_loop Calls
@classmethod
def main(cls):
    "Create an application containing a single TrimDirView widget."
    tkinter.NoDefaultRoot()
    root = cls.create_application_root()
    cls.attach_window_icon(root, ICON)
    view = cls.setup_class_instance(root)
    cls.main_loop(root)
main_loop Allows Threads To Execute
@staticmethod
def main_loop(root):
    "Process all GUI events according to tkinter's settings."
    target = time.clock()
    while True:
        try:
            root.update()
        except tkinter.TclError:
            break
        target += tkinter._tkinter.getbusywaitinterval() / 1000
        time.sleep(max(target - time.clock(), 0))

Related

python passing function to thread

How is it possible to pass a function to a thread like a standard variable? When I pass the function in args, I get a "not iterable" error.
Here is a summary of my code:
from CallbackThread import CallbackThread
import time
import threading

class foo:
    def runSomthing(self, callback):
        # do things
        callthread = threading.Thread(target=self.newthread, args=(callback))
        callthread.start()

    def newthread(self, calback):
        print("runiing")
        while True:
            # if receve data (udp)
            data = 0
            # run mycallbacktest
            calback(data)

def mycallbacktest(i):
    print("hello world", i)

myobj = foo()
myobj.runSomthing(mycallbacktest)
I have seen things like this on similar topics:
https://gist.github.com/amirasaran/e91c7253c03518b8f7b7955df0e954bb
and I have tried something based on it, but I am not sure what it is doing; as far as I can tell it just calls a callback when the thread has finished:
class BaseThread(threading.Thread):
    def __init__(self, callback=None, callback_args=None, *args, **kwargs):
        target = kwargs.pop('target')
        super(BaseThread, self).__init__(target=self.target_with_callback, *args, **kwargs)
        self.callback = callback
        self.method = target
        self.callback_args = callback_args

    def target_with_callback(self):
        self.method()
        if self.callback is not None:
            self.callback(*self.callback_args)
but this doesn't solve what I am trying to do.
As suggested by MisterMiyagi, the answer is a missing comma:
it should be args=(callback,) instead of args=(callback).
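For clarity, a minimal runnable sketch of the fix; the trailing comma makes args a one-element tuple rather than a parenthesised expression:
import threading

def mycallbacktest(i):
    print("hello world", i)

def newthread(callback):
    callback(0)

# args must be a tuple, so a single argument needs a trailing comma
t = threading.Thread(target=newthread, args=(mycallbacktest,))
t.start()
t.join()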
(this post is just to mark the question as closed)

NameError raised by IUpdater registered through pyRevit

I'm attempting to implement a Dynamic Model Updater as part of a pyRevit extension. However, I am unable to instantiate classes, invoke functions, or reference constants from the updater's Execute method.
As an example, let's say we make a pushbutton that executes this script:
from pyrevit import HOST_APP, DB
from System import Guid
from Autodesk.Revit.UI import TaskDialog

def do_thing(wall):
    TaskDialog.Show('ExampleUpdater', 'Updating {}'.format(wall.Id.IntegerValue))

class ExampleUpdater(DB.IUpdater):
    def __init__(self, addin_id):
        self.id = DB.UpdaterId(addin_id, Guid("70f3be2d-b524-4798-8baf-5b249c2f31c4"))

    def GetUpdaterId(self):
        return self.id

    def GetUpdaterName(self):
        return "Example Updater"

    def GetAdditionalInformation(self):
        return "Just an example"

    def GetChangePriority(self):
        return DB.ChangePriority.Views

    def Execute(self, data):
        doc = data.GetDocument()
        for id in data.GetModifiedElementIds():
            wall = doc.GetElement(id)
            try:
                do_thing(wall)
            except Exception as err:
                wall.ParametersMap["Comments"].Set("{}: {}".format(err.__class__.__name__, err))

updater = ExampleUpdater(HOST_APP.addin_id)

if DB.UpdaterRegistry.IsUpdaterRegistered(updater.GetUpdaterId()):
    DB.UpdaterRegistry.UnregisterUpdater(updater.GetUpdaterId())
DB.UpdaterRegistry.RegisterUpdater(updater)

wall_filter = DB.ElementCategoryFilter(DB.BuiltInCategory.OST_Walls)
change_type = DB.Element.GetChangeTypeAny()
DB.UpdaterRegistry.AddTrigger(updater.GetUpdaterId(), wall_filter, change_type)
If I move a wall (after clicking the button to register the updater, of course), an exception is raised and stored in the wall's comments property: NameError: name 'do_thing' is not defined. Similar exceptions are also raised when I try to call functions like TaskDialog.Show that were not originally defined in Python.
However, if I register the same updater in the RevitPythonShell, it works as expected:
from System import Guid
from Autodesk.Revit.UI import TaskDialog

def do_thing(wall):
    TaskDialog.Show('ExampleUpdater', 'Updating {}'.format(wall.Id.IntegerValue))

class ExampleUpdater(IUpdater):
    def __init__(self, addin_id):
        self.id = UpdaterId(addin_id, Guid("c197ee15-47a9-4cf7-b12c-43b863497826"))

    def GetUpdaterId(self):
        return self.id

    def GetUpdaterName(self):
        return "Example Updater"

    def GetAdditionalInformation(self):
        return "Just an example"

    def GetChangePriority(self):
        return ChangePriority.Views

    def Execute(self, data):
        doc = data.GetDocument()
        for id in data.GetModifiedElementIds():
            wall = doc.GetElement(id)
            try:
                do_thing(wall)
            except Exception as err:
                wall.ParametersMap["Comments"].Set("{}: {}".format(err.__class__.__name__, err))

updater = ExampleUpdater(__revit__.Application.ActiveAddInId)

if UpdaterRegistry.IsUpdaterRegistered(updater.GetUpdaterId()):
    UpdaterRegistry.UnregisterUpdater(updater.GetUpdaterId())
UpdaterRegistry.RegisterUpdater(updater)

wall_filter = ElementCategoryFilter(BuiltInCategory.OST_Walls)
change_type = Element.GetChangeTypeAny()
UpdaterRegistry.AddTrigger(updater.GetUpdaterId(), wall_filter, change_type)
What is the difference between the two environments that allows the updater to work in RevitPythonShell but not in pyRevit?
Revit 2019, pyRevit 4.7.4, IronPython 2.7.7
Building on @Callum's suggestion, I was able to work around the problem by saving references to the imported resources I needed in the updater's __init__ method. My example updater now looks like this:
from pyrevit import HOST_APP, DB
from System import Guid
from Autodesk.Revit.UI import TaskDialog

class ExampleUpdater(DB.IUpdater):
    def __init__(self, addin_id):
        self.id = DB.UpdaterId(addin_id, Guid("70f3be2d-b524-4798-8baf-5b249c2f31c4"))
        self.TaskDialog = TaskDialog

    def GetUpdaterId(self):
        return self.id

    def GetUpdaterName(self):
        return "Example Updater"

    def GetAdditionalInformation(self):
        return "Just an example"

    def GetChangePriority(self):
        return DB.ChangePriority.Views

    def Execute(self, data):
        doc = data.GetDocument()
        for id in data.GetModifiedElementIds():
            wall = doc.GetElement(id)
            try:
                self.do_thing(wall)
            except Exception as err:
                wall.ParametersMap["Comments"].Set("{}: {}".format(err.__class__.__name__, err))

    def do_thing(self, wall):
        self.TaskDialog.Show('ExampleUpdater', 'Updating {}'.format(wall.Id.IntegerValue))
Simply making do_thing a method of ExampleUpdater wasn't enough, and saving a reference to the do_thing function (i.e. by adding self.do_thing = do_thing in __init__) didn't work either. In both cases, the updater raised a NameError that said global name 'TaskDialog' is not defined.

What is the preferred way to deprecate a class and its subclasses

How can I deprecate a class and its subclasses in Python?
I thought that issuing the warning in __init__() would work, but it doesn't, since __init__() won't be called if a subclass doesn't call super().
Edit:
Okay, some information was missing from my question.
I know how to use warnings.warn().
Also, I don't want to use a decorator. I want to apply it to just one class, and when the class is instantiated it should warn the user.
You are looking for warnings.warn(message[, category[, stacklevel]])
Issue a warning, or maybe ignore it or raise an exception. The category argument, if given, must be a warning category class (see above); it defaults to UserWarning. Alternatively message can be a Warning instance, in which case category will be ignored and message.__class__ will be used. In this case the message text will be str(message). This function raises an exception if the particular warning issued is changed into an error by the warnings filter (see above).
From here:
import functools
import inspect
import os
import warnings

class _DeprecatedDecorator(object):
    MESSAGE = "%s is @deprecated"

    def __call__(self, symbol):
        if not inspect.isclass(symbol):
            raise TypeError("only classes can be @deprecated")
        warnings.filterwarnings('default',
                                message=self.MESSAGE % r'\w+',
                                category=DeprecationWarning)
        return self._wrap_class(symbol)

    def _wrap_class(self, cls):
        previous_ctor = cls.__init__

        @functools.wraps(previous_ctor)
        def new_ctor(*args, **kwargs):
            self._warn(cls.__name__)
            return previous_ctor(*args, **kwargs)

        cls.__init__ = new_ctor
        return cls

    def _warn(self, name):
        warnings.warn(self.MESSAGE % name, DeprecationWarning,
                      stacklevel=self._compute_stacklevel())

    def _compute_stacklevel(self):
        this_file, _ = os.path.splitext(__file__)
        app_code_dir = self._get_app_code_dir()

        def is_relevant(filename):
            return filename.startswith(app_code_dir) and not \
                filename.startswith(this_file)

        stack = self._get_callstack()
        stack.pop(0)  # omit this function's frame
        frame = None
        try:
            for i, frame in enumerate(stack, 1):
                filename = frame.f_code.co_filename
                if is_relevant(filename):
                    return i
        finally:
            del frame
            del stack
        return 0

    def _get_app_code_dir(self):
        import myapplication  # root package for the app
        app_dir = os.path.dirname(myapplication.__file__)
        return os.path.join(app_dir, '')  # ensure trailing slash

    def _get_callstack(self):
        frame = inspect.currentframe()
        frame = frame.f_back  # omit this function's frame
        stack = []
        try:
            while frame:
                stack.append(frame)
                frame = frame.f_back
        finally:
            del frame
        return stack

deprecated = _DeprecatedDecorator()
del _DeprecatedDecorator
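If a decorator is unwanted (as in the edited question above), a minimal sketch that warns when the class or any of its subclasses is instantiated, even if the subclass never calls super().__init__(); it assumes Python 3.6+ for __init_subclass__, and the class names are only illustrative:
import warnings

class Legacy:
    def __init_subclass__(cls, **kwargs):
        # Warn as soon as someone subclasses the deprecated class.
        warnings.warn("%s subclasses deprecated class Legacy" % cls.__name__,
                      DeprecationWarning, stacklevel=2)
        super().__init_subclass__(**kwargs)

    def __new__(cls, *args, **kwargs):
        # Warn on every instantiation; __new__ still runs even when a
        # subclass overrides __init__ without calling super().
        warnings.warn("%s is deprecated" % cls.__name__,
                      DeprecationWarning, stacklevel=2)
        return super().__new__(cls)

class Child(Legacy):        # triggers the subclass warning at class creation
    def __init__(self):
        pass                # no super().__init__(), yet Child() still warns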

Function offloaded to PyQt QThread is 2x slower

I've been trying to optimize my application. The function runs in 10.06 seconds on average when I profile it by itself, but when it is run on a QThread it takes around 17-22 seconds.
It's 2x slower in the QThread. How do I fix that?
The function actually initializes a class called DocxDocument, which represents a document read from a Word file and parsed for my needs.
I have a QThread that creates this class and uses Qt signals to send progress information back to the GUI. Here is the code from that class:
class DocxImporterThread(QThread):
    '''
    This thread is used to import a .docx document, report its progress, and
    then return the document that it parsed.
    '''
    reportProgress = pyqtSignal(int)
    reportError = pyqtSignal(Exception, str)
    reportText = pyqtSignal(str)

    def __init__(self, filePath):
        QThread.__init__(self)
        self.setPriority(QThread.HighestPriority)
        self._filePath = filePath
        self._docx = None
        self._html = ''
        self._bookmarks = None
        self._pages = None
        self._stop = False

    def run(self):
        def myProgressHook(percentage):
            self.reportProgress.emit(percentage)

        def myCancelHook():
            return self._stop

        try:
            self._docx = DocxDocument(self._filePath, myProgressHook, myCancelHook)
            if not self._stop:
                self._html = self._docx.getMainPage()
                self._bookmarks = self._docx.getHeadings()
                self._pages = self._docx.getPages()
        except Exception as ex2:
            print 'ERROR: The .docx document didn\'t import.'
            self.reportError.emit(ex2, traceback.format_exc())
The getMainPage(), getHeadings(), and getPages() are instantaneous because they just return a reference to something that the constructor already created. Here is the code I used to profile my DocxDocument class:
testFile = 'some_file.docx'
statSave = 'profile.out'

def progress(percent):
    print ' --', percent, '--'

cProfile.run('DocxDocument(testFile)', filename=statSave)
myStats = pstats.Stats(statSave)
myStats.sort_stats('cumulative', 'name')
myStats.print_stats()
Thanks for your time in looking at this!

Python observer/observable library [duplicate]

Are there any exemplary examples of the GoF Observer pattern implemented in Python? I have a bit of code which currently has debugging code laced through the key class (currently generating messages to stderr if a magic environment variable is set). Additionally, the class has an interface for incrementally returning results as well as storing them (in memory) for post-processing. (The class itself is a job manager for concurrently executing commands on remote machines over ssh.)
Currently the usage of the class looks something like:
job = SSHJobMan(hostlist, cmd)
job.start()

while not job.done():
    for each in job.poll():
        incrementally_process(job.results[each])
    time.sleep(0.2)  # or other more useful work

post_process(job.results)
An alternative usage model is:
job = SSHJobMan(hostlist, cmd)
job.wait() # implicitly performs a start()
process(job.results)
This all works fine for the current utility. However, it does lack flexibility. For example, I currently support a brief output format or a progress bar as incremental results; I also support brief, complete, and "merged message" outputs for the post_process() function.
However, I'd like to support multiple results/output streams (progress bar to the terminal, debugging and warnings to a log file, outputs from successful jobs to one file/directory, error messages and other results from non-successful jobs to another, etc).
This sounds like a situation that calls for Observer ... have instances of my class accept registration from other objects and call them back with specific types of events as they occur.
I'm looking at PyPubSub since I saw several references to that in SO related questions. I'm not sure I'm ready to add the external dependency to my utility but I could see value in using their interface as a model for mine if that's going to make it easier for others to use. (The project is intended as both a standalone command line utility and a class for writing other scripts/utilities).
In short I know how to do what I want ... but there are numerous ways to accomplish it. I want suggestions on what's most likely to work for other users of the code in the long run.
The code itself is at: classh.
However it does lack flexibility.
Well... actually, this looks like a good design to me if an asynchronous API is what you want. It usually is. Maybe all you need is to switch from stderr to Python's logging module, which has a sort of publish/subscribe model of its own, what with Logger.addHandler() and so on.
If you do want to support observers, my advice is to keep it simple. You really only need a few lines of code.
class Event(object):
    pass

class Observable(object):
    def __init__(self):
        self.callbacks = []

    def subscribe(self, callback):
        self.callbacks.append(callback)

    def fire(self, **attrs):
        e = Event()
        e.source = self
        for k, v in attrs.items():
            setattr(e, k, v)
        for fn in self.callbacks:
            fn(e)
Your Job class can subclass Observable. When something of interest happens, call self.fire(type="progress", percent=50) or the like.
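A small usage sketch (the Job class and event fields here are illustrative, not part of the original answer):
class Job(Observable):
    def run(self):
        # Tell every subscriber how far along we are.
        self.fire(type="progress", percent=50)

job = Job()
job.subscribe(lambda e: print(e.source, e.type, e.percent))
job.run()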
I think people in the other answers overdo it. You can easily achieve events in Python with less than 15 lines of code.
You simply have two classes: Event and Observer. Any class that wants to listen for an event needs to inherit Observer and set itself to listen (observe) for a specific event. When an Event is instantiated and fired, all observers listening to that event will run the specified callback functions.
class Observer():
    _observers = []

    def __init__(self):
        self._observers.append(self)
        self._observables = {}

    def observe(self, event_name, callback):
        self._observables[event_name] = callback


class Event():
    def __init__(self, name, data, autofire = True):
        self.name = name
        self.data = data
        if autofire:
            self.fire()

    def fire(self):
        for observer in Observer._observers:
            if self.name in observer._observables:
                observer._observables[self.name](self.data)
Example:
class Room(Observer):
    def __init__(self):
        print("Room is ready.")
        Observer.__init__(self)  # Observer's init needs to be called

    def someone_arrived(self, who):
        print(who + " has arrived!")

room = Room()
room.observe('someone arrived', room.someone_arrived)

Event('someone arrived', 'Lenard')
Output:
Room is ready.
Lenard has arrived!
A few more approaches...
Example: the logging module
Maybe all you need is to switch from stderr to Python's logging module, which has a powerful publish/subscribe model.
It's easy to get started producing log records.
# producer
import logging

log = logging.getLogger("myjobs")  # that's all the setup you need

class MyJob(object):
    def run(self):
        log.info("starting job")
        n = 10
        for i in range(n):
            log.info("%.1f%% done" % (100.0 * i / n))
        log.info("work complete")
On the consumer side there's a bit more work. Unfortunately configuring logger output takes, like, 7 whole lines of code to do. ;)
# consumer
import myjobs, sys, logging

if user_wants_log_output:
    ch = logging.StreamHandler(sys.stderr)
    ch.setLevel(logging.INFO)
    formatter = logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    ch.setFormatter(formatter)
    myjobs.log.addHandler(ch)
    myjobs.log.setLevel(logging.INFO)

myjobs.MyJob().run()
On the other hand there's an amazing amount of stuff in the logging package. If you ever need to send log data to a rotating set of files, an email address, and the Windows Event Log, you're covered.
Example: simplest possible observer
But you don't need to use any library at all. An extremely simple way to support observers is to call a method that does nothing.
# producer
class MyJob(object):
    def on_progress(self, pct):
        """Called when progress is made. pct is the percent complete.
        By default this does nothing. The user may override this method
        or even just assign to it."""
        pass

    def run(self):
        n = 10
        for i in range(n):
            self.on_progress(100.0 * i / n)
        self.on_progress(100.0)

# consumer
import sys, myjobs
job = myjobs.MyJob()
job.on_progress = lambda pct: sys.stdout.write("%.1f%% done\n" % pct)
job.run()
Sometimes instead of writing a lambda, you can just say job.on_progress = progressBar.update, which is nice.
This is about as simple as it gets. One drawback is that it doesn't naturally support multiple listeners subscribing to the same events.
Example: C#-like events
With a bit of support code, you can get C#-like events in Python. Here's the code:
# glue code
class event(object):
    def __init__(self, func):
        self.__doc__ = func.__doc__
        self._key = ' ' + func.__name__

    def __get__(self, obj, cls):
        try:
            return obj.__dict__[self._key]
        except KeyError, exc:
            be = obj.__dict__[self._key] = boundevent()
            return be

class boundevent(object):
    def __init__(self):
        self._fns = []

    def __iadd__(self, fn):
        self._fns.append(fn)
        return self

    def __isub__(self, fn):
        self._fns.remove(fn)
        return self

    def __call__(self, *args, **kwargs):
        for f in self._fns[:]:
            f(*args, **kwargs)
The producer declares the event using a decorator:
# producer
class MyJob(object):
    @event
    def progress(pct):
        """Called when progress is made. pct is the percent complete."""

    def run(self):
        n = 10
        for i in range(n+1):
            self.progress(100.0 * i / n)

# consumer
import sys, myjobs
job = myjobs.MyJob()
job.progress += lambda pct: sys.stdout.write("%.1f%% done\n" % pct)
job.run()
This works exactly like the "simple observer" code above, but you can add as many listeners as you like using +=. (Unlike C#, there are no event handler types, you don't have to new EventHandler(foo.bar) when subscribing to an event, and you don't have to check for null before firing the event. Like C#, events do not squelch exceptions.)
How to choose
If logging does everything you need, use that. Otherwise do the simplest thing that works for you. The key thing to note is that you don't need to take on a big external dependency.
How about an implementation where objects aren't kept alive just because they're observing something? Below please find an implementation of the observer pattern with the following features:
Usage is pythonic. To add an observer to a bound method .bar of instance foo, just do foo.bar.addObserver(observer).
Observers are not kept alive by virtue of being observers. In other words, the observer code uses no strong references.
No sub-classing necessary (descriptors ftw).
Can be used with unhashable types.
Can be used as many times as you want in a single class.
(bonus) As of today the code exists in a proper downloadable, installable package on github.
Here's the code (the github package or PyPI package have the most up to date implementation):
import weakref
import functools

class ObservableMethod(object):
    """
    A proxy for a bound method which can be observed.

    I behave like a bound method, but other bound methods can subscribe to be
    called whenever I am called.
    """

    def __init__(self, obj, func):
        self.func = func
        functools.update_wrapper(self, func)
        self.objectWeakRef = weakref.ref(obj)
        self.callbacks = {}  # observing object ID -> weak ref, methodNames

    def addObserver(self, boundMethod):
        """
        Register a bound method to observe this ObservableMethod.

        The observing method will be called whenever this ObservableMethod is
        called, and with the same arguments and keyword arguments. If a
        boundMethod has already been registered to as a callback, trying to add
        it again does nothing. In other words, there is no way to sign up an
        observer to be called back multiple times.
        """
        obj = boundMethod.__self__
        ID = id(obj)
        if ID in self.callbacks:
            s = self.callbacks[ID][1]
        else:
            wr = weakref.ref(obj, Cleanup(ID, self.callbacks))
            s = set()
            self.callbacks[ID] = (wr, s)
        s.add(boundMethod.__name__)

    def discardObserver(self, boundMethod):
        """
        Un-register a bound method.
        """
        obj = boundMethod.__self__
        if id(obj) in self.callbacks:
            self.callbacks[id(obj)][1].discard(boundMethod.__name__)

    def __call__(self, *arg, **kw):
        """
        Invoke the method which I proxy, and all of it's callbacks.

        The callbacks are called with the same *args and **kw as the main
        method.
        """
        result = self.func(self.objectWeakRef(), *arg, **kw)
        for ID in self.callbacks:
            wr, methodNames = self.callbacks[ID]
            obj = wr()
            for methodName in methodNames:
                getattr(obj, methodName)(*arg, **kw)
        return result

    @property
    def __self__(self):
        """
        Get a strong reference to the object owning this ObservableMethod

        This is needed so that ObservableMethod instances can observe other
        ObservableMethod instances.
        """
        return self.objectWeakRef()


class ObservableMethodDescriptor(object):
    def __init__(self, func):
        """
        To each instance of the class using this descriptor, I associate an
        ObservableMethod.
        """
        self.instances = {}  # Instance id -> (weak ref, Observablemethod)
        self._func = func

    def __get__(self, inst, cls):
        if inst is None:
            return self
        ID = id(inst)
        if ID in self.instances:
            wr, om = self.instances[ID]
            if not wr():
                msg = "Object id %d should have been cleaned up"%(ID,)
                raise RuntimeError(msg)
        else:
            wr = weakref.ref(inst, Cleanup(ID, self.instances))
            om = ObservableMethod(inst, self._func)
            self.instances[ID] = (wr, om)
        return om

    def __set__(self, inst, val):
        raise RuntimeError("Assigning to ObservableMethod not supported")


def event(func):
    return ObservableMethodDescriptor(func)


class Cleanup(object):
    """
    I manage remove elements from a dict whenever I'm called.

    Use me as a weakref.ref callback to remove an object's id from a dict
    when that object is garbage collected.
    """
    def __init__(self, key, d):
        self.key = key
        self.d = d

    def __call__(self, wr):
        del self.d[self.key]
To use this we just decorate methods we want to make observable with @event. Here's an example:
class Foo(object):
    def __init__(self, name):
        self.name = name

    @event
    def bar(self):
        print("%s called bar"%(self.name,))

    def baz(self):
        print("%s called baz"%(self.name,))

a = Foo('a')
b = Foo('b')
a.bar.addObserver(b.bar)
a.bar()
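For illustration, running the example above prints:
a called bar
b called bar
because calling the observed a.bar also invokes the registered observer b.bar with the same arguments.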
From wikipedia:
from collections import defaultdict

class Observable (defaultdict):

    def __init__ (self):
        defaultdict.__init__(self, object)

    def emit (self, *args):
        '''Pass parameters to all observers and update states.'''
        for subscriber in self:
            response = subscriber(*args)
            self[subscriber] = response

    def subscribe (self, subscriber):
        '''Add a new subscriber to self.'''
        self[subscriber]

    def stat (self):
        '''Return a tuple containing the state of each observer.'''
        return tuple(self.values())
The Observable is used like this.
myObservable = Observable ()
# subscribe some inlined functions.
# myObservable[lambda x, y: x * y] would also work here.
myObservable.subscribe(lambda x, y: x * y)
myObservable.subscribe(lambda x, y: float(x) / y)
myObservable.subscribe(lambda x, y: x + y)
myObservable.subscribe(lambda x, y: x - y)
# emit parameters to each observer
myObservable.emit(6, 2)
# get updated values
myObservable.stat() # returns: (8, 3.0, 4, 12)
Based on Jason's answer, I implemented the C#-like events example as a fully-fledged python module including documentation and tests. I love fancy pythonic stuff :)
So, if you want some ready-to-use solution, you can just use the code on github.
Example: twisted log observers
To register an observer yourCallable() (a callable that accepts a dictionary) to receive all log events (in addition to any other observers):
twisted.python.log.addObserver(yourCallable)
Example: complete producer/consumer example
From Twisted-Python mailing list:
#!/usr/bin/env python
"""Serve as a sample implementation of a twisted producer/consumer
system, with a simple TCP server which asks the user how many random
integers they want, and it sends the result set back to the user, one
result per line."""
import random

from zope.interface import implements
from twisted.internet import interfaces, reactor
from twisted.internet.protocol import Factory
from twisted.protocols.basic import LineReceiver

class Producer:
    """Send back the requested number of random integers to the client."""

    implements(interfaces.IPushProducer)

    def __init__(self, proto, cnt):
        self._proto = proto
        self._goal = cnt
        self._produced = 0
        self._paused = False

    def pauseProducing(self):
        """When we've produced data too fast, pauseProducing() will be
        called (reentrantly from within resumeProducing's transport.write
        method, most likely), so set a flag that causes production to pause
        temporarily."""
        self._paused = True
        print('pausing connection from %s' % (self._proto.transport.getPeer()))

    def resumeProducing(self):
        self._paused = False

        while not self._paused and self._produced < self._goal:
            next_int = random.randint(0, 10000)
            self._proto.transport.write('%d\r\n' % (next_int))
            self._produced += 1

        if self._produced == self._goal:
            self._proto.transport.unregisterProducer()
            self._proto.transport.loseConnection()

    def stopProducing(self):
        pass

class ServeRandom(LineReceiver):
    """Serve up random data."""

    def connectionMade(self):
        print('connection made from %s' % (self.transport.getPeer()))
        self.transport.write('how many random integers do you want?\r\n')

    def lineReceived(self, line):
        cnt = int(line.strip())
        producer = Producer(self, cnt)
        self.transport.registerProducer(producer, True)
        producer.resumeProducing()

    def connectionLost(self, reason):
        print('connection lost from %s' % (self.transport.getPeer()))

factory = Factory()
factory.protocol = ServeRandom
reactor.listenTCP(1234, factory)
print('listening on 1234...')
reactor.run()
OP asks "Are there any exemplary examples of the GoF Observer implemented in Python?"
This is an example in Python 3.7. This Observable class meets the requirement of creating a relationship between one observable and many observers while remaining independent of their structure.
from functools import partial
from dataclasses import dataclass, field
import sys
from typing import List, Callable

@dataclass
class Observable:
    observers: List[Callable] = field(default_factory=list)

    def register(self, observer: Callable):
        self.observers.append(observer)

    def deregister(self, observer: Callable):
        self.observers.remove(observer)

    def notify(self, *args, **kwargs):
        for observer in self.observers:
            observer(*args, **kwargs)

def usage_demo():
    observable = Observable()

    # Register two anonymous observers using lambda.
    observable.register(
        lambda *args, **kwargs: print(f'Observer 1 called with args={args}, kwargs={kwargs}'))
    observable.register(
        lambda *args, **kwargs: print(f'Observer 2 called with args={args}, kwargs={kwargs}'))

    # Create an observer function, register it, then deregister it.
    def callable_3():
        print('Observer 3 NOT called.')

    observable.register(callable_3)
    observable.deregister(callable_3)

    # Create a general purpose observer function and register four observers.
    def callable_x(*args, **kwargs):
        print(f'{args[0]} observer called with args={args}, kwargs={kwargs}')

    for gui_field in ['Form field 4', 'Form field 5', 'Form field 6', 'Form field 7']:
        observable.register(partial(callable_x, gui_field))

    observable.notify('test')

if __name__ == '__main__':
    sys.exit(usage_demo())
A functional approach to observer design:
from functools import wraps  # needed for @wraps below

def add_listener(obj, method_name, listener):
    # Get any existing listeners
    listener_attr = method_name + '_listeners'
    listeners = getattr(obj, listener_attr, None)

    # If this is the first listener, then set up the method wrapper
    if not listeners:
        listeners = [listener]
        setattr(obj, listener_attr, listeners)

        # Get the object's method
        method = getattr(obj, method_name)

        @wraps(method)
        def method_wrapper(*args, **kwags):
            method(*args, **kwags)
            for l in listeners:
                l(obj, *args, **kwags)  # Listener also has object argument

        # Replace the original method with the wrapper
        setattr(obj, method_name, method_wrapper)
    else:
        # Event is already set up, so just add another listener
        listeners.append(listener)


def remove_listener(obj, method_name, listener):
    # Get any existing listeners
    listener_attr = method_name + '_listeners'
    listeners = getattr(obj, listener_attr, None)

    if listeners:
        # Remove the listener
        next((listeners.pop(i)
              for i, l in enumerate(listeners)
              if l == listener),
             None)

        # If this was the last listener, then remove the method wrapper
        if not listeners:
            method = getattr(obj, method_name)
            delattr(obj, listener_attr)
            setattr(obj, method_name, method.__wrapped__)
These methods can then be used to add a listener to any class method. For example:
class MyClass(object):
    def __init__(self, prop):
        self.prop = prop

    def some_method(self, num, string):
        print('method:', num, string)


def listener_method(obj, num, string):
    print('listener:', num, string, obj.prop)


my = MyClass('my_prop')

add_listener(my, 'some_method', listener_method)
my.some_method(42, 'with listener')

remove_listener(my, 'some_method', listener_method)
my.some_method(42, 'without listener')
And the output is:
method: 42 with listener
listener: 42 with listener my_prop
method: 42 without listener
