Manager / Container class, how to? - python
I am currently designing a software which needs to manage a certain hardware setup.
The hardware setup is as following :
System - The system contains two identical devices, and has certain functionality relative to the entire system.
Device - Each device contains two identical sub devices, and has certain functionality relative to both sub devices.
Sub device - Each sub device has 4 configurable entities (Controlled via the same hardware command - thus I don't count them as a sub-sub device).
What I want to achieve :
I want to control all configurable entities via the system manager (the entities are counted in a serial way), meaning I would be able to do the following :
system_instance = system_manager_class(some_params)
system_instance.some_func(0) # configure device_manager[0].sub_device_manager[0].entity[0]
system_instance.some_func(5) # configure device_manager[0].sub_device_manager[1].entity[1]
system_instance.some_func(8) # configure device_manager[1].sub_device_manager[1].entity[0]
What I have thought of doing :
I was thinking of creating an abstract class, which contains all sub device functions (with a call to a conversion function) and have the system_manager, device_manager and sub_device_manager inherit it. Thus all classes will have the same function name and I will be able to access them via the system manager.
Something around these lines :
class abs_sub_device():
    """Abstract layer: converts a serial entity number and delegates the
    operation to the sub-manager that owns that entity.

    Concrete layers override convert_entity(); the delegation in
    set_entity_to_2() then works unchanged at every level.
    """

    def convert_entity(self, entity_num):
        """Map a serial entity number to (owning sub-manager, local number).

        The original stub took no entity number and silently returned None;
        an explicit error makes a missing override fail loudly.
        """
        raise NotImplementedError("subclasses must implement convert_entity")

    def set_entity_to_2(self, entity_num):
        # BUG FIX: the original signature was missing `self` yet the body
        # used it.  Also, the original delegated to the placeholder name
        # `some_func`; delegating to the same-named method is the intent
        # (each layer forwards the call one level down).
        sub_manager, sub_manager_entity_num = self.convert_entity(entity_num)
        sub_manager.set_entity_to_2(sub_manager_entity_num)
class system_manager(abs_sub_device):
    """Top layer: 2 devices x 2 sub-devices x 4 entities = 16 serial entities."""

    def __init__(self):
        self.device_manager_list = []  # Initialize device list
        self.device_manager_list.append(device_manager())
        self.device_manager_list.append(device_manager())

    def convert_entity(self, entity_num):
        # Each device owns 2 sub-devices * 4 entities = 8 entities, so the
        # device index is entity_num // 8.  The original // 4 would route
        # entity 5 to device 1, contradicting the stated example
        # (entity 5 -> device 0, sub-device 1, entity 1).
        relevant_device_manager = self.device_manager_list[entity_num // 8]
        relevant_entity = entity_num % 8
        # BUG FIX: the original returned the misspelled name
        # `relevant_device_manage` (NameError at runtime).
        return relevant_device_manager, relevant_entity
class device_manager(abs_sub_device):
    """Middle layer: owns two sub-device managers of four entities each."""

    def __init__(self):
        # Initialize sub device list with its two identical children.
        self.sub_device_manager_list = [sub_device_manager(),
                                        sub_device_manager()]

    def convert_entity(self, entity_num):
        # Each sub-device manager owns four entities: quotient selects the
        # manager, remainder is the entity number local to it.
        manager_index, local_entity = divmod(entity_num, 4)
        return self.sub_device_manager_list[manager_index], local_entity
class sub_device_manager(abs_sub_device):
    """Bottom layer: holds the four configurable entity values directly."""

    def __init__(self):
        self.entity_list = [0 for _ in range(4)]

    def set_entity_to_2(self, entity_num):
        # Terminal implementation: no further delegation, just store.
        self.entity_list[entity_num] = 2
The code is for generic understanding of my design, not for actual functionality.
The problem :
It seems to me that the system I am trying to design is really generic and that there must be a built-in python way to do this, or that my entire object oriented look at it is wrong.
I would really like to know if some one has a better way of doing this.
After much thinking, I think I found a pretty generic way to solve the issue, using a combination of decorators, inheritance and dynamic function creation.
The main idea is as following :
1) Each layer dynamically creates all of the sub-layer's relevant functions for itself (inside the init function, using a decorator on the init function)
2) Each dynamically created function converts the entity value according to a convert function (which is a static function of the abs_container_class) and calls the lower layer's function with the same name (see make_convert_function_method).
3) This basically causes all sub layer function to be implemented on the higher level with zero code duplication.
def get_relevant_class_method_list(class_instance):
    """Return the names of all public callable attributes of *class_instance*.

    Anything whose name starts with an underscore (dunder or private) is
    skipped; everything else that is callable is considered a method.
    """
    method_list = []
    for attr_name in dir(class_instance):
        if attr_name.startswith("_"):
            continue
        if callable(getattr(class_instance, attr_name)):
            method_list.append(attr_name)
    return method_list
def make_convert_function_method(name):
    """Build a forwarding method for a container layer.

    The returned method converts the serial entity number via the instance's
    _convert_entity(), then invokes the same-named method on the selected
    sub-manager.

    Improvement over the original: the sub-manager's return value is now
    propagated instead of being silently discarded.
    """
    def _method(self, entity_num, *args):
        sub_manager, sub_manager_entity_num = self._convert_entity(entity_num)
        function_to_call = getattr(sub_manager, name)
        return function_to_call(sub_manager_entity_num, *args)
    return _method
def container_class_init_decorator(function_object):
    """Decorator for a container layer's __init__.

    After the wrapped __init__ runs, one forwarding method is stamped onto
    the class for every public method of the contained sub-managers.  All
    sub-managers are assumed identical, so inspecting container_list[0]
    is enough.
    """
    def new_init_function(self, *args):
        # Run the real __init__ first so self.container_list exists.
        function_object(self, *args)
        # Get all relevant methods (of one sub-manager is enough).
        method_list = get_relevant_class_method_list(self.container_list[0])
        # Dynamically create the forwarding functions.
        for method_name in method_list:
            # IMPROVEMENT: stamp each method onto the class only once;
            # the original recreated every method on every instantiation.
            if not hasattr(type(self), method_name):
                setattr(type(self), method_name,
                        make_convert_function_method(method_name))
    return new_init_function
class abs_container_class():
    """Abstract base for container layers.

    Concrete layers must override _convert_entity() to map a serial entity
    number onto (sub-manager, local entity number).
    """

    def _convert_entity(self, entity_num):
        # BUG FIX: the original stub took no entity number and silently
        # returned None; raising makes a missing override fail loudly.
        raise NotImplementedError("subclasses must implement _convert_entity")
class system_manager(abs_container_class):
    """Top layer; the init decorator stamps every sub-layer method onto it."""

    @container_class_init_decorator  # restored: extraction turned '@' into '#'
    def __init__(self):
        self.device_manager_list = []  # Initialize device list
        self.device_manager_list.append(device_manager())
        self.device_manager_list.append(device_manager())
        self.container_list = self.device_manager_list

    def _convert_entity(self, entity_num):
        # Each device owns 8 entities (2 sub-devices * 4), so divide by 8;
        # the original // 4 mis-routed entities 4..7 to device 1, breaking
        # the question's example mapping (entity 5 -> device 0).
        relevant_device_manager = self.device_manager_list[entity_num // 8]
        relevant_entity = entity_num % 8
        return relevant_device_manager, relevant_entity
class device_manager(abs_container_class):
    """Middle layer: two sub-device managers of four entities each."""

    @container_class_init_decorator  # restored: extraction turned '@' into '#'
    def __init__(self):
        self.sub_device_manager_list = []  # Initialize sub device list
        self.sub_device_manager_list.append(sub_device_manager())
        self.sub_device_manager_list.append(sub_device_manager())
        self.container_list = self.sub_device_manager_list

    def _convert_entity(self, entity_num):
        # Each sub-device manager owns four entities.
        relevant_sub_device_manager = self.sub_device_manager_list[entity_num // 4]
        relevant_entity = entity_num % 4
        return relevant_sub_device_manager, relevant_entity
class sub_device_manager():
    """Bottom layer: directly owns the four entity values."""

    def __init__(self):
        self.entity_list = [0 for _ in range(4)]

    def set_entity_to_value(self, entity_num, required_value):
        # Store the value, then report what was done.
        self.entity_list[entity_num] = required_value
        print("I set the entity to : {}".format(required_value))
# This is used for auto completion purposes (Using pep convention)
class auto_complete_class(system_manager, device_manager, sub_device_manager):
    # Never instantiated: this dummy union of all layers exists only so an
    # IDE "# type:" hint can expose every layer's methods for completion.
    pass
# The "# type:" comment tells the IDE to treat the instance as the dummy
# union class, so completion shows the dynamically created methods too.
system_instance = system_manager() # type: auto_complete_class
system_instance.set_entity_to_value(0, 3)
There is still a little issue with this solution, auto-completion would not work since the highest level class has almost no static implemented function.
In order to solve this I cheated a bit, I created an empty class which inherited from all layers and stated to the IDE using pep convention that it is the type of the instance being created (# type: auto_complete_class).
Does this solve your Problem?
class EndDevice:
    """Leaf device holding `entities_num` entities addressed 0..n-1."""

    def __init__(self, entities_num):
        self.entities = list(range(entities_num))

    @property  # restored: extraction turned '@' into '#'
    def count_entities(self):
        return len(self.entities)

    def get_entity(self, i):
        # Leaf of the address path: just render the local index.
        return str(i)
class Device:
    """Composite node: routes serial entity numbers to its sub-devices.

    Children only need `count_entities` and `get_entity`, so Devices nest
    to any depth (a system manager is just a Device of Devices).
    """

    def __init__(self, sub_devices):
        self.sub_devices = sub_devices

    @property  # restored: extraction turned '@' into '#'
    def count_entities(self):
        return sum(sd.count_entities for sd in self.sub_devices)

    def get_entity(self, i):
        # Walk children, with c = entities seen so far, until the child
        # owning serial index i is found; prefix the child's index to the
        # recursively built path.
        c = 0
        for index, sd in enumerate(self.sub_devices):
            if c <= i < sd.count_entities + c:
                return str(index) + " " + sd.get_entity(i - c)
            c += sd.count_entities
        raise IndexError(i)
# A system manager is structurally just another Device, so alias it; the
# composite therefore stacks to any depth.
SystemManager = Device
sub_devices1 = [EndDevice(4) for _ in range(2)]
sub_devices2 = [EndDevice(4) for _ in range(2)]
system_manager = SystemManager([Device(sub_devices1), Device(sub_devices2)])
# Resolve a few serial entity numbers to their full address paths.
for _serial in (0, 5, 15):
    print(system_manager.get_entity(_serial))
I can't think of a better way to do this than OOP, but inheritance will only give you one set of low-level functions for the system manager, so it will be like having one device manager and one sub-device manager. A better approach would be, a bit like tkinter widgets, to have one system manager and initialise all the other managers like children in a tree, so:
system = SystemManager()
device1 = DeviceManager(system)
subDevice1 = SubDeviceManager(device1)
device2 = DeviceManager(system)
subDevice2 = SubDeviceManager(device2)
#to execute some_func on subDevice1
system.some_func(0, 0, *someParams)
We can do this by keeping a list of 'children' of the higher-level managers and having functions which reference the children.
class SystemManager:
    """Root of the manager tree; children append themselves on creation."""

    def __init__(self):
        self.children = []

    def some_func(self, child, *params):
        # First argument routes to a child; the rest are forwarded as-is.
        target = self.children[child]
        target.some_func(*params)
class DeviceManager:
    """Mid-level manager; registers itself with its parent on creation."""

    def __init__(self, parent):
        parent.children.append(self)
        self.children = []

    def some_func(self, child, *params):
        # Same routing scheme as SystemManager: first arg picks the child.
        selected = self.children[child]
        selected.some_func(*params)
class SubDeviceManager:
    """Leaf manager; registers itself with its parent on creation."""

    def __init__(self, parent):
        parent.children.append(self)
        # This may or may not have sub-objects; if it does, give it its own
        # children list like the layers above.

    def some_func(self, *params):
        # Do some important stuff here.
        # BUG FIX: the original body was only a comment, which is a
        # SyntaxError; `pass` makes the stub valid Python.
        pass
Unfortunately, this does mean that if we want to call a function of a sub-device manager from the system manager without having lots of dots, we will have to define it again again in the system manager. What you can do instead is use the built-in exec() function, which will take in a string input and run it using the Python interpreter:
class SystemManager:
    ...
    def execute(self, child, function, *args):
        """Invoke `function` (looked up by name) on the selected child.

        SECURITY: getattr() replaces the original exec() string build —
        identical behavior for valid method names, but no arbitrary code
        execution if `function` ever comes from untrusted input.
        """
        getattr(self.children[child], function)(*args)
(and keep the device manager the same)
You would then write in the main program:
system.execute(0, "some_func", 0, *someArgs)
Which would call
device1.some_func(0, someArgs)
Here's what I'm thinking:
SystemManager().apply_to_entity(7, lambda e: setattr(e, "value", 2))
class EntitySuperManagerMixin():
    """Mixin to handle logic for managing entity managers."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)  # Supports any kind of __init__ call.
        self._entity_manager_list = []

    def apply_to_entity(self, entity_num, action):
        # BUG FIX: the original read the undefined name `index` here
        # (NameError at runtime); the parameter is `entity_num`.
        relevant_entity_manager = self._entity_manager_list[entity_num // 4]
        relevant_entity_num = entity_num % 4
        return relevant_entity_manager.apply_to_entity(
            relevant_entity_num, action)
class SystemManager(EntitySuperManagerMixin):
    """Top-level manager: owns the device managers."""

    def __init__(self):
        super().__init__()
        # Readable alias for the mixin's generic manager list.
        self.device_manager_list = self._entity_manager_list
        for _ in range(4):
            self.device_manager_list.append(DeviceManager())
class DeviceManager(EntitySuperManagerMixin):
    """Mid-level manager: owns the sub-device managers."""

    def __init__(self):
        super().__init__()
        # Readable alias for the mixin's generic manager list.
        self.sub_device_manager_list = self._entity_manager_list
        for _ in range(4):
            self.sub_device_manager_list.append(SubDeviceManager())
class SubDeviceManager():
    """Manages entities, not entity managers, thus doesn't inherit the mixin."""

    def __init__(self):
        # Entities need to be classes for this idea to work.
        self._entity_list = [Entity() for _ in range(4)]

    def apply_to_entity(self, entity_num, action):
        # Apply the caller-supplied action directly to the selected entity.
        selected = self._entity_list[entity_num]
        return action(selected)
class Entity():
    """Holds a single configurable value (defaults to 0)."""

    def __init__(self, initial_value=0):
        self.value = initial_value
With this structure:
Entity-specific functions can stay bound to the Entity class (where it belongs).
Manager-specific code needs to be updated in two places: EntitySuperManagerMixin and the lowest level manager (which would need custom behavior anyway since it deals with the actual entities, not other managers).
The way I see it, if you want to dynamically configure different parts of the system you need some sort of addressing: if you input an ID or address together with some parameter, the system will know from the address which sub-system you are talking about and can then configure that system with the parameter.
OOP is quite ok for that and then you can easily manipulate such data via bitwise operators.
So basic addressing is done via binary system , so to do that in python you need first to implement an address static attribute to your class with perhaps some basic further detailing if system grows.
Basic implementation of addres systems is as follows:
bin(0xAB)   # 0xAB == 171 decimal
'0b10101011'
and if we divide it into nibbles
1010 - device manager 10
1011 - sub device manager 11
So in this example we have a system of up to 15 device managers and 15 sub-device managers, and every device and sub-device manager has its own integer address. So let's say you want to access device manager no. 10 via sub-device manager no. 11: you would need their combined address, which is 0xAB (171 decimal, binary 1010 1011), and you would go with:
system.config(address, parameter )
Where the system.config function would look like this:
def config(self, address, parameter):
    """Route `parameter` to the sub-device manager encoded in `address`.

    The high nibble selects the device manager, the low nibble the
    sub-device manager underneath it.

    Raises LookupError when either nibble is out of range.
    """
    device_manager = (address & 0xF0) >> 4  # high nibble, e.g. 10 for 0xA_
    sub_device_manager = address & 0xF      # low nibble, e.g. 11 for 0x_B
    # BUG FIX: the original called range() on the lists themselves
    # (TypeError); the bounds checks need their lengths.
    if device_manager not in range(len(self.devices)):
        raise LookupError("device manager not found")
    if sub_device_manager not in range(len(self.devices[device_manager].device)):
        raise LookupError("sub device manager not found")
    self.devices[device_manager].device[sub_device_manager].implement(parameter)
In layman you would tell system that sub_device 11 from device 10 needs configuration with this parameter.
So how would this setup look in python inheritance class of some base class of system that could be then composited/inherited to different classes:
class systems(object):
    """Composable system node: each instance can contain further `systems`
    instances and is meant to be reachable through a nibble-per-level
    binary address (one hex digit per tree level)."""

    # Global parent element; defaults to None for simplicity.  Class-level
    # default, intended to be overwritten per instance once attached.
    parent = None

    def __init__(self):
        self.addrMASK = 0xf  # address mask for this node's nibble
        self.addr = 0x1      # default address of this node
        self.devices = []    # attached sub-system instances
        self.data = {        # some arbitrary configurable parameters
            "param1":"param_val",
            "param2":"param_val",
            "param3":"param_val",
        }

    def addSubSystem(self,sub_system):
        """Attach `sub_system` as a child, shifting every ancestor's address
        and mask one nibble left so each tree level owns its own nibble."""
        # Type check: prevents attaching an integer or other non-system.
        if not isinstance(sub_system,systems):
            raise TypeError("defined input is not a system type")
        # Append the new child to this node's device list.
        self.devices.append(sub_system)
        # Walk up the parent chain, shifting each ancestor's address/mask
        # one nibble left to make room for the new level.
        obj = self
        while 1:
            if obj.parent is not None:
                obj.parent.addrMASK<<=4  # bit-shift by one nibble
                obj.parent.addr <<=4     # bit-shift by one nibble
                obj = obj.parent
            else:break
        # Shift this node as well, so addresses need no manual resetting.
        self.addrMASK <<=4
        self.addr <<=4
        # Intended to give the new child an address matching its slot in the
        # list and to link it back to this parent.
        # NOTE(review): `if not self.devices` looks inverted — devices was
        # appended to just above, so this branch can never run and the
        # child's `parent`/`addr` are never set.  Presumably `if
        # self.devices` (or no condition) was meant — confirm against the
        # intended addressing output.
        if not self.devices:
            self.devices[ len(self.devices)-1 ].addr +=1
            self.devices[ len(self.devices)-1 ].parent = self

    def __repr__(self):
        # Helpful for checking data: shows this node's address and mask.
        return "system at {0:X}, {1:0X}".format(self.addr,self.addrMASK)

    def __str__(self):
        # Extra helpful: repr plus a listing of the parameter data.
        data = [ '{} : {}\n'.format(k,v) for k,v in self.data.items() ]
        return " ".join([ repr(self),'\n',*data ])

    def __contains__(self,system_index):
        # Data-presence check (1-based index into data); skips sub-systems.
        return system_index-1 in range(len(self.data))

    def apply(self,par_dict):
        """Copy the matching keys of `par_dict` into this node's data.

        Non-dict input is rejected with TypeError; unknown keys are ignored.
        """
        if not isinstance(par_dict,dict):
            raise TypeError("parameter must be a dict type")
        if any( key in self.data.keys() for key in par_dict.keys() ):
            for k,v in par_dict.items():
                if k in self.data.keys():
                    self.data[k]=v
                else:pass
        else:pass

    def implement(self,address,parameter_dictionary):
        """Route `parameter_dictionary` down the tree.

        If the masked address matches this node: apply locally when no
        remainder is left, otherwise strip this node's portion and recurse
        into the addressed child.
        """
        if address&self.addrMASK==self.addr:
            if address-self.addr!=0:
                item = (address-self.addr)>>4
                self.devices[item-1].implement( address-self.addr,parameter_dictionary )
            else:
                self.apply(parameter_dictionary)
# --- demo: build a three-level chain a -> b -> c and route parameters ---
a = systems()
b = systems()
a.addSubSystem(b)
c = systems()
b.addSubSystem(c)


def _dump(label, element):
    # Print one element's label, its address/data, then a blank separator line.
    print(label)
    print(element)
    print('')


# Show the state of every level before any parameter change.
for _label, _element in (('a', a), ('b', b), ('c', c)):
    _dump(_label, _element)

# Route one parameter change to each level of the hierarchy.
a.implement(0x100, {"param1": "a"})
a.implement(0x110, {"param1": "b"})
a.implement(0x111, {"param1": "c"})

# Show the state again to confirm each change landed at the right level.
for _label, _element in (('a', a), ('b', b), ('c', c)):
    _dump(_label, _element)
Related
Python unit testing on class methods with no input arguments
Given a class with class methods that contain only self input: class ABC(): def __init__(self, input_dict) self.variable_0 = input_dict['variable_0'] self.variable_1 = input_dict['variable_1'] self.variable_2 = input_dict['variable_2'] self.variable_3 = input_dict['variable_3'] def some_operation_0(self): return self.variable_0 + self.variable_1 def some_operation_1(self): return self.variable_2 + self.variable_3 First question: Is this very bad practice? Should I just refactor some_operation_0(self) to explicitly take the necessary inputs, some_operation_0(self, variable_0, variable_1)? If so, the testing is very straightforward. Second question: What is the correct way to setup my unit test on the method some_operation_0(self)? Should I setup a fixture in which I initialize input_dict, and then instantiate the class with a mock object? #pytest.fixture def generator_inputs(): f = open('inputs.txt', 'r') input_dict = eval(f.read()) f.close() mock_obj = ABC(input_dict) def test_some_operation_0(): assert mock_obj.some_operation_0() == some_value (I am new to both python and general unit testing...)
Those methods do take an argument: self. There is no need to mock anything. Instead, you can simply create an instance, and verify that the methods return the expected value when invoked. For your example: def test_abc(): a = ABC({'variable_0':0, 'variable_1':1, 'variable_2':2, 'variable_3':3}) assert a.some_operation_0() == 1 assert a.some_operation_1() == 5 If constructing an instance is very difficult, you might want to change your code so that the class can be instantiated from standard in-memory data structures (e.g. a dictionary). In that case, you could create a separate function that reads/parses data from a file and uses the "data-structure-based" __init__ method, e.g. make_abc() or a class method. If this approach does not generalize to your real problem, you could imagine providing programmatic access to the key names or other metadata that ABC recognizes or cares about. Then, you could programmatically construct a "defaulted" instance, e.g. an instance where every value in the input dict is a default-constructed value (such as 0 for int): class ABC(): PROPERTY_NAMES = ['variable_0', 'variable_1', 'variable_2', 'variable_3'] def __init__(self, input_dict): # implementation omitted for brevity pass def some_operation_0(self): return self.variable_0 + self.variable_1 def some_operation_1(self): return self.variable_2 + self.variable_3 def test_abc(): a = ABC({name: 0 for name in ABC.PROPERTY_NAMES}) assert a.some_operation_0() == 0 assert a.some_operation_1() == 0
Python object as property type
I'm searching for an elegant way to replace setter/getter methodes handling complex data types by properties using the #property decorator. The class I'm working on should represent some kind of (network) dimmer. It is possible to request/send "resources" addressed by a specific ID to control the device. I'd like to represent those "resources" as properties of my class hiding the request/send mechanism and the cryptical ID numbers. Some of those "resources" are just primitive types (int, float, ...) but some are more complex, so I've just created simple classes for them. This works fine, but there is an ugly source of error: It is not possible to change an attribute of that property directly, I have to set the property completely everytime. DUMMY_DB = {0x0001: bytearray([0x00])} class State: def __init__(self, on, value): self.on = on self.value = value #staticmethod def from_int(val): return State(bool(val & 0x80), val & 0x7f) def __int__(self): return self.on << 7 | self.value class Dimmer: #property def state(self) -> State: return State.from_int(self._request(0x0001)[0]) # ID 0x0001 => State #state.setter def state(self, val: State): self._send(0x0001, [int(val)]) # ID 0x0001 => State # several more properties... def _request(self, ident) -> bytearray: # usually get resource 'ident' from network/file/... return DUMMY_DB[ident] def _send(self, ident, value): # usually set resource 'ident' on network/file/... using value DUMMY_DB[ident] = value if __name__ == '__main__': dimmer = Dimmer() print(dimmer.state.on, dimmer.state.value) # start state dimmer.state.on = True dimmer.state.value = 15 print(dimmer.state.on, dimmer.state.value) # state did not change dimmer.state = State(True, 15) print(dimmer.state.on, dimmer.state.value) # state changed The first print is just to show the start state ("False 0"). But the second print shows that dimmer.state.on = True dimmer.state.value = 15 are useless. 
This is because dimmer.state returns a new mutable object which is modified and destroyed without further usage. Only through the complete property assignment the setter methode is called and Dimmer._send invoked. I think this might be extremely unintuitive and error-prone. Do you have any suggestions for a better design?
Accessing member variable in Python?
I have recently started learning python (coming from C++ background), but I could not understand how should I access the member variable (nonce) and use it in the second function called def mine_block().Aren't all members of the class Block publicly available from everywhere? class Block: ''' Дефинираме ф-я , която създава променливите като членове на класа Block ''' def _init_(self,prevHash,index,nonce,data,hash,time): self.prevHash = prevHash self.index = index self.nonce = nonce self.data = data self.hash = hash self.time = time def get_hash(self): print(self.hash) def mine_block(self,difficulty): arr = [] for i in range(difficulty): arr[i] = '0' arr[difficulty] = '\0' str = arr while True: ''' here I receive an error unresolved referene nonce ''' nonce++
To refer to class attributes within the class methods you need to pass the object itself into the methods with the keyword self. Then you can access other class methods and the class attributes with self.foo. Also, the while True loop should not be indented at root level within the class. Last, the foo++ C-style is not correct in Python, it should be foo += 1
In Python all instance members are publicly available through the class instance which is passed to class methods as self. Hence you should use self.nonce. Besides, in Python be careful with indentation. Your mine_block method should look like: def mine_block(self,difficulty): ... str = arr while True: self.nonce += 1
Python observer/observable library [duplicate]
Closed. This question needs to be more focused. It is not currently accepting answers. Want to improve this question? Update the question so it focuses on one problem only by editing this post. Closed 4 years ago. Improve this question Are there any exemplary examples of the GoF Observer implemented in Python? I have a bit code which currently has bits of debugging code laced through the key class (currently generating messages to stderr if a magic env is set). Additionally, the class has an interface for incrementally return results as well as storing them (in memory) for post processing. (The class itself is a job manager for concurrently executing commands on remote machines over ssh). Currently the usage of the class looks something like: job = SSHJobMan(hostlist, cmd) job.start() while not job.done(): for each in job.poll(): incrementally_process(job.results[each]) time.sleep(0.2) # or other more useful work post_process(job.results) An alernative usage model is: job = SSHJobMan(hostlist, cmd) job.wait() # implicitly performs a start() process(job.results) This all works fine for the current utility. However it does lack flexibility. For example I currently support a brief output format or a progress bar as incremental results, I also support brief, complete and "merged message" outputs for the post_process() function. However, I'd like to support multiple results/output streams (progress bar to the terminal, debugging and warnings to a log file, outputs from successful jobs to one file/directory, error messages and other results from non-successful jobs to another, etc). This sounds like a situation that calls for Observer ... have instances of my class accept registration from other objects and call them back with specific types of events as they occur. I'm looking at PyPubSub since I saw several references to that in SO related questions. 
I'm not sure I'm ready to add the external dependency to my utility but I could see value in using their interface as a model for mine if that's going to make it easier for others to use. (The project is intended as both a standalone command line utility and a class for writing other scripts/utilities). In short I know how to do what I want ... but there are numerous ways to accomplish it. I want suggestions on what's most likely to work for other users of the code in the long run. The code itself is at: classh.
However it does lack flexibility. Well... actually, this looks like a good design to me if an asynchronous API is what you want. It usually is. Maybe all you need is to switch from stderr to Python's logging module, which has a sort of publish/subscribe model of its own, what with Logger.addHandler() and so on. If you do want to support observers, my advice is to keep it simple. You really only need a few lines of code. class Event(object): pass class Observable(object): def __init__(self): self.callbacks = [] def subscribe(self, callback): self.callbacks.append(callback) def fire(self, **attrs): e = Event() e.source = self for k, v in attrs.items(): setattr(e, k, v) for fn in self.callbacks: fn(e) Your Job class can subclass Observable. When something of interest happens, call self.fire(type="progress", percent=50) or the like.
I think people in the other answers overdo it. You can easily achieve events in Python with less than 15 lines of code. You simple have two classes: Event and Observer. Any class that wants to listen for an event, needs to inherit Observer and set to listen (observe) for a specific event. When an Event is instantiated and fired, all observers listening to that event will run the specified callback functions. class Observer(): _observers = [] def __init__(self): self._observers.append(self) self._observables = {} def observe(self, event_name, callback): self._observables[event_name] = callback class Event(): def __init__(self, name, data, autofire = True): self.name = name self.data = data if autofire: self.fire() def fire(self): for observer in Observer._observers: if self.name in observer._observables: observer._observables[self.name](self.data) Example: class Room(Observer): def __init__(self): print("Room is ready.") Observer.__init__(self) # Observer's init needs to be called def someone_arrived(self, who): print(who + " has arrived!") room = Room() room.observe('someone arrived', room.someone_arrived) Event('someone arrived', 'Lenard') Output: Room is ready. Lenard has arrived!
A few more approaches... Example: the logging module Maybe all you need is to switch from stderr to Python's logging module, which has a powerful publish/subscribe model. It's easy to get started producing log records. # producer import logging log = logging.getLogger("myjobs") # that's all the setup you need class MyJob(object): def run(self): log.info("starting job") n = 10 for i in range(n): log.info("%.1f%% done" % (100.0 * i / n)) log.info("work complete") On the consumer side there's a bit more work. Unfortunately configuring logger output takes, like, 7 whole lines of code to do. ;) # consumer import myjobs, sys, logging if user_wants_log_output: ch = logging.StreamHandler(sys.stderr) ch.setLevel(logging.INFO) formatter = logging.Formatter( "%(asctime)s - %(name)s - %(levelname)s - %(message)s") ch.setFormatter(formatter) myjobs.log.addHandler(ch) myjobs.log.setLevel(logging.INFO) myjobs.MyJob().run() On the other hand there's an amazing amount of stuff in the logging package. If you ever need to send log data to a rotating set of files, an email address, and the Windows Event Log, you're covered. Example: simplest possible observer But you don't need to use any library at all. An extremely simple way to support observers is to call a method that does nothing. # producer class MyJob(object): def on_progress(self, pct): """Called when progress is made. pct is the percent complete. By default this does nothing. The user may override this method or even just assign to it.""" pass def run(self): n = 10 for i in range(n): self.on_progress(100.0 * i / n) self.on_progress(100.0) # consumer import sys, myjobs job = myjobs.MyJob() job.on_progress = lambda pct: sys.stdout.write("%.1f%% done\n" % pct) job.run() Sometimes instead of writing a lambda, you can just say job.on_progress = progressBar.update, which is nice. This is about as simple as it gets. One drawback is that it doesn't naturally support multiple listeners subscribing to the same events. 
Example: C#-like events With a bit of support code, you can get C#-like events in Python. Here's the code: # glue code class event(object): def __init__(self, func): self.__doc__ = func.__doc__ self._key = ' ' + func.__name__ def __get__(self, obj, cls): try: return obj.__dict__[self._key] except KeyError, exc: be = obj.__dict__[self._key] = boundevent() return be class boundevent(object): def __init__(self): self._fns = [] def __iadd__(self, fn): self._fns.append(fn) return self def __isub__(self, fn): self._fns.remove(fn) return self def __call__(self, *args, **kwargs): for f in self._fns[:]: f(*args, **kwargs) The producer declares the event using a decorator: # producer class MyJob(object): #event def progress(pct): """Called when progress is made. pct is the percent complete.""" def run(self): n = 10 for i in range(n+1): self.progress(100.0 * i / n) #consumer import sys, myjobs job = myjobs.MyJob() job.progress += lambda pct: sys.stdout.write("%.1f%% done\n" % pct) job.run() This works exactly like the "simple observer" code above, but you can add as many listeners as you like using +=. (Unlike C#, there are no event handler types, you don't have to new EventHandler(foo.bar) when subscribing to an event, and you don't have to check for null before firing the event. Like C#, events do not squelch exceptions.) How to choose If logging does everything you need, use that. Otherwise do the simplest thing that works for you. The key thing to note is that you don't need to take on a big external dependency.
How about an implementation where objects aren't kept alive just because they're observing something? Below please find an implementation of the observer pattern with the following features: Usage is pythonic. To add an observer to a bound method .bar of instance foo, just do foo.bar.addObserver(observer). Observers are not kept alive by virtue of being observers. In other words, the observer code uses no strong references. No sub-classing necessary (descriptors ftw). Can be used with unhashable types. Can be used as many times you want in a single class. (bonus) As of today the code exists in a proper downloadable, installable package on github. Here's the code (the github package or PyPI package have the most up to date implementation): import weakref import functools class ObservableMethod(object): """ A proxy for a bound method which can be observed. I behave like a bound method, but other bound methods can subscribe to be called whenever I am called. """ def __init__(self, obj, func): self.func = func functools.update_wrapper(self, func) self.objectWeakRef = weakref.ref(obj) self.callbacks = {} #observing object ID -> weak ref, methodNames def addObserver(self, boundMethod): """ Register a bound method to observe this ObservableMethod. The observing method will be called whenever this ObservableMethod is called, and with the same arguments and keyword arguments. If a boundMethod has already been registered to as a callback, trying to add it again does nothing. In other words, there is no way to sign up an observer to be called back multiple times. """ obj = boundMethod.__self__ ID = id(obj) if ID in self.callbacks: s = self.callbacks[ID][1] else: wr = weakref.ref(obj, Cleanup(ID, self.callbacks)) s = set() self.callbacks[ID] = (wr, s) s.add(boundMethod.__name__) def discardObserver(self, boundMethod): """ Un-register a bound method. 
""" obj = boundMethod.__self__ if id(obj) in self.callbacks: self.callbacks[id(obj)][1].discard(boundMethod.__name__) def __call__(self, *arg, **kw): """ Invoke the method which I proxy, and all of it's callbacks. The callbacks are called with the same *args and **kw as the main method. """ result = self.func(self.objectWeakRef(), *arg, **kw) for ID in self.callbacks: wr, methodNames = self.callbacks[ID] obj = wr() for methodName in methodNames: getattr(obj, methodName)(*arg, **kw) return result #property def __self__(self): """ Get a strong reference to the object owning this ObservableMethod This is needed so that ObservableMethod instances can observe other ObservableMethod instances. """ return self.objectWeakRef() class ObservableMethodDescriptor(object): def __init__(self, func): """ To each instance of the class using this descriptor, I associate an ObservableMethod. """ self.instances = {} # Instance id -> (weak ref, Observablemethod) self._func = func def __get__(self, inst, cls): if inst is None: return self ID = id(inst) if ID in self.instances: wr, om = self.instances[ID] if not wr(): msg = "Object id %d should have been cleaned up"%(ID,) raise RuntimeError(msg) else: wr = weakref.ref(inst, Cleanup(ID, self.instances)) om = ObservableMethod(inst, self._func) self.instances[ID] = (wr, om) return om def __set__(self, inst, val): raise RuntimeError("Assigning to ObservableMethod not supported") def event(func): return ObservableMethodDescriptor(func) class Cleanup(object): """ I manage remove elements from a dict whenever I'm called. Use me as a weakref.ref callback to remove an object's id from a dict when that object is garbage collected. """ def __init__(self, key, d): self.key = key self.d = d def __call__(self, wr): del self.d[self.key] To use this we just decorate methods we want to make observable with #event. 
Here's an example class Foo(object): def __init__(self, name): self.name = name #event def bar(self): print("%s called bar"%(self.name,)) def baz(self): print("%s called baz"%(self.name,)) a = Foo('a') b = Foo('b') a.bar.addObserver(b.bar) a.bar()
From wikipedia: from collections import defaultdict class Observable (defaultdict): def __init__ (self): defaultdict.__init__(self, object) def emit (self, *args): '''Pass parameters to all observers and update states.''' for subscriber in self: response = subscriber(*args) self[subscriber] = response def subscribe (self, subscriber): '''Add a new subscriber to self.''' self[subscriber] def stat (self): '''Return a tuple containing the state of each observer.''' return tuple(self.values()) The Observable is used like this. myObservable = Observable () # subscribe some inlined functions. # myObservable[lambda x, y: x * y] would also work here. myObservable.subscribe(lambda x, y: x * y) myObservable.subscribe(lambda x, y: float(x) / y) myObservable.subscribe(lambda x, y: x + y) myObservable.subscribe(lambda x, y: x - y) # emit parameters to each observer myObservable.emit(6, 2) # get updated values myObservable.stat() # returns: (8, 3.0, 4, 12)
Based on Jason's answer, I implemented the C#-like events example as a fully-fledged python module including documentation and tests. I love fancy pythonic stuff :) So, if you want some ready-to-use solution, you can just use the code on github.
Example: twisted log observers To register an observer yourCallable() (a callable that accepts a dictionary) to receive all log events (in addition to any other observers): twisted.python.log.addObserver(yourCallable) Example: complete producer/consumer example From Twisted-Python mailing list: #!/usr/bin/env python """Serve as a sample implementation of a twisted producer/consumer system, with a simple TCP server which asks the user how many random integers they want, and it sends the result set back to the user, one result per line.""" import random from zope.interface import implements from twisted.internet import interfaces, reactor from twisted.internet.protocol import Factory from twisted.protocols.basic import LineReceiver class Producer: """Send back the requested number of random integers to the client.""" implements(interfaces.IPushProducer) def __init__(self, proto, cnt): self._proto = proto self._goal = cnt self._produced = 0 self._paused = False def pauseProducing(self): """When we've produced data too fast, pauseProducing() will be called (reentrantly from within resumeProducing's transport.write method, most likely), so set a flag that causes production to pause temporarily.""" self._paused = True print('pausing connection from %s' % (self._proto.transport.getPeer())) def resumeProducing(self): self._paused = False while not self._paused and self._produced < self._goal: next_int = random.randint(0, 10000) self._proto.transport.write('%d\r\n' % (next_int)) self._produced += 1 if self._produced == self._goal: self._proto.transport.unregisterProducer() self._proto.transport.loseConnection() def stopProducing(self): pass class ServeRandom(LineReceiver): """Serve up random data.""" def connectionMade(self): print('connection made from %s' % (self.transport.getPeer())) self.transport.write('how many random integers do you want?\r\n') def lineReceived(self, line): cnt = int(line.strip()) producer = Producer(self, cnt) 
self.transport.registerProducer(producer, True) producer.resumeProducing() def connectionLost(self, reason): print('connection lost from %s' % (self.transport.getPeer())) factory = Factory() factory.protocol = ServeRandom reactor.listenTCP(1234, factory) print('listening on 1234...') reactor.run()
OP asks "Are there any exemplary examples of the GoF Observer implemented in Python?" This is an example in Python 3.7. This Observable class meets the requirement of creating a relationship between one observable and many observers while remaining independent of their structure. from functools import partial from dataclasses import dataclass, field import sys from typing import List, Callable #dataclass class Observable: observers: List[Callable] = field(default_factory=list) def register(self, observer: Callable): self.observers.append(observer) def deregister(self, observer: Callable): self.observers.remove(observer) def notify(self, *args, **kwargs): for observer in self.observers: observer(*args, **kwargs) def usage_demo(): observable = Observable() # Register two anonymous observers using lambda. observable.register( lambda *args, **kwargs: print(f'Observer 1 called with args={args}, kwargs={kwargs}')) observable.register( lambda *args, **kwargs: print(f'Observer 2 called with args={args}, kwargs={kwargs}')) # Create an observer function, register it, then deregister it. def callable_3(): print('Observer 3 NOT called.') observable.register(callable_3) observable.deregister(callable_3) # Create a general purpose observer function and register four observers. def callable_x(*args, **kwargs): print(f'{args[0]} observer called with args={args}, kwargs={kwargs}') for gui_field in ['Form field 4', 'Form field 5', 'Form field 6', 'Form field 7']: observable.register(partial(callable_x, gui_field)) observable.notify('test') if __name__ == '__main__': sys.exit(usage_demo())
A functional approach to observer design: def add_listener(obj, method_name, listener): # Get any existing listeners listener_attr = method_name + '_listeners' listeners = getattr(obj, listener_attr, None) # If this is the first listener, then set up the method wrapper if not listeners: listeners = [listener] setattr(obj, listener_attr, listeners) # Get the object's method method = getattr(obj, method_name) #wraps(method) def method_wrapper(*args, **kwags): method(*args, **kwags) for l in listeners: l(obj, *args, **kwags) # Listener also has object argument # Replace the original method with the wrapper setattr(obj, method_name, method_wrapper) else: # Event is already set up, so just add another listener listeners.append(listener) def remove_listener(obj, method_name, listener): # Get any existing listeners listener_attr = method_name + '_listeners' listeners = getattr(obj, listener_attr, None) if listeners: # Remove the listener next((listeners.pop(i) for i, l in enumerate(listeners) if l == listener), None) # If this was the last listener, then remove the method wrapper if not listeners: method = getattr(obj, method_name) delattr(obj, listener_attr) setattr(obj, method_name, method.__wrapped__) These methods can then be used to add a listener to any class method. For example: class MyClass(object): def __init__(self, prop): self.prop = prop def some_method(self, num, string): print('method:', num, string) def listener_method(obj, num, string): print('listener:', num, string, obj.prop) my = MyClass('my_prop') add_listener(my, 'some_method', listener_method) my.some_method(42, 'with listener') remove_listener(my, 'some_method', listener_method) my.some_method(42, 'without listener') And the output is: method: 42 with listener listener: 42 with listener my_prop method: 42 without listener
Python Class inheritance Constructor fail: What am I doing wrong?
I have a small Python OOP program in which 2 class, Flan and Outil inherit from a superclass Part. My problem is when I call Flan everything works perfectly, however when I call Outil the program fails silently. The Outil instance is created, but it lacks all the attributes it doesn't share with Part. The Outil instance isn't added to Outil.list_instance_outils, nor to Part.list_instances. class Outil(Part): list_instance_outils = [] def __init___(self, name, part_type, nodes, elems): Part.__init__(self, name, part_type, nodes, elems) self.vect_norm = vectnorm(self.nodes[self.elems[0,1:]-1, 1:]) self.elset = Elset(self) self.nset = Nset(self, refpoint=True, generate=False) self.SPOS = Ab_surface(self, self.elset) self.SNEG = Ab_surface(self, self.elset, type_surf='SNEG') Outil.list_instance_outils.append(self) Part.list_instances.append(self) class Flan(Part): list_instances_flans = [] def __init__(self, name, part_type, nodes, elems): Part.__init__(self, name, part_type, nodes, elems) self.vect_norm = vectnorm(self.nodes[self.elems[0,1:4]-1, 1:]) self.elset = Elset(self) self.nset = Nset(self) self.SPOS = Ab_surface(self, self.elset) self.SNEG = Ab_surface(self, self.elset, type_surf='SNEG') Flan.list_instances_flans.append(self) Part.list_instances.append(self) Both this Classes inherit from Part : class Part(): list_instances = [] def __init__(self, name, part_type, nodes, elems): self.name = name self.name_instance = self.name + '-1' self.part_type = part_type self.elems = elems self.nodes = nodes offset = np.min(self.elems[:, 1:])-1 self.nodes[:, 0] -= offset self.elems[:, 1:] -= offset I cannot stress enough that I have no error message whatsoever. What am I doing wrong here ?
You wrote __init__ with three trailing underscores instead of two in Outil. Because of this, it doesn't get called -- Part.__init__ gets called instead. That's why the class is created but it lacks the attributes beyond what are in Part.
To solve this sort of problem, the best thing to do is to run the code through the debugger. Get your classes into the python interpreter (import, paste, whatever you like), then call pdb: import pdb; pdb.run('Outil()'). You can now step through the code to see what is happening.