Python overload __init__

How do I overload __init__() in Python? I'm used to C/C++ where the compiler can see the difference in data type, but since there are no data types in Python, how can I make sure the third method gets called instead of the second when I give a string as parameter instead of an int?
class Handle:
def __init__(self):
self.pid = -1
def __init__(self, pid):
self.pid = pid
def __init__(self, procname):
print(procname)
self.pid = -1 # find pid by name

In C++ you can define multiple functions/methods with the same name and different signatures. In Python, every time you define a new function with the same name as a previously defined function, you replace it.
To achieve what you want, you have to use optional arguments and/or explicit checks.
Here are a few possible solutions:
class Handle:
def __init__(self, pid=-1):
if isinstance(pid, str):
self.procname = pid
self.pid = -1
else:
self.procname = None
self.pid = pid
class Handle:
# Both pid and procname may be specified at the same time.
def __init__(self, pid=-1, procname=None):
self.procname = procname
self.pid = pid
class Handle:
# Either pid or procname, not both, may be specified.
def __init__(self, pid=-1, procname=None):
if pid >= 0 and procname is not None:
raise ValueError('you can specify either pid or procname, not both')
self.procname = procname
self.pid = pid
class Handle:
def __init__(self, pid=-1):
self.pid = pid
# A separate constructor, with a different name,
# because "explicit is better than implicit" and
# "sparse is better than dense" (cit. The Zen of
# Python).
# In my opinion, this is the most Pythonic solution.
@classmethod
def from_process_name(cls, procname):
pid = get_pid(procname)
return cls(pid)
By the way, I would not recommend using -1 for "not specified". I'd rather use None.
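For example, the alternate-constructor variant with None as the "not specified" default could look like this (just a sketch; get_pid is assumed to exist, as in the snippet above):

class Handle:
    def __init__(self, pid=None):
        self.pid = pid

    @classmethod
    def from_process_name(cls, procname):
        # Resolve the pid by name, then delegate to the normal constructor.
        return cls(get_pid(procname))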

Couldn't PEP 443 (single-dispatch generic functions) be useful in your case?
With something like this (very basic example):
from functools import singledispatch
class Handle:
def __init__(self, arg=None):
self.pid = init_from_arg(arg)
@singledispatch
def init_from_arg(arg):
# If None or neither an integer nor a string, return None
return None
# If the argument is an integer (ie the pid)
@init_from_arg.register(int)
def _(arg):
return arg
# If the argument provided is a string (ie the procname)
@init_from_arg.register(str)
def _(arg):
return get_pid_from_procname(arg)
Edit: following the advice in the comments, these functions could probably be made static methods, although I had trouble with them, so I reverted to the code of my original answer (see the edit history if needed).
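On Python 3.8+ you could also keep the dispatch inside the class with functools.singledispatchmethod; a rough sketch of that variant (get_pid_from_procname is assumed to exist, as above):

from functools import singledispatchmethod

class Handle:
    def __init__(self, arg=None):
        self.pid = self._pid_from(arg)

    @singledispatchmethod
    def _pid_from(self, arg):
        # Fallback: None or an unsupported type.
        return None

    @_pid_from.register
    def _(self, arg: int):
        # An int is already a pid.
        return arg

    @_pid_from.register
    def _(self, arg: str):
        # A str is a process name; look the pid up.
        return get_pid_from_procname(arg)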

Use isinstance to check the type:
class Handle:
def __init__(self, arg):
if isinstance(arg, str):
self.pid = arg
else:
self.pid = -1
If you want to do one action or another, you might be better off setting the arg to None and using it based on whether it is set or not:
class Handle:
def __init__(self, by_name=None):
self.by_name = by_name
Then just check later if it is None:
if self.by_name is not None:
# find by name
else:
# by pid
Or if you want the user to have the choice of passing either a pid or a name:
class Handle:
def __init__(self, by_name=None,by_pid=None):
self.by_name = by_name
self.by_pid = by_pid
The simplest solution would be to take a single arg and move the logic to wherever you call it. Just because something is not None does not make it an int or a string, so you should also check for that or your program will raise an error:
class Handle:
def __init__(self, proc):
self.proc = proc
if not isinstance(self.proc, (int,str)):
raise ValueError("Must be process name or pid.")
def find_proc(self):
if isinstance(self.proc, str):
# find by name
else:
# by pid

Related

Manager / Container class, how to?

I am currently designing software which needs to manage a certain hardware setup.
The hardware setup is as follows:
System - The system contains two identical devices, and has certain functionality relative to the entire system.
Device - Each device contains two identical sub devices, and has certain functionality relative to both sub devices.
Sub device - Each sub device has 4 configurable entities (Controlled via the same hardware command - thus I don't count them as a sub-sub device).
What I want to achieve:
I want to control all configurable entities via the system manager (the entities are numbered serially), meaning I would be able to do the following:
system_instance = system_manager_class(some_params)
system_instance.some_func(0) # configure device_manager[0].sub_device_manager[0].entity[0]
system_instance.some_func(5) # configure device_manager[0].sub_device_manager[1].entity[1]
system_instance.some_func(8) # configure device_manager[1].sub_device_manager[0].entity[0]
What I have thought of doing:
I was thinking of creating an abstract class which contains all sub device functions (with a call to a conversion function) and having the system_manager, device_manager and sub_device_manager inherit it. Thus all classes will have the same function names and I will be able to access them via the system manager.
Something along these lines:
class abs_sub_device():
@staticmethod
def convert_entity(self):
sub_manager = None
sub_entity_num = None
pass
def set_entity_to_2(self, entity_num):
sub_manager, sub_manager_entity_num = self.convert_entity(entity_num)
sub_manager.some_func(sub_manager_entity_num)
class system_manager(abs_sub_device):
def __init__(self):
self.device_manager_list = [] # Initialize device list
self.device_manager_list.append(device_manager())
self.device_manager_list.append(device_manager())
def convert_entity(self, entity_num):
relevant_device_manager = self.device_manager_list[entity_num // 4]
relevant_entity = entity_num % 4
return relevant_device_manager, relevant_entity
class device_manager(abs_sub_device):
def __init__(self):
self.sub_device_manager_list = [] # Initialize sub device list
self.sub_device_manager_list.append(sub_device_manager())
self.sub_device_manager_list.append(sub_device_manager())
def convert_entity(self, entity_num):
relevant_sub_device_manager = self.sub_device_manager_list[entity_num // 4]
relevant_entity = entity_num % 4
return relevant_sub_device_manager, relevant_entity
class sub_device_manager(abs_sub_device):
def __init__(self):
self.entity_list = [0] * 4
def set_entity_to_2(self, entity_num):
self.entity_list[entity_num] = 2
The code is for generic understanding of my design, not for actual functionality.
The problem:
It seems to me that the system I am trying to design is really generic and that there must be a built-in Python way to do this, or that my entire object-oriented view of it is wrong.
I would really like to know if someone has a better way of doing this.
After much thinking, I think I found a pretty generic way to solve the issue, using a combination of decorators, inheritance and dynamic function creation.
The main idea is as follows:
1) Each layer dynamically creates all relevant sub-layer functions for itself (inside the __init__ function, using a decorator on it).
2) Each dynamically created function converts the entity value according to a convert function (which is a static function of the abs_container_class), and calls the lower layer's function with the same name (see make_convert_function_method).
3) This basically causes all sub-layer functions to be exposed on the higher level with zero code duplication.
def get_relevant_class_method_list(class_instance):
method_list = [func for func in dir(class_instance) if callable(getattr(class_instance, func)) and not func.startswith("__") and not func.startswith("_")]
return method_list
def make_convert_function_method(name):
def _method(self, entity_num, *args):
sub_manager, sub_manager_entity_num = self._convert_entity(entity_num)
function_to_call = getattr(sub_manager, name)
function_to_call(sub_manager_entity_num, *args)
return _method
def container_class_init_decorator(function_object):
def new_init_function(self, *args):
# Call the init function :
function_object(self, *args)
# Get all relevant methods (Of one sub class is enough)
method_list = get_relevant_class_method_list(self.container_list[0])
# Dynamically create all sub layer functions :
for method_name in method_list:
_method = make_convert_function_method(method_name)
setattr(type(self), method_name, _method)
return new_init_function
class abs_container_class():
@staticmethod
def _convert_entity(self):
sub_manager = None
sub_entity_num = None
pass
class system_manager(abs_container_class):
@container_class_init_decorator
def __init__(self):
self.device_manager_list = [] # Initialize device list
self.device_manager_list.append(device_manager())
self.device_manager_list.append(device_manager())
self.container_list = self.device_manager_list
def _convert_entity(self, entity_num):
relevant_device_manager = self.device_manager_list[entity_num // 4]
relevant_entity = entity_num % 4
return relevant_device_manager, relevant_entity
class device_manager(abs_container_class):
@container_class_init_decorator
def __init__(self):
self.sub_device_manager_list = [] # Initialize sub device list
self.sub_device_manager_list.append(sub_device_manager())
self.sub_device_manager_list.append(sub_device_manager())
self.container_list = self.sub_device_manager_list
def _convert_entity(self, entity_num):
relevant_sub_device_manager = self.sub_device_manager_list[entity_num // 4]
relevant_entity = entity_num % 4
return relevant_sub_device_manager, relevant_entity
class sub_device_manager():
def __init__(self):
self.entity_list = [0] * 4
def set_entity_to_value(self, entity_num, required_value):
self.entity_list[entity_num] = required_value
print("I set the entity to : {}".format(required_value))
# This is used for auto-completion purposes (using the PEP 484 type comment convention)
class auto_complete_class(system_manager, device_manager, sub_device_manager):
pass
system_instance = system_manager() # type: auto_complete_class
system_instance.set_entity_to_value(0, 3)
There is still a little issue with this solution: auto-completion will not work, since the highest-level class has almost no statically implemented functions.
In order to solve this I cheated a bit: I created an empty class which inherits from all layers and told the IDE, via a type comment, that it is the type of the instance being created (# type: auto_complete_class).
Does this solve your problem?
class EndDevice:
def __init__(self, entities_num):
self.entities = list(range(entities_num))
@property
def count_entities(self):
return len(self.entities)
def get_entity(self, i):
return str(i)
class Device:
def __init__(self, sub_devices):
self.sub_devices = sub_devices
@property
def count_entities(self):
return sum(sd.count_entities for sd in self.sub_devices)
def get_entity(self, i):
c = 0
for index, sd in enumerate(self.sub_devices):
if c <= i < sd.count_entities + c:
return str(index) + " " + sd.get_entity(i - c)
c += sd.count_entities
raise IndexError(i)
SystemManager = Device # They are exactly the same. This also means you can nest them infinitely.
sub_devices1 = [EndDevice(4) for _ in range(2)]
sub_devices2 = [EndDevice(4) for _ in range(2)]
system_manager = SystemManager([Device(sub_devices1), Device(sub_devices2)])
print(system_manager.get_entity(0))
print(system_manager.get_entity(5))
print(system_manager.get_entity(15))
I can't think of a better way to do this than OOP, but inheritance will only give you one set of low-level functions for the system manager, so it will be like having one device manager and one sub-device manager. A better thing to do would be, a bit like tkinter widgets, to have one system manager and initialise all the other managers like children in a tree, so:
system = SystemManager()
device1 = DeviceManager(system)
subDevice1 = SubDeviceManager(device1)
device2 = DeviceManager(system)
subDevice2 = SubDeviceManager(device2)
#to execute some_func on subDevice1
system.some_func(0, 0, *someParams)
We can do this by keeping a list of 'children' of the higher-level managers and having functions which reference the children.
class SystemManager:
def __init__(self):
self.children = []
def some_func(self, child, *params):
self.children[child].some_func(*params)
class DeviceManager:
def __init__(self, parent):
parent.children.append(self)
self.children = []
def some_func(self, child, *params):
self.children[child].some_func(*params)
class SubDeviceManager:
def __init__(self, parent):
parent.children.append(self)
#this may or may not have sub-objects, if it does we need to make it its own children list.
def some_func(self, *params):
#do some important stuff
Unfortunately, this does mean that if we want to call a function of a sub-device manager from the system manager without having lots of dots, we will have to define it again in the system manager. What you can do instead is use the built-in exec() function, which will take in a string input and run it using the Python interpreter:
class SystemManager:
...
def execute(self, child, function, *args):
exec("self.children[child]."+function+"(*args)")
(and keep the device manager the same)
You would then write in the main program:
system.execute(0, "some_func", 0, *someArgs)
Which would call
device1.some_func(0, *someArgs)
Here's what I'm thinking:
SystemManager().apply_to_entity(7, lambda e: setattr(e, 'value', 2))
class EntitySuperManagerMixin():
"""Mixin to handle logic for managing entity managers."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs) # Supports any kind of __init__ call.
self._entity_manager_list = []
def apply_to_entity(self, entity_num, action):
relevant_entity_manager = self._entity_manager_list[entity_num // 4]
relevant_entity_num = entity_num % 4
return relevant_entity_manager.apply_to_entity(
relevant_entity_num, action)
class SystemManager(EntitySuperManagerMixin):
def __init__(self):
super().__init__()
# An alias for _entity_manager_list to improve readability.
self.device_manager_list = self._entity_manager_list
self.device_manager_list.extend(DeviceManager() for _ in range(4))
class DeviceManager(EntitySuperManagerMixin):
def __init__(self):
super().__init__()
# An alias for _entity_manager_list to improve readability.
self.sub_device_manager_list = self._entity_manager_list
self.sub_device_manager_list.extend(SubDeviceManager() for _ in range(4))
class SubDeviceManager():
"""Manages entities, not entity managers, thus doesn't inherit the mixin."""
def __init__(self):
# Entities need to be classes for this idea to work.
self._entity_list = [Entity() for _ in range(4)]
def apply_to_entity(self, entity_num, action):
return action(self._entity_list[entity_num])
class Entity():
def __init__(self, initial_value=0):
self.value = initial_value
With this structure:
Entity-specific functions can stay bound to the Entity class (where they belong).
Manager-specific code needs to be updated in two places: EntitySuperManagerMixin and the lowest level manager (which would need custom behavior anyway since it deals with the actual entities, not other managers).
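A minimal usage sketch of the classes above, showing both a write and a read routed through the manager layers:

system = SystemManager()

# Set entity number 7's value to 2.
system.apply_to_entity(7, lambda e: setattr(e, 'value', 2))

# Reading works the same way: the action's return value is passed back up.
print(system.apply_to_entity(7, lambda e: e.value))   # -> 2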
The way I see it, if you want to dynamically configure different parts of the system you need some sort of addressing: you pass in an ID or address together with some parameter, the system works out from the address which subsystem you are talking about, and then configures that subsystem with the parameter.
OOP is quite OK for that, and you can easily manipulate such data via bitwise operators.
Basic addressing is done in binary, so to do that in Python you first need to add an address attribute to your class, with perhaps some further detailing if the system grows.
A basic implementation of the address system is as follows:
bin(171)
'0b10101011'
and if we divide it into nibbles:
1010 - device manager 10
1011 - sub device manager 11
So in this example we have a system of up to 15 device managers and 15 sub device managers, and every device and sub device manager has its own integer address. So let's say you want to access device manager no. 10 with sub device manager no. 11. You would need their combined address, which is 171 (0xAB), and you would go with:
system.config(address, parameter )
Where the system.config function would look like this:
def config(self, address, parameter):
device_manager = (address&0xF0)>>4 # 10
sub_device_manager = address&0xF # 11
if device_manager not in range(len(self.devices)): raise LookupError("device manager not found")
if sub_device_manager not in range(len(self.devices[device_manager].devices)): raise LookupError("sub device manager not found")
self.devices[device_manager].devices[sub_device_manager].implement(parameter)
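For completeness, packing the two nibble indices back into a single address is just the reverse shift (a small sketch):

device_manager = 10
sub_device_manager = 11
address = (device_manager << 4) | sub_device_manager   # 0xAB == 171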
In layman's terms, you would tell the system that sub-device 11 of device 10 needs to be configured with this parameter.
So how would this setup look in Python? Here is a base systems class that could then be composed/inherited into different classes:
class systems(object):
parent = None #global parent element, defaults to None well for simplicity
def __init__(self):
self.addrMASK = 0xf # address mask for that nibble
self.addr = 0x1 # default address of that element
self.devices = [] # list of instances of device
self.data = { #some arbitrary data
"param1":"param_val",
"param2":"param_val",
"param3":"param_val",
}
def addSubSystem(self,sub_system): # connects elements to each other
# check for validity
if not isinstance(sub_system,systems):
raise TypeError("defined input is not a system type") # to prevent passing an integer or something
# appends a device to system data
self.devices.append(sub_system)
# search parent variables from sub device manager to system
obj = self
while 1:
if obj.parent is not None:
obj.parent.addrMASK<<=4 #bitshifts 4 bits
obj.parent.addr <<=4 #bitshifts 4 bits
obj = obj.parent
else:break
#self management; I'm a lazy guy, so I added this part so I wouldn't have to reset addresses manually
self.addrMASK <<=4 #bitshifts 4 bits
self.addr <<=4 #bitshifts 4 bits
# give the new child an address based on its place in the list and link it to its parent;
# this could be done more eloquently but I didn't know your limitations
self.devices[ len(self.devices)-1 ].addr += len(self.devices)-1
self.devices[ len(self.devices)-1 ].parent = self
# helpful for checking data ... gives the address of system
def __repr__(self):
return "system at {0:X}, {1:0X}".format(self.addr,self.addrMASK)
# extra helpful lists data as well
def __str__(self):
data = [ '{} : {}\n'.format(k,v) for k,v in self.data.items() ]
return " ".join([ repr(self),'\n',*data ])
#checking for data, skips looping over sub systems
def __contains__(self,system_index):
return system_index-1 in range(len(self.data))
# applying parameter change -- just an example
def apply(self,par_dict):
if not isinstance(par_dict,dict):
raise TypeError("parameter must be a dict type")
if any( key in self.data.keys() for key in par_dict.keys() ):
for k,v in par_dict.items():
if k in self.data.keys():
self.data[k]=v
else:pass
else:pass
# implementing parameters through addresses
def implement(self,address,parameter_dictionary):
if address&self.addrMASK==self.addr:
if address-self.addr!=0:
item = (address-self.addr)>>4
self.devices[item-1].implement( address-self.addr,parameter_dictionary )
else:
self.apply(parameter_dictionary)
a = systems()
b = systems()
a.addSubSystem(b)
c = systems()
b.addSubSystem(c)
print('a')
print(a)
print('')
print('b')
print(b)
print('')
print('c')
print(c)
print('')
a.implement(0x100,{"param1":"a"})
a.implement(0x110,{"param1":"b"})
a.implement(0x111,{"param1":"c"})
print('a')
print(a)
print('')
print('b')
print(b)
print('')
print('c')
print(c)
print('')

How can I split a long function into separate steps while maintaining the relationship between said steps?

I have a very long function func which takes a browser handle and performs a bunch of requests and reads a bunch of responses in a specific order:
def func(browser):
# make sure we are logged in otherwise log in
# make request to /search and check that the page has loaded
# fill form in /search and submit it
# read table of response and return the result as list of objects
Each operation requires a large amount of code due to the complexity of the DOM, and they tend to grow really fast.
What would be the best way to refactor this function into smaller components so that the following properties still hold:
the execution flow of the operations and/or their preconditions is guaranteed just like in the current version
the preconditions are not checked with asserts against the state, as this is a very costly operation
func can be called multiple times on the browser?
Just wrap the three helper methods in a class, and track which methods are allowed to run in an instance.
class Helper(object):
def __init__(self):
self.a = True
self.b = False
self.c = False
def funcA(self):
if not self.a:
raise RuntimeError("Cannot run funcA now")
# do stuff here
self.a = False
self.b = True
return whatever
def funcB(self):
if not self.b:
raise RuntimeError("Cannot run funcB now")
# do stuff here
self.b = False
self.c = True
return whatever
def funcC(self):
if not self.c:
raise RuntimeError("Cannot run funcC now")
# do stuff here
self.c = False
self.a = True
return whatever
def func(...):
h = Helper()
h.funcA()
h.funcB()
h.funcC()
# etc
The only way to call a method is if its flag is true, and each method clears its own flag and sets the next method's flag before exiting. As long as you don't touch h.a et al. directly, this ensures that each method can only be called in the proper order.
Alternatively, you can use a single flag that is a reference to the function currently allowed to run.
class Helper(object):
def __init__(self):
self.allowed = self.funcA
def funcA(self):
if self.allowed is not self.funcA:
raise Error("Cannot run funcA now")
# do stuff
self.allowed = self.funcB
return whatever
# etc
Here's the solution I came up with. I used a decorator (closely related to the one in this blog post) which only allows for a function to be called once.
def call_only_once(func):
def new_func(*args, **kwargs):
if not new_func._called:
try:
return func(*args, **kwargs)
finally:
new_func._called = True
else:
raise Exception("Already called this once.")
new_func._called = False
return new_func
@call_only_once
def stateA():
print('Calling stateA only this time')
@call_only_once
def stateB():
print('Calling stateB only this time')
@call_only_once
def stateC():
print('Calling stateC only this time')
def state():
stateA()
stateB()
stateC()
if __name__ == "__main__":
state()
You'll see that if you re-call any of the functions, an Exception is raised stating that the function has already been called.
The problem with this is that if you ever need to call state() again, you're hosed. Unless you implement these functions as private functions, I don't think you can do exactly what you want due to the nature of Python's scoping rules.
Edit
You can also remove the else in the decorator and your function will always return None.
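If state() does need to be re-runnable, one possible workaround (a sketch, not part of the original answer) is to create the once-only wrappers inside state() itself, so every call gets fresh single-use steps:

def state():
    @call_only_once
    def stateA():
        print('Calling stateA only this time')

    @call_only_once
    def stateB():
        print('Calling stateB only this time')

    @call_only_once
    def stateC():
        print('Calling stateC only this time')

    # Within one state() call each step still runs exactly once,
    # but state() itself can be called as often as needed.
    stateA()
    stateB()
    stateC()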
Here is a snippet I used once for my state machine:
class StateMachine(object):
def __init__(self):
self.handlers = {}
self.start_state = None
self.end_states = []
def add_state(self, name, handler, end_state=0):
name = name.upper()
self.handlers[name] = handler
if end_state:
self.end_states.append(name)
def set_start(self, name):
# startup state
self.start_state = name
def run(self, **kw):
"""
Run
:param kw:
:return:
"""
# the first .run call calls the first handler with kw keywords
# each registered handler should return the following handler and the needed kw
try:
handler = self.handlers[self.start_state]
except KeyError:
raise InitializationError("must call .set_start() before .run()")
while True:
(new_state, kw) = handler(**kw)
if isinstance(new_state, str):
if new_state in self.end_states:
print("reached ", new_state)
break
else:
handler = self.handlers[new_state]
elif hasattr(new_state, "__call__"):
handler = new_state
else:
return
Usage:
class MyParser(StateMachine):
def __init__(self):
super().__init__()
# define handlers
# we can define many handler as we want
self.handlers["begin_parse"] = self.begin_parse
# define the startup handler
self.set_start("begin_parse")
def end(self, **kw):
logging.info("End of parsing ")
# no callable handler => end
return None, None
def second(self, **kw):
logging.info("second ")
# do something
# if the condition is reached, call the `self.end` handler
if ...:
return self.end, {}
def begin_parse(self, **kw):
logging.info("start of parsing ")
# long process until the condition is reached, then call the `self.second` handler with new kw keywords
while True:
kw = {}
if ...:
return self.second, kw
# elif other cond:
# return self.other_handler, kw
# elif other cond 2:
# return self.other_handler 2, kw
else:
return self.end, kw
# start the state machine
MyParser().run()
will print
INFO:root:start of parsing
INFO:root:second
INFO:root:End of parsing
You could use local functions in your func function. Ok, they are still declared inside one single global function, but Python is nice enough to still give you access to them for tests.
Here is an example of one function declaring and executing 3 (supposedly heavy) subfunctions. It takes one optional parameter test that, when set to 'TEST', prevents actual execution and instead gives external access to the individual sub-functions and to a local variable:
def func(test=None):
glob = []
def partA():
glob.append('A')
def partB():
glob.append('B')
def partC():
glob.append('C')
if (test == 'TEST'):
global testA, testB, testC, testCR
testA, testB, testC, testCR = partA, partB, partC, glob
return None
partA()
partB()
partC()
return glob
When you call func, the 3 parts are executed in sequence. But if you first call func('TEST'), you can then access the local glob variable as testCR, and the 3 subfunctions as testA, testB and testC. This way you can still test the 3 parts individually with well-defined input and control their output.
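A short usage sketch of the function above (the assertions just spell out the expected behaviour):

# Normal run: all three parts execute in sequence.
assert func() == ['A', 'B', 'C']

# Test mode: nothing is executed, but the parts are exposed for inspection.
func('TEST')
testA()
assert testCR == ['A']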
I would insist on the suggestion given by @user3159253 in his comment on the original question:
If the sole purpose is readability I would split the func into three "private" or "protected" ones (i.e. _func1 or __func1) and a private or protected property which keeps the state shared between the functions.
This makes a lot of sense to me and seems more usual in object-oriented programming than the other options. Consider this example as an alternative:
Your class (teste.py):
class Test:
def __init__(self):
self.__environment = {} # Protected information to be shared
self.public_stuff = 'public info' # Accessible to outside callers
def func(self):
print "Main function"
self.__func_a()
self.__func_b()
self.__func_c()
print self.__environment
def __func_a(self):
self.__environment['function a says'] = 'hi'
def __func_b(self):
self.__environment['function b says'] = 'hello'
def __func_c(self):
self.__environment['function c says'] = 'hey'
Other file:
from teste import Test
t = Test()
t.func()
This will output:
Main function
{'function a says': 'hi', 'function b says': 'hello', 'function c says': 'hey'}
If you try to call one of the protected functions, an error occurs:
Traceback (most recent call last):
File "C:/Users/Lucas/PycharmProjects/testes/other.py", line 6, in <module>
t.__func_a()
AttributeError: Test instance has no attribute '__func_a'
Same thing if you try to access the protected environment variable:
Traceback (most recent call last):
File "C:/Users/Lucas/PycharmProjects/testes/other.py", line 5, in <module>
print t.__environment
AttributeError: Test instance has no attribute '__environment'
In my view this is the most elegant, simple and readable way to solve your problem, let me know if it fits your needs :)

Python Running Multiple Locks across Multiple Threads

So the situation is that I have multiple methods, which might be threaded simultaneously, but all need their own lock
against being re-threaded until they have run. They are established by initialising a class with some data processing options:
class InfrequentDataDaemon(object): pass
class FrequentDataDaemon(object): pass
def addMethod(name):
def wrapper(f):
setattr(name, f.__name__, staticmethod(f))
return f
return wrapper
class DataProcessors(object):
lock = threading.Lock()
def __init__(self, options):
self.common_settings = options['common_settings']
self.data_processing_configurations = options['data_processing_configurations'] #Configs for each processing method
self.data_processing_types = options['data_processing_types']
self.Data_Processing_Functions = {}
#I __init__ each processing method as a separate function so that it can be locked
for type in options['data_processing_types']:
def bindFunction1(name):
def func1(self, data=None, lock=None):
config = self.data_processing_configurations[data['type']] #I get the right config for the datatype
with lock:
FetchDataBaseStuff(data['type'])
#I don't want this to be run more than once at a time per DataProcessing Type
# But it's fine if multiple DoSomethings run at once, as long as each DataType is different!
DoSomething(data, config)
WriteToDataBase(data['type'])
func1.__name__ = "Processing_for_{}".format(type)
self.Data_Processing_Functions[func1.__name__] = func1 #Add this function to the Dictionary object
bindFunction1(type)
#Then I add some methods to a daemon that are going to check if our Dataprocessors need to be called
def fast_process_types(data):
if not example_condition is True: return
if not data['type'] in self.data_processing_types: return #Check that we are doing something with this type of data
threading.Thread(target=self.Data_Processing_Functions["Processing_for_{}".format(data['type'])], args=(self,data, lock)).start()
def slow_process_types(data):
if not some_other_condition is True: return
if not data['type'] in self.data_processing_types: return #Check that we are doing something with this type of data
threading.Thread(target=self.Data_Processing_Functions["Processing_for_{}".format(data['type'])], args=(self,data, lock)).start()
addMethod(InfrequentDataDaemon)(slow_process_types)
addMethod(FrequentDataDaemon)(fast_process_types)
The idea is to lock each method in
DataProcessors.Data_Processing_Functions - so that each method is only accessed by one thread at a time (and the rest of the threads for the same method are queued). How does locking need to be set up to achieve this effect?
I'm not sure I completely follow what you're trying to do here, but could you just create a separate threading.Lock object for each type?
class DataProcessors(object):
def __init__(self, options):
self.common_settings = options['common_settings']
self.data_processing_configurations = options['data_processing_configurations'] #Configs for each processing method
self.data_processing_types = options['data_processing_types']
self.Data_Processing_Functions = {}
self.locks = {}
#I __init__ each processing method as a separate function so that it can be locked
for type in options['data_processing_types']:
self.locks[type] = threading.Lock()
def bindFunction1(name):
def func1(self, data=None):
config = self.data_processing_configurations[data['type']] #I get the right config for the datatype
with self.locks[data['type']]:
FetchDataBaseStuff(data['type'])
DoSomething(data, config)
WriteToDataBase(data['type'])
func1.__name__ = "Processing_for_{}".format(type)
self.Data_Processing_Functions[func1.__name__] = func1 #Add this function to the Dictionary object
bindFunction1(type)
#Then I add some methods to a daemon that are going to check if our Dataprocessors need to be called
def fast_process_types(data):
if not example_condition is True: return
if not data['type'] in self.data_processing_types: return #Check that we are doing something with this type of data
threading.Thread(target=self.Data_Processing_Functions["Processing_for_{}".format(data['type'])], args=(self,data)).start()
def slow_process_types(data):
if not some_other_condition is True: return
if not data['type'] in self.data_processing_types: return #Check that we are doing something with this type of data
threading.Thread(target=self.Data_Processing_Functions["Processing_for_{}".format(data['type'])], args=(self,data)).start()
addMethod(InfrequentDataDaemon)(slow_process_types)
addMethod(FrequentDataDaemon)(fast_process_types)

Pythonic - How to initialize a constructor with multiple arguments and validate

I'm a Python noob and I'm trying to solve my problems the 'Pythonic' way. I have a class whose __init__ method takes 6 parameters. I need to validate each param and throw/raise an Exception if any fails to validate.
Is this the right way?
class DefinitionRunner:
def __init__(self, canvasSize, flightId, domain, definitionPath, harPath):
self.canvasSize = canvasSize
self.flightId = flightId
self.domain = domain
self.harPath = harPath
self.definitionPath = definitionPath
... bunch of validation checks...
... if fails, raise ValueError ...
If you want the variables to be settable independently of __init__, you could use properties to implement validations in separate methods.
They work only for new style classes though, so you need to define the class as class DefinitionRunner(object)
So for example,
@property
def canvasSize(self):
return self._canvasSize
@canvasSize.setter
def canvasSize(self, value):
# some validation here
self._canvasSize = value
Broadly speaking, that looks like the way you'd do it. Though strictly speaking, you might as well do validation before rather than after assignment, especially if assignment could potentially be time or resource intensive. Also, style convention says not to align assignment blocks like you are.
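For instance, a validate-first __init__ might look like this (the concrete checks are invented for illustration; substitute your real ones):

class DefinitionRunner:
    def __init__(self, canvasSize, flightId, domain, definitionPath, harPath):
        # Validate everything up front, before any assignment.
        if not isinstance(canvasSize, int) or canvasSize <= 0:
            raise ValueError("canvasSize must be a positive integer")
        if not domain:
            raise ValueError("domain must be a non-empty string")
        # ... similar checks for flightId, definitionPath, harPath ...
        self.canvasSize = canvasSize
        self.flightId = flightId
        self.domain = domain
        self.definitionPath = definitionPath
        self.harPath = harPath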
I would do it the way you did it, except for the validation: I would validate in a setter method and use it to set the attributes.
You could do something like this. Make a validator for each type of input. Make a helper function to run validation:
def validate_and_assign(obj, items_d, validators):
#validate all entries
for key, validator in validators.items():
if not validator(items_d[key]):
raise ValueError("Validation for %s failed" % (key,))
#set all entries
for key, val in items_d.items():
setattr(obj, key, val)
Which you'd use like this:
class DefinitionRunner:
validators = {
'canvasSize': canvasSize_validator,
'flightId': flightId_validator,
'domain': domain_validator,
'definitionPath': definitionPath_validator,
'harPath': harPath_validator,
}
def __init__(self, canvasSize, flightId, domain, definitionPath, harPath):
validate_and_assign(self, {
'canvasSize': canvasSize,
'flightId': flightId,
'domain': domain,
'definitionPath': definitionPath,
'harPath': harPath,
}, DefinitionRunner.validators)
The validators might be the same function, of course, if the data type is the same.
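The validator functions referenced above (canvasSize_validator and friends) are not shown; hypothetical ones might simply return a bool, for example:

def canvasSize_validator(value):
    # Hypothetical check: a positive integer.
    return isinstance(value, int) and value > 0

def domain_validator(value):
    # Hypothetical check: a non-empty string.
    return isinstance(value, str) and bool(value)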
I'm not sure if this is exactly "Pythonic", but I've defined a function decorator called require_type. (To be honest, I think I found it somewhere online.)
def require_type(my_arg, *valid_types):
'''
A simple decorator that performs type checking.
@param my_arg: string indicating argument name
@param valid_types: list of valid types
'''
def make_wrapper(func):
if hasattr(func, 'wrapped_args'):
wrapped = getattr(func, 'wrapped_args')
else:
body = func.__code__
wrapped = list(body.co_varnames[:body.co_argcount])
try:
idx = wrapped.index(my_arg)
except ValueError:
raise NameError(my_arg)
def wrapper(*args, **kwargs):
def fail():
all_types = ', '.join(str(typ) for typ in valid_types)
raise TypeError("'%s' was type %s, expected to be in following list: %s" % (my_arg, type(arg), all_types))
if len(args) > idx:
arg = args[idx]
if not isinstance(arg, valid_types):
fail()
else:
if my_arg in kwargs:
arg = kwargs[my_arg]
if not isinstance(arg, valid_types):
fail()
return func(*args, **kwargs)
wrapper.wrapped_args = wrapped
return wrapper
return make_wrapper
Then, to use it:
import numpy

class SomeObject(object):
@require_type("prop1", str)
@require_type("prop2", numpy.complex128)
def __init__(self, prop1, prop2):
pass

Python observer/observable library [duplicate]

Closed. This question needs to be more focused. It is not currently accepting answers.
Closed 4 years ago.
Are there any exemplary examples of the GoF Observer implemented in Python? I have a bit of code which currently has bits of debugging code laced through the key class (currently generating messages to stderr if a magic env is set). Additionally, the class has an interface for incrementally returning results as well as storing them (in memory) for post-processing. (The class itself is a job manager for concurrently executing commands on remote machines over ssh.)
Currently the usage of the class looks something like:
job = SSHJobMan(hostlist, cmd)
job.start()
while not job.done():
for each in job.poll():
incrementally_process(job.results[each])
time.sleep(0.2) # or other more useful work
post_process(job.results)
An alernative usage model is:
job = SSHJobMan(hostlist, cmd)
job.wait() # implicitly performs a start()
process(job.results)
This all works fine for the current utility. However, it does lack flexibility. For example, I currently support a brief output format or a progress bar as incremental results. I also support brief, complete and "merged message" outputs for the post_process() function.
However, I'd like to support multiple results/output streams (progress bar to the terminal, debugging and warnings to a log file, outputs from successful jobs to one file/directory, error messages and other results from non-successful jobs to another, etc).
This sounds like a situation that calls for Observer ... have instances of my class accept registration from other objects and call them back with specific types of events as they occur.
I'm looking at PyPubSub since I saw several references to that in SO related questions. I'm not sure I'm ready to add the external dependency to my utility but I could see value in using their interface as a model for mine if that's going to make it easier for others to use. (The project is intended as both a standalone command line utility and a class for writing other scripts/utilities).
In short I know how to do what I want ... but there are numerous ways to accomplish it. I want suggestions on what's most likely to work for other users of the code in the long run.
The code itself is at: classh.
However it does lack flexibility.
Well... actually, this looks like a good design to me if an asynchronous API is what you want. It usually is. Maybe all you need is to switch from stderr to Python's logging module, which has a sort of publish/subscribe model of its own, what with Logger.addHandler() and so on.
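For instance, replacing the stderr writes with a logger lets callers attach as many handlers ("subscribers") as they like; a small sketch (the logger name here is arbitrary):

import logging
import sys

log = logging.getLogger("sshjobman")                 # arbitrary name for this sketch
log.setLevel(logging.INFO)
log.addHandler(logging.StreamHandler(sys.stderr))    # one "subscriber"
log.addHandler(logging.FileHandler("jobs.log"))      # another "subscriber"

log.info("job started on %d hosts", 3)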
If you do want to support observers, my advice is to keep it simple. You really only need a few lines of code.
class Event(object):
pass
class Observable(object):
def __init__(self):
self.callbacks = []
def subscribe(self, callback):
self.callbacks.append(callback)
def fire(self, **attrs):
e = Event()
e.source = self
for k, v in attrs.items():
setattr(e, k, v)
for fn in self.callbacks:
fn(e)
Your Job class can subclass Observable. When something of interest happens, call self.fire(type="progress", percent=50) or the like.
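A minimal usage sketch of that idea (the Job class here is made up for illustration):

class Job(Observable):
    def run(self):
        # ... do some work, then notify subscribers ...
        self.fire(type="progress", percent=50)

job = Job()
job.subscribe(lambda e: print(e.type, e.percent))
job.run()   # prints: progress 50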
I think people in the other answers overdo it. You can easily achieve events in Python with fewer than 15 lines of code.
You simply have two classes: Event and Observer. Any class that wants to listen for an event needs to inherit Observer and set itself to listen (observe) for a specific event. When an Event is instantiated and fired, all observers listening to that event will run the specified callback functions.
class Observer():
_observers = []
def __init__(self):
self._observers.append(self)
self._observables = {}
def observe(self, event_name, callback):
self._observables[event_name] = callback
class Event():
def __init__(self, name, data, autofire = True):
self.name = name
self.data = data
if autofire:
self.fire()
def fire(self):
for observer in Observer._observers:
if self.name in observer._observables:
observer._observables[self.name](self.data)
Example:
class Room(Observer):
def __init__(self):
print("Room is ready.")
Observer.__init__(self) # Observer's init needs to be called
def someone_arrived(self, who):
print(who + " has arrived!")
room = Room()
room.observe('someone arrived', room.someone_arrived)
Event('someone arrived', 'Lenard')
Output:
Room is ready.
Lenard has arrived!
A few more approaches...
Example: the logging module
Maybe all you need is to switch from stderr to Python's logging module, which has a powerful publish/subscribe model.
It's easy to get started producing log records.
# producer
import logging
log = logging.getLogger("myjobs") # that's all the setup you need
class MyJob(object):
def run(self):
log.info("starting job")
n = 10
for i in range(n):
log.info("%.1f%% done" % (100.0 * i / n))
log.info("work complete")
On the consumer side there's a bit more work. Unfortunately configuring logger output takes, like, 7 whole lines of code to do. ;)
# consumer
import myjobs, sys, logging
if user_wants_log_output:
ch = logging.StreamHandler(sys.stderr)
ch.setLevel(logging.INFO)
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s")
ch.setFormatter(formatter)
myjobs.log.addHandler(ch)
myjobs.log.setLevel(logging.INFO)
myjobs.MyJob().run()
On the other hand there's an amazing amount of stuff in the logging package. If you ever need to send log data to a rotating set of files, an email address, and the Windows Event Log, you're covered.
Example: simplest possible observer
But you don't need to use any library at all. An extremely simple way to support observers is to call a method that does nothing.
# producer
class MyJob(object):
def on_progress(self, pct):
"""Called when progress is made. pct is the percent complete.
By default this does nothing. The user may override this method
or even just assign to it."""
pass
def run(self):
n = 10
for i in range(n):
self.on_progress(100.0 * i / n)
self.on_progress(100.0)
# consumer
import sys, myjobs
job = myjobs.MyJob()
job.on_progress = lambda pct: sys.stdout.write("%.1f%% done\n" % pct)
job.run()
Sometimes instead of writing a lambda, you can just say job.on_progress = progressBar.update, which is nice.
This is about as simple as it gets. One drawback is that it doesn't naturally support multiple listeners subscribing to the same events.
Example: C#-like events
With a bit of support code, you can get C#-like events in Python. Here's the code:
# glue code
class event(object):
def __init__(self, func):
self.__doc__ = func.__doc__
self._key = ' ' + func.__name__
def __get__(self, obj, cls):
try:
return obj.__dict__[self._key]
except KeyError:
be = obj.__dict__[self._key] = boundevent()
return be
class boundevent(object):
def __init__(self):
self._fns = []
def __iadd__(self, fn):
self._fns.append(fn)
return self
def __isub__(self, fn):
self._fns.remove(fn)
return self
def __call__(self, *args, **kwargs):
for f in self._fns[:]:
f(*args, **kwargs)
The producer declares the event using a decorator:
# producer
class MyJob(object):
@event
def progress(pct):
"""Called when progress is made. pct is the percent complete."""
def run(self):
n = 10
for i in range(n+1):
self.progress(100.0 * i / n)
#consumer
import sys, myjobs
job = myjobs.MyJob()
job.progress += lambda pct: sys.stdout.write("%.1f%% done\n" % pct)
job.run()
This works exactly like the "simple observer" code above, but you can add as many listeners as you like using +=. (Unlike C#, there are no event handler types, you don't have to new EventHandler(foo.bar) when subscribing to an event, and you don't have to check for null before firing the event. Like C#, events do not squelch exceptions.)
How to choose
If logging does everything you need, use that. Otherwise do the simplest thing that works for you. The key thing to note is that you don't need to take on a big external dependency.
How about an implementation where objects aren't kept alive just because they're observing something? Below please find an implementation of the observer pattern with the following features:
Usage is pythonic. To add an observer to a bound method .bar of instance foo, just do foo.bar.addObserver(observer).
Observers are not kept alive by virtue of being observers. In other words, the observer code uses no strong references.
No sub-classing necessary (descriptors ftw).
Can be used with unhashable types.
Can be used as many times as you want in a single class.
(bonus) As of today the code exists in a proper downloadable, installable package on github.
Here's the code (the github package or PyPI package have the most up to date implementation):
import weakref
import functools
class ObservableMethod(object):
"""
A proxy for a bound method which can be observed.
I behave like a bound method, but other bound methods can subscribe to be
called whenever I am called.
"""
def __init__(self, obj, func):
self.func = func
functools.update_wrapper(self, func)
self.objectWeakRef = weakref.ref(obj)
self.callbacks = {} #observing object ID -> weak ref, methodNames
def addObserver(self, boundMethod):
"""
Register a bound method to observe this ObservableMethod.
The observing method will be called whenever this ObservableMethod is
called, and with the same arguments and keyword arguments. If a
boundMethod has already been registered as a callback, trying to add
it again does nothing. In other words, there is no way to sign up an
observer to be called back multiple times.
"""
obj = boundMethod.__self__
ID = id(obj)
if ID in self.callbacks:
s = self.callbacks[ID][1]
else:
wr = weakref.ref(obj, Cleanup(ID, self.callbacks))
s = set()
self.callbacks[ID] = (wr, s)
s.add(boundMethod.__name__)
def discardObserver(self, boundMethod):
"""
Un-register a bound method.
"""
obj = boundMethod.__self__
if id(obj) in self.callbacks:
self.callbacks[id(obj)][1].discard(boundMethod.__name__)
def __call__(self, *arg, **kw):
"""
Invoke the method which I proxy, and all of its callbacks.
The callbacks are called with the same *args and **kw as the main
method.
"""
result = self.func(self.objectWeakRef(), *arg, **kw)
for ID in self.callbacks:
wr, methodNames = self.callbacks[ID]
obj = wr()
for methodName in methodNames:
getattr(obj, methodName)(*arg, **kw)
return result
@property
def __self__(self):
"""
Get a strong reference to the object owning this ObservableMethod
This is needed so that ObservableMethod instances can observe other
ObservableMethod instances.
"""
return self.objectWeakRef()
class ObservableMethodDescriptor(object):
def __init__(self, func):
"""
To each instance of the class using this descriptor, I associate an
ObservableMethod.
"""
self.instances = {} # Instance id -> (weak ref, Observablemethod)
self._func = func
def __get__(self, inst, cls):
if inst is None:
return self
ID = id(inst)
if ID in self.instances:
wr, om = self.instances[ID]
if not wr():
msg = "Object id %d should have been cleaned up"%(ID,)
raise RuntimeError(msg)
else:
wr = weakref.ref(inst, Cleanup(ID, self.instances))
om = ObservableMethod(inst, self._func)
self.instances[ID] = (wr, om)
return om
def __set__(self, inst, val):
raise RuntimeError("Assigning to ObservableMethod not supported")
def event(func):
return ObservableMethodDescriptor(func)
class Cleanup(object):
"""
I remove elements from a dict whenever I'm called.
Use me as a weakref.ref callback to remove an object's id from a dict
when that object is garbage collected.
"""
def __init__(self, key, d):
self.key = key
self.d = d
def __call__(self, wr):
del self.d[self.key]
To use this we just decorate the methods we want to make observable with @event. Here's an example:
class Foo(object):
def __init__(self, name):
self.name = name
@event
def bar(self):
print("%s called bar"%(self.name,))
def baz(self):
print("%s called baz"%(self.name,))
a = Foo('a')
b = Foo('b')
a.bar.addObserver(b.bar)
a.bar()
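Calling a.bar() runs the proxied method first and then its observers, so the expected output is:
a called bar
b called bar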
From wikipedia:
from collections import defaultdict
class Observable (defaultdict):
def __init__ (self):
defaultdict.__init__(self, object)
def emit (self, *args):
'''Pass parameters to all observers and update states.'''
for subscriber in self:
response = subscriber(*args)
self[subscriber] = response
def subscribe (self, subscriber):
'''Add a new subscriber to self.'''
self[subscriber]
def stat (self):
'''Return a tuple containing the state of each observer.'''
return tuple(self.values())
The Observable is used like this.
myObservable = Observable ()
# subscribe some inlined functions.
# myObservable[lambda x, y: x * y] would also work here.
myObservable.subscribe(lambda x, y: x * y)
myObservable.subscribe(lambda x, y: float(x) / y)
myObservable.subscribe(lambda x, y: x + y)
myObservable.subscribe(lambda x, y: x - y)
# emit parameters to each observer
myObservable.emit(6, 2)
# get updated values
myObservable.stat() # returns: (12, 3.0, 8, 4)
Based on Jason's answer, I implemented the C#-like events example as a fully-fledged python module including documentation and tests. I love fancy pythonic stuff :)
So, if you want some ready-to-use solution, you can just use the code on github.
Example: twisted log observers
To register an observer yourCallable() (a callable that accepts a dictionary) to receive all log events (in addition to any other observers):
twisted.python.log.addObserver(yourCallable)
Example: complete producer/consumer example
From Twisted-Python mailing list:
#!/usr/bin/env python
"""Serve as a sample implementation of a twisted producer/consumer
system, with a simple TCP server which asks the user how many random
integers they want, and it sends the result set back to the user, one
result per line."""
import random
from zope.interface import implements
from twisted.internet import interfaces, reactor
from twisted.internet.protocol import Factory
from twisted.protocols.basic import LineReceiver
class Producer:
"""Send back the requested number of random integers to the client."""
implements(interfaces.IPushProducer)
def __init__(self, proto, cnt):
self._proto = proto
self._goal = cnt
self._produced = 0
self._paused = False
def pauseProducing(self):
"""When we've produced data too fast, pauseProducing() will be
called (reentrantly from within resumeProducing's transport.write
method, most likely), so set a flag that causes production to pause
temporarily."""
self._paused = True
print('pausing connection from %s' % (self._proto.transport.getPeer()))
def resumeProducing(self):
self._paused = False
while not self._paused and self._produced < self._goal:
next_int = random.randint(0, 10000)
self._proto.transport.write('%d\r\n' % (next_int))
self._produced += 1
if self._produced == self._goal:
self._proto.transport.unregisterProducer()
self._proto.transport.loseConnection()
def stopProducing(self):
pass
class ServeRandom(LineReceiver):
"""Serve up random data."""
def connectionMade(self):
print('connection made from %s' % (self.transport.getPeer()))
self.transport.write('how many random integers do you want?\r\n')
def lineReceived(self, line):
cnt = int(line.strip())
producer = Producer(self, cnt)
self.transport.registerProducer(producer, True)
producer.resumeProducing()
def connectionLost(self, reason):
print('connection lost from %s' % (self.transport.getPeer()))
factory = Factory()
factory.protocol = ServeRandom
reactor.listenTCP(1234, factory)
print('listening on 1234...')
reactor.run()
OP asks "Are there any exemplary examples of the GoF Observer implemented in Python?"
This is an example in Python 3.7. This Observable class meets the requirement of creating a relationship between one observable and many observers while remaining independent of their structure.
from functools import partial
from dataclasses import dataclass, field
import sys
from typing import List, Callable
@dataclass
class Observable:
observers: List[Callable] = field(default_factory=list)
def register(self, observer: Callable):
self.observers.append(observer)
def deregister(self, observer: Callable):
self.observers.remove(observer)
def notify(self, *args, **kwargs):
for observer in self.observers:
observer(*args, **kwargs)
def usage_demo():
observable = Observable()
# Register two anonymous observers using lambda.
observable.register(
lambda *args, **kwargs: print(f'Observer 1 called with args={args}, kwargs={kwargs}'))
observable.register(
lambda *args, **kwargs: print(f'Observer 2 called with args={args}, kwargs={kwargs}'))
# Create an observer function, register it, then deregister it.
def callable_3():
print('Observer 3 NOT called.')
observable.register(callable_3)
observable.deregister(callable_3)
# Create a general purpose observer function and register four observers.
def callable_x(*args, **kwargs):
print(f'{args[0]} observer called with args={args}, kwargs={kwargs}')
for gui_field in ['Form field 4', 'Form field 5', 'Form field 6', 'Form field 7']:
observable.register(partial(callable_x, gui_field))
observable.notify('test')
if __name__ == '__main__':
sys.exit(usage_demo())
A functional approach to observer design:
from functools import wraps

def add_listener(obj, method_name, listener):
# Get any existing listeners
listener_attr = method_name + '_listeners'
listeners = getattr(obj, listener_attr, None)
# If this is the first listener, then set up the method wrapper
if not listeners:
listeners = [listener]
setattr(obj, listener_attr, listeners)
# Get the object's method
method = getattr(obj, method_name)
@wraps(method)
def method_wrapper(*args, **kwargs):
method(*args, **kwargs)
for l in listeners:
l(obj, *args, **kwargs) # Listener also receives the object argument
# Replace the original method with the wrapper
setattr(obj, method_name, method_wrapper)
else:
# Event is already set up, so just add another listener
listeners.append(listener)
def remove_listener(obj, method_name, listener):
# Get any existing listeners
listener_attr = method_name + '_listeners'
listeners = getattr(obj, listener_attr, None)
if listeners:
# Remove the listener
next((listeners.pop(i)
for i, l in enumerate(listeners)
if l == listener),
None)
# If this was the last listener, then remove the method wrapper
if not listeners:
method = getattr(obj, method_name)
delattr(obj, listener_attr)
setattr(obj, method_name, method.__wrapped__)
These methods can then be used to add a listener to any class method. For example:
class MyClass(object):
def __init__(self, prop):
self.prop = prop
def some_method(self, num, string):
print('method:', num, string)
def listener_method(obj, num, string):
print('listener:', num, string, obj.prop)
my = MyClass('my_prop')
add_listener(my, 'some_method', listener_method)
my.some_method(42, 'with listener')
remove_listener(my, 'some_method', listener_method)
my.some_method(42, 'without listener')
And the output is:
method: 42 with listener
listener: 42 with listener my_prop
method: 42 without listener
