Creating a set of objects of a user-defined class in Python

table = set()

class GlobeLearningTable(object):
    def __init__(self, mac, port, dpid):
        self.mac = mac
        self.port = port
        self.dpid = dpid

    def add(self):
        global table
        if self not in table:
            table.add(self)
class LearningSwitch(object):
    def __init__(self, connection, transparent):
        self.connection = connection
        self.transparent = transparent
        self.macToPort = {}
        connection.addListeners(self)
        self.hold_down_expired = _flood_delay == 0

    def _handle_PacketIn(self, event):
        packet = event.parsed
        self.macToPort[packet.src] = event.port  # 1
        packet_src = str(packet.src)
        packet_mac = packet_src.upper()
        entry = GlobeLearningTable(packet_mac, event.port, dpid_to_str(self.connection.dpid))
        entry.add()
Problem: entry.add() adds a new object every time it is called, so the number of items in the table keeps growing.
This should not happen because:
In the add method I check whether the object is already in the table before adding it.
table is a set, an unordered collection that should not contain duplicate objects.
Help: is there any way, in this setup, to add the object only when it is not already in the table?

You need to implement the __eq__ and __hash__ methods to teach Python how to recognise unique GlobeLearningTable instances.
class GlobeLearningTable(object):
    def __init__(self, mac, port, dpid):
        self.mac = mac
        self.port = port
        self.dpid = dpid

    def __hash__(self):
        return hash((self.mac, self.port, self.dpid))

    def __eq__(self, other):
        if not isinstance(other, type(self)):
            return NotImplemented
        return self.mac == other.mac and self.port == other.port and self.dpid == other.dpid
Now your objects are comparable, and equal objects also return equal values from __hash__. This lets set and dict objects store your objects efficiently and detect whether an equal object is already present:
>>> demo = set([GlobeLearningTable('a', 10, 'b')])
>>> GlobeLearningTable('a', 10, 'b') in demo
True
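With __eq__ and __hash__ in place, the original add() helper also stops growing the table for equal entries. A quick sketch, assuming table and the updated GlobeLearningTable above live in the same module (the MAC/port/dpid values are just illustrative):
table = set()

GlobeLearningTable('00:11:22:33:44:55', 1, '00-00-00-00-00-01').add()
GlobeLearningTable('00:11:22:33:44:55', 1, '00-00-00-00-00-01').add()  # equal entry, ignored
print(len(table))  # 1
Strictly speaking the `if self not in table` check inside add() is now redundant, since set.add already ignores elements equal to one that is present.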

Related

Assigning an object to self inside a class method

I want to check inside a model whether the object already exists in the DB, and if it does, read all its properties from the DB and assign them to the same object.
models.py
class Obj1(models.Model):
    # model fields: name, prop1, prop2, prop3 (definitions omitted)

    def obj_check(self, name, prop1):
        objects = Obj1.objects.filter(name=name, prop1=prop1)
        count = len(objects)
        if count == 0:
            # Object does not exist in the DB
            self.name = name
            self.prop1 = prop1
            self.save()
            return 0
        elif count == 1:
            # Object does exist in the DB
            self = objects[0]  # <<< Is it possible to do it like this?
            return 1
        else:
            # Something is wrong, too many objects in the DB
            return count
main.py
...
obj = Obj1()
check = obj.obj_check(name, var)
if check == 0:
    print("Such an object does NOT exist")
    print("New object created")
elif check == 1:
    print("Such an object already exists")
    obj.prop3 = "New Value"
else:
    print("Something is wrong, too many objects in the DB")
Is it possible/right to do it like this?
self = objects[0]
I know I can use a try: ... except Obj1.DoesNotExist: construction, but I wanted to create the object first and make fewer queries to the DB.
Thanks!
No, reassigning self doesn't really do anything: it just rebinds the local variable, it does not modify the object.
You would want to implement this kind of method on the model manager instead. The default manager for a model is the .objects property, but you can also implement your own manager.
class Obj1Manager(models.Manager):
    def obj_check(self, name, prop1):
        qs = self.get_queryset()
        objects = qs.filter(name=name, prop1=prop1)
        n = len(objects)
        if n == 0:
            # Object does not exist in the DB, create it
            obj = self.create(name=name, prop1=prop1)
            return obj, 1
        else:
            obj = objects.first()
            return obj, 0

class Obj1(models.Model):
    objects = Obj1Manager()
    # ...

# ...
obj, created = Obj1.objects.obj_check(name='foo', prop1='bar')
However, it's worth noting that this already exists as a default manager method: get_or_create.
obj, created = Obj1.objects.get_or_create(name='foo', prop1='bar')
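If some fields should only be set when the row is first created, get_or_create also takes a defaults dict. A minimal sketch, reusing the illustrative field names from the question:
obj, created = Obj1.objects.get_or_create(
    name='foo', prop1='bar',
    defaults={'prop3': 'initial value'},  # used only when a new row is created
)
if not created:
    obj.prop3 = 'New Value'  # existing row: update and save explicitly
    obj.save()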

Manager / Container class, how to?

I am currently designing software which needs to manage a certain hardware setup.
The hardware setup is as follows:
System - The system contains two identical devices, and has certain functionality relative to the entire system.
Device - Each device contains two identical sub devices, and has certain functionality relative to both sub devices.
Sub device - Each sub device has 4 configurable entities (Controlled via the same hardware command - thus I don't count them as a sub-sub device).
What I want to achieve:
I want to control all configurable entities via the system manager (the entities are indexed serially), meaning I would be able to do the following:
system_instance = system_manager_class(some_params)
system_instance.some_func(0) # configure device_manager[0].sub_device_manager[0].entity[0]
system_instance.some_func(5) # configure device_manager[0].sub_device_manager[1].entity[1]
system_instance.some_func(8) # configure device_manager[1].sub_device_manager[1].entity[0]
What I have thought of doing:
I was thinking of creating an abstract class that contains all the sub-device functions (each calling a conversion function) and having system_manager, device_manager and sub_device_manager inherit from it. That way all the classes expose the same function names and I can access them via the system manager.
Something along these lines:
class abs_sub_device():
    @staticmethod
    def convert_entity(self):
        # Abstract placeholder: subclasses return (sub_manager, sub_entity_num)
        sub_manager = None
        sub_entity_num = None
        pass

    def set_entity_to_2(self, entity_num):
        sub_manager, sub_manager_entity_num = self.convert_entity(entity_num)
        sub_manager.set_entity_to_2(sub_manager_entity_num)

class system_manager(abs_sub_device):
    def __init__(self):
        self.device_manager_list = []  # Initialize device list
        self.device_manager_list.append(device_manager())
        self.device_manager_list.append(device_manager())

    def convert_entity(self, entity_num):
        relevant_device_manager = self.device_manager_list[entity_num // 4]
        relevant_entity = entity_num % 4
        return relevant_device_manager, relevant_entity

class device_manager(abs_sub_device):
    def __init__(self):
        self.sub_device_manager_list = []  # Initialize sub device list
        self.sub_device_manager_list.append(sub_device_manager())
        self.sub_device_manager_list.append(sub_device_manager())

    def convert_entity(self, entity_num):
        relevant_sub_device_manager = self.sub_device_manager_list[entity_num // 4]
        relevant_entity = entity_num % 4
        return relevant_sub_device_manager, relevant_entity

class sub_device_manager(abs_sub_device):
    def __init__(self):
        self.entity_list = [0] * 4

    def set_entity_to_2(self, entity_num):
        self.entity_list[entity_num] = 2
The code is only meant to convey the general design, not actual functionality.
The problem:
It seems to me that the system I am trying to design is really generic and that there must be a built-in Python way to do this, or that my entire object-oriented view of it is wrong.
I would really like to know if someone has a better way of doing this.
After much thinking, I think I found a pretty generic way to solve the issue, using a combination of decorators, inheritance and dynamic function creation.
The main idea is as follows:
1) Each layer dynamically creates all the relevant sub-layer functions for itself (inside the init function, using a decorator on the init function).
2) Each dynamically created function converts the entity value according to a convert function (a static function of abs_container_class) and calls the lower layer's function of the same name (see make_convert_function_method).
3) This means all sub-layer functions are effectively implemented on the higher layers with zero code duplication.
def get_relevant_class_method_list(class_instance):
    method_list = [func for func in dir(class_instance)
                   if callable(getattr(class_instance, func))
                   and not func.startswith("__") and not func.startswith("_")]
    return method_list

def make_convert_function_method(name):
    def _method(self, entity_num, *args):
        sub_manager, sub_manager_entity_num = self._convert_entity(entity_num)
        function_to_call = getattr(sub_manager, name)
        function_to_call(sub_manager_entity_num, *args)
    return _method

def container_class_init_decorator(function_object):
    def new_init_function(self, *args):
        # Call the original init function:
        function_object(self, *args)
        # Get all relevant methods (of one sub class is enough)
        method_list = get_relevant_class_method_list(self.container_list[0])
        # Dynamically create all sub layer functions:
        for method_name in method_list:
            _method = make_convert_function_method(method_name)
            setattr(type(self), method_name, _method)
    return new_init_function

class abs_container_class():
    @staticmethod
    def _convert_entity(self):
        sub_manager = None
        sub_entity_num = None
        pass

class system_manager(abs_container_class):
    @container_class_init_decorator
    def __init__(self):
        self.device_manager_list = []  # Initialize device list
        self.device_manager_list.append(device_manager())
        self.device_manager_list.append(device_manager())
        self.container_list = self.device_manager_list

    def _convert_entity(self, entity_num):
        relevant_device_manager = self.device_manager_list[entity_num // 4]
        relevant_entity = entity_num % 4
        return relevant_device_manager, relevant_entity

class device_manager(abs_container_class):
    @container_class_init_decorator
    def __init__(self):
        self.sub_device_manager_list = []  # Initialize sub device list
        self.sub_device_manager_list.append(sub_device_manager())
        self.sub_device_manager_list.append(sub_device_manager())
        self.container_list = self.sub_device_manager_list

    def _convert_entity(self, entity_num):
        relevant_sub_device_manager = self.sub_device_manager_list[entity_num // 4]
        relevant_entity = entity_num % 4
        return relevant_sub_device_manager, relevant_entity

class sub_device_manager():
    def __init__(self):
        self.entity_list = [0] * 4

    def set_entity_to_value(self, entity_num, required_value):
        self.entity_list[entity_num] = required_value
        print("I set the entity to : {}".format(required_value))

# This is used for auto-completion purposes (via a PEP 484 type comment)
class auto_complete_class(system_manager, device_manager, sub_device_manager):
    pass

system_instance = system_manager()  # type: auto_complete_class
system_instance.set_entity_to_value(0, 3)
There is still a small issue with this solution: auto-completion will not work, since the highest-level class has almost no statically implemented functions.
To solve this I cheated a bit: I created an empty class that inherits from all the layers and told the IDE, via a PEP 484 type comment (# type: auto_complete_class), that it is the type of the instance being created.
Does this solve your problem?
class EndDevice:
    def __init__(self, entities_num):
        self.entities = list(range(entities_num))

    @property
    def count_entities(self):
        return len(self.entities)

    def get_entity(self, i):
        return str(i)

class Device:
    def __init__(self, sub_devices):
        self.sub_devices = sub_devices

    @property
    def count_entities(self):
        return sum(sd.count_entities for sd in self.sub_devices)

    def get_entity(self, i):
        c = 0
        for index, sd in enumerate(self.sub_devices):
            if c <= i < sd.count_entities + c:
                return str(index) + " " + sd.get_entity(i - c)
            c += sd.count_entities
        raise IndexError(i)

SystemManager = Device  # They are exactly the same. This also means you can nest them indefinitely.

sub_devices1 = [EndDevice(4) for _ in range(2)]
sub_devices2 = [EndDevice(4) for _ in range(2)]
system_manager = SystemManager([Device(sub_devices1), Device(sub_devices2)])
print(system_manager.get_entity(0))
print(system_manager.get_entity(5))
print(system_manager.get_entity(15))
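For reference, with the wiring above those three calls should print something like 0 0 0, 0 1 1 and 1 1 3, i.e. device index, sub-device index and entity index.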
I can't think of a better way to do this than OOP, but inheritance will only give you one set of low-level functions for the system manager, so it will be like having one device manager and one sub-device manager. A better approach, a bit like tkinter widgets, is to have one system manager and initialise all the other managers like children in a tree, so:
system = SystemManager()
device1 = DeviceManager(system)
subDevice1 = SubDeviceManager(device1)
device2 = DeviceManager(system)
subDevice2 = SubDeviceManager(device2)
#to execute some_func on subDevice1
system.some_func(0, 0, *someParams)
We can do this by keeping a list of 'children' of the higher-level managers and having functions which reference the children.
class SystemManager:
    def __init__(self):
        self.children = []

    def some_func(self, child, *params):
        self.children[child].some_func(*params)

class DeviceManager:
    def __init__(self, parent):
        parent.children.append(self)
        self.children = []

    def some_func(self, child, *params):
        self.children[child].some_func(*params)

class SubDeviceManager:
    def __init__(self, parent):
        parent.children.append(self)
        # This may or may not have sub-objects; if it does, it needs its own children list.

    def some_func(self, *params):
        # do some important stuff
        pass
Unfortunately, this does mean that if we want to call a function of a sub-device manager from the system manager without having lots of dots, we will have to define it again in the system manager. What you can do instead is use the built-in exec() function, which takes a string and runs it through the Python interpreter:
class SystemManager:
    ...
    def execute(self, child, function, *args):
        exec("self.children[child]." + function + "(*args)")
(and keep the device manager the same)
You would then write in the main program:
system.execute(0, "some_func", 0, *someArgs)
Which would call
device1.some_func(0, *someArgs)
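As a small variation (my sketch, not part of the answer above), getattr can do the same dynamic dispatch without building a source string for exec():
class SystemManager:
    def __init__(self):
        self.children = []

    def execute(self, child, function, *args):
        # Look up the named method on the selected child and call it directly.
        return getattr(self.children[child], function)(*args)
The call site stays the same: system.execute(0, "some_func", 0, *someArgs).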
Here's what I'm thinking:
SystemManager().apply_to_entity(entity_num=7, action=lambda e: setattr(e, 'value', 2))
class EntitySuperManagerMixin():
    """Mixin to handle logic for managing entity managers."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)  # Supports any kind of __init__ call.
        self._entity_manager_list = []

    def apply_to_entity(self, entity_num, action):
        relevant_entity_manager = self._entity_manager_list[entity_num // 4]
        relevant_entity_num = entity_num % 4
        return relevant_entity_manager.apply_to_entity(
            relevant_entity_num, action)

class SystemManager(EntitySuperManagerMixin):
    def __init__(self):
        super().__init__()
        # An alias for _entity_manager_list to improve readability.
        self.device_manager_list = self._entity_manager_list
        self.device_manager_list.extend(DeviceManager() for _ in range(4))

class DeviceManager(EntitySuperManagerMixin):
    def __init__(self):
        super().__init__()
        # An alias for _entity_manager_list to improve readability.
        self.sub_device_manager_list = self._entity_manager_list
        self.sub_device_manager_list.extend(SubDeviceManager() for _ in range(4))

class SubDeviceManager():
    """Manages entities, not entity managers, thus doesn't inherit the mixin."""
    def __init__(self):
        # Entities need to be classes for this idea to work.
        self._entity_list = [Entity() for _ in range(4)]

    def apply_to_entity(self, entity_num, action):
        return action(self._entity_list[entity_num])

class Entity():
    def __init__(self, initial_value=0):
        self.value = initial_value
With this structure:
Entity-specific functions can stay bound to the Entity class (where they belong).
Manager-specific code needs to be updated in only two places: EntitySuperManagerMixin and the lowest-level manager (which needs custom behaviour anyway, since it deals with the actual entities rather than other managers).
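A short usage sketch of the classes above (the index arithmetic follows the mixin's divide-by-4 scheme; setattr is used because a lambda body cannot contain an assignment):
system = SystemManager()
system.apply_to_entity(7, lambda e: setattr(e, 'value', 2))   # write entity 7
print(system.apply_to_entity(7, lambda e: e.value))           # read it back: 2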
The way I see it, if you want to dynamically configure different parts of the system you need some sort of addressing: you pass in an ID or address together with a parameter, the system uses the address to work out which subsystem you are talking about, and then configures that subsystem with the parameter.
OOP is quite OK for that, and you can easily manipulate such data via bitwise operators.
Basic addressing is done in binary, so in Python you first add an address static attribute to your class, with perhaps some further detailing if the system grows.
A basic implementation of the addressing system is as follows:
bin(171)
'0b10101011'
and if we divide it into nibbles:
1010 - device manager 10
1011 - sub-device manager 11
So in this example we have a system of 15 device managers and 15 sub-device managers, and every device and sub-device manager has its own integer address. Say you want to access device manager no. 10 with sub-device manager no. 11: you would need their combined address, which is 171 (binary 10101011), and you would call:
system.config(address, parameter)
where the system.config function would look like this:
def config(self, address, parameter):
    device_manager = (address & 0xF0) >> 4  # 10
    sub_device_manager = address & 0x0F     # 11
    if device_manager not in range(len(self.devices)):
        raise LookupError("device manager not found")
    if sub_device_manager not in range(len(self.devices[device_manager].device)):
        raise LookupError("sub device manager not found")
    self.devices[device_manager].device[sub_device_manager].implement(parameter)
In layman's terms: you tell the system that sub-device 11 of device 10 needs to be configured with this parameter.
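For example, a hypothetical call for that address (0xAB packs device manager 10 into the high nibble and sub-device manager 11 into the low nibble):
system.config(0xAB, {"param1": "new_value"})  # routed to device manager 10, sub-device manager 11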
So how would this setup look in Python? Below is a base class for such a system, which can then be composed into or inherited by different classes:
class systems(object):
    parent = None  # global parent element, defaults to None for simplicity

    def __init__(self):
        self.addrMASK = 0xf  # address mask for this nibble
        self.addr = 0x1      # default address of this element
        self.devices = []    # list of instances of device
        self.data = {        # some arbitrary data
            "param1": "param_val",
            "param2": "param_val",
            "param3": "param_val",
        }

    def addSubSystem(self, sub_system):  # connects elements to each other
        # check validity
        if not isinstance(sub_system, systems):
            raise TypeError("defined input is not a system type")  # prevents passing an integer or something
        # append a device to the system data
        self.devices.append(sub_system)
        # propagate addresses from the sub-device manager up to the system
        obj = self
        while 1:
            if obj.parent is not None:
                obj.parent.addrMASK <<= 4  # bitshift by 4 bits
                obj.parent.addr <<= 4      # bitshift by 4 bits
                obj = obj.parent
            else:
                break
        # self management; I am a lazy guy so I added this part so I wouldn't have to reset addresses manually
        self.addrMASK <<= 4  # bitshift by 4 bits
        self.addr <<= 4      # bitshift by 4 bits
        # this is added so the object's address corresponds to its place in the list;
        # it could be done more eloquently but I didn't know what your limitations are
        if not self.devices:
            self.devices[len(self.devices) - 1].addr += 1
        self.devices[len(self.devices) - 1].parent = self

    # helpful for checking data... gives the address of the system
    def __repr__(self):
        return "system at {0:X}, {1:0X}".format(self.addr, self.addrMASK)

    # extra helpful: lists the data as well
    def __str__(self):
        data = ['{} : {}\n'.format(k, v) for k, v in self.data.items()]
        return " ".join([repr(self), '\n', *data])

    # checking for data, skips looping over sub systems
    def __contains__(self, system_index):
        return system_index - 1 in range(len(self.data))

    # applying a parameter change -- just an example
    def apply(self, par_dict):
        if not isinstance(par_dict, dict):
            raise TypeError("parameter must be a dict type")
        if any(key in self.data.keys() for key in par_dict.keys()):
            for k, v in par_dict.items():
                if k in self.data.keys():
                    self.data[k] = v
                else:
                    pass
        else:
            pass

    # implementing parameters through addresses
    def implement(self, address, parameter_dictionary):
        if address & self.addrMASK == self.addr:
            if address - self.addr != 0:
                item = (address - self.addr) >> 4
                self.devices[item - 1].implement(address - self.addr, parameter_dictionary)
            else:
                self.apply(parameter_dictionary)
a = systems()
b = systems()
a.addSubSystem(b)
c = systems()
b.addSubSystem(c)
print('a')
print(a)
print('')
print('b')
print(b)
print('')
print('c')
print(c)
print('')
a.implement(0x100,{"param1":"a"})
a.implement(0x110,{"param1":"b"})
a.implement(0x111,{"param1":"c"})
print('a')
print(a)
print('')
print('b')
print(b)
print('')
print('c')
print(c)
print('')

Best way to do many try-except blocks within Python class assignments?

I have a class which is pulling JSON data with keys, but the problem is that per instance of this class, the JSON data may not have keys for everything I am trying to grab. Currently, my class is set up like this:
class Show():
    def __init__(self, data):
        self.data = data
        self.status = self.data['status']
        self.rating = self.data['rating']
        self.genres = self.data['genres']
        self.weight = self.data['weight']
        self.updated = self.data['updated']
        self.name = self.data['name']
        self.language = self.data['language']
        self.schedule = self.data['schedule']
        self.url = self.data['url']
        self.image = self.data['image']
And so on; there are more parameters than that. I'm trying to avoid the messiness of having a try-except block for EACH AND EVERY one of those (27) lines. Is there a better way? Ultimately, I want a parameter to be assigned None if its JSON key doesn't exist.
If you're going to set a default value on the attribute when the key isn't in the data dictionary, use data.get('key') rather than data['key']. The get method returns None if the key does not exist, rather than raising a KeyError exception. If you want a default other than None, pass it as the second argument to get and that is what will be returned.
So, your code could become:
class Show():
    def __init__(self, data):
        self.data = data
        self.status = self.data.get('status')
        self.rating = self.data.get('rating')
        self.genres = self.data.get('genres')
        self.weight = self.data.get('weight')
        self.updated = self.data.get('updated')
        self.name = self.data.get('name')
        self.language = self.data.get('language')
        self.schedule = self.data.get('schedule')
        self.url = self.data.get('url')
        self.image = self.data.get('image')
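And where None is not the right fallback, you can pass the default you want as the second argument; for example (the chosen defaults here are just illustrative):
self.genres = self.data.get('genres', [])  # empty list instead of None
self.weight = self.data.get('weight', 0)   # 0 instead of None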
Use dict.get, which provides a default value instead of raising an exception for missing keys.
For example, you can change this:
self.status = self.data['status']
into this:
self.status = self.data.get('status')
You could change your code to something like:
class Show():
    def __init__(self, data):
        self.data = data
        self.__dict__.update(data)

data = {'status': True, 'ratings': [1, 2, 3], 'foo': "blahblah"}
aShow = Show(data)
"""
>>> aShow.status
True
>>> aShow.ratings
[1,2,3]
>>> aShow.something_not_in_dict
AttributeError: Show instance has no attribute 'something_not_in_dict'
"""
This achieves much the same effect, except that accessing something on your Show instance that wasn't a key in your data dictionary raises an AttributeError instead of returning None.
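If you like this approach but still want missing fields to come back as None, one option (my sketch, not part of the answer above) is a __getattr__ fallback, which Python only calls when normal attribute lookup fails:
class Show(object):
    def __init__(self, data):
        self.data = data
        self.__dict__.update(data)

    def __getattr__(self, name):
        # Reached only for attributes that were not set in __init__,
        # i.e. keys missing from the JSON data.
        return None
The trade-off is that typos in attribute names also silently return None instead of raising AttributeError.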

Using an object in a whole module in Python

When I create an object in one method, I can't use it in any other method, so the use of the object is limited to that method. But I would like to create the object in such a way that I can use it in my whole module.
Here is the code of the module in which I want to create the object so I can use it in every method. (It's not so important what it does, but for those who care, it's a network configurator that uses a netlink socket to communicate with the kernel.)
In the method configureBridge() (the 4th method from the beginning) I tried to create an object and use it (ip = IPRoute() ... ip.release()) and it worked, but I couldn't use the object variable ip in any other function apart from configureBridge(). Could someone help me with that?
class PyrouteTwo(Configurator):
    def __init__(self, inRollback=False):
        super(PyrouteTwo, self).__init__(ConfigApplier(), inRollback)
        self.runningConfig = RunningConfig()
        logging.debug("testmark.PyR2.init")

    def begin(self):
        if self.configApplier is None:
            self.configApplier = ConfigApplier()
        if self.runningConfig is None:
            self.runningConfig = RunningConfig()
        logging.debug("testmark.PyR2.begin")

    def commit(self):
        self.configApplier = None
        self.runningConfig.save()
        self.runningConfig = None
        logging.debug("testmark.PyR2.commit")

    def configureBridge(self, bridge, **opts):
        self.configApplier.addBridge(bridge)
        if bridge.port:
            bridge.port.configure(**opts)
        self.configApplier.addBridgePort(bridge)
        self.configApplier.setIfaceConfigAndUp(bridge)
        logging.debug("testmark.PyR2.confBridge..")
        # I am using the object here:
        ip = IPRoute(fork=True)
        dev = ip.link_lookup(ifname='em1')[0]
        logging.debug("pyroute2 link_lookup output: %d", dev)
        ip.release()

    # there are some similar functions like configureVAN etc. in which I want
    # to use the object
class ConfigApplier(object):
    def _setIpConfig(self, iface):
        ipConfig = iface.ipConfig
        logging.debug("testmark.PyR2.ConfApplier.setIpConf.")
        if ipConfig.ipaddr:
            self.removeIpConfig(iface)
            ipwrapper.addrAdd(iface.name, ipConfig.ipaddr,
                              ipConfig.netmask)
        if ipConfig.gateway and ipConfig.defaultRoute:
            ipwrapper.routeAdd(['default', 'via', ipConfig.gateway])

    def removeIpConfig(self, iface):
        ipwrapper.addrFlush(iface.name)

    def setIfaceMtu(self, iface, mtu):
        ipwrapper.linkSet(iface, ['mtu', str(mtu)])

    def ifup(self, iface):
        ipwrapper.linkSet(iface.name, ['up'])
        if iface.ipConfig.bootproto == 'dhcp':
            dhclient = DhcpClient(iface.name)
            dhclient.start(iface.ipConfig.async)

    def ifdown(self, iface):
        ipwrapper.linkSet(iface.name, ['down'])
        dhclient = DhcpClient(iface.name)
        dhclient.shutdown()

    def setIfaceConfigAndUp(self, iface):
        if iface.ip:
            self._setIpConfig(iface)
        if iface.mtu:
            self.setIfaceMtu(iface.name, iface.mtu)
        self.ifup(iface)

    def addBridge(self, bridge):
        rc, _, err = execCmd([EXT_BRCTL, 'addbr', bridge.name])
        if rc != 0:
            raise ConfigNetworkError(ERR_FAILED_IFUP, err)

    def addBridgePort(self, bridge):
        rc, _, err = execCmd([EXT_BRCTL, 'addif', bridge.name,
                              bridge.port.name])
        if rc != 0:
            raise ConfigNetworkError(ERR_FAILED_IFUP, err)

    def removeBridge(self, bridge):
        rc, _, err = execCmd([EXT_BRCTL, 'delbr', bridge.name])
        if rc != 0:
            raise ConfigNetworkError(ERR_FAILED_IFDOWN, err)
        logging.debug("testmarkPyR2.ConfApplier.remBridge")

    # ...

    def createLibvirtNetwork(self, network, bridged, iface, qosInbound=None,
                             qosOutbound=None):
        netXml = libvirtCfg.createNetworkDef(network, bridged, iface,
                                             qosInbound, qosOutbound)
        libvirtCfg.createNetwork(netXml)
        logging.debug("testmarkPyR2.ConfApplier.createLibvirtNetwork")

    def removeLibvirtNetwork(self, network):
        libvirtCfg.removeNetwork(network)
        logging.debug("testmarkPyR2.ConfApplier.remLibvirtNetwork")
You can either declare it as an object-specific attribute by doing:
self.ip = value  # now it's an instance-specific variable
or make it a class variable before assigning it a value:
class PyrouteTwo(Configurator):
    ip = None  # now it's a class variable
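Applied to the question's code, that could look roughly like the sketch below; it assumes the pyroute2 IPRoute usage already shown in configureBridge() and that commit() is always reached so the socket gets released:
class PyrouteTwo(Configurator):
    def __init__(self, inRollback=False):
        super(PyrouteTwo, self).__init__(ConfigApplier(), inRollback)
        self.runningConfig = RunningConfig()
        self.ip = IPRoute(fork=True)      # one instance, shared by every method via self

    def configureBridge(self, bridge, **opts):
        # ... same as before, but use the shared instance:
        dev = self.ip.link_lookup(ifname='em1')[0]
        logging.debug("pyroute2 link_lookup output: %d", dev)

    def commit(self):
        self.runningConfig.save()
        self.runningConfig = None
        self.ip.release()                 # release the netlink socket when done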

getting test class instance in nose test object to sync up with instance in decorated generator target

I've been working on a way to get tests produced from a generator in nose to have descriptions that are customized for the specific iteration being tested. I have something that works, as long as my generator target method never tries to access self from my generator class.
I'm seeing that all my generator target instances share a common test class instance, while nose generates a one-off instance of the test class for each test run from the generator. This results in setUp being run on each test instance nose creates, but never on the instance the generator target is bound to (of course, the real problem is that I can't see how to bind the nose-created instance to the generator target).
Here's the code I'm using to try to figure this all out (yes, I know the decorator would probably be better as a callable class, but nose, at least version 1.2.1 which I have, explicitly checks that tests are either functions or methods, so a callable class won't run at all):
import inspect

def labelable_yielded_case(case):
    argspec = inspect.getargspec(case)
    if argspec.defaults is not None:
        defaults_list = [''] * (len(argspec.args) - len(argspec.defaults)) + argspec.defaults
    else:
        defaults_list = [''] * len(argspec.args)
    argument_defaults_list = zip(argspec.args, defaults_list)
    case_wrappers = []

    def add_description(wrapper_id, argument_dict):
        case_wrappers[wrapper_id].description = case.__doc__.format(**argument_dict)

    def case_factory(*factory_args, **factory_kwargs):
        def case_wrapper_wrapper():
            wrapper_id = len(case_wrappers)

            def case_wrapper(*args, **kwargs):
                args = factory_args + args
                argument_list = []
                for argument in argument_defaults_list:
                    argument_list.append(list(argument))
                for index, value in enumerate(args):
                    argument_list[index][1] = value
                argument_dict = dict(argument_list)
                argument_dict.update(factory_kwargs)
                argument_dict.update(kwargs)
                add_description(wrapper_id, argument_dict)
                return case(*args, **kwargs)

            case_wrappers.append(case_wrapper)
            case_wrapper.__name__ = case.__name__
            return case_wrapper
        return case_wrapper_wrapper()
    return case_factory

class TestTest(object):
    def __init__(self):
        self.data = None

    def setUp(self):
        print 'setup', self
        self.data = (1, 2, 3)

    def test_all(self):
        for index, value in enumerate((1, 2, 3)):
            yield self.validate_equality(), index, value

    def test_all_again(self):
        for index, value in enumerate((1, 2, 3)):
            yield self.validate_equality_again, index, value

    @labelable_yielded_case
    def validate_equality(self, index, value):
        '''element {index} equals {value}'''
        print 'test', self
        assert self.data[index] == value, 'expected %d got %d' % (value, self.data[index])

    def validate_equality_again(self, index, value):
        print 'test', self
        assert self.data[index] == value, 'expected %d got %d' % (value, self.data[index])
    validate_equality_again.description = 'again'
When run through nose, the again tests work just fine, but the tests using the decorated generator target all fail because self.data is None (because setUp never runs, because the instance of TestTest stored in the closures is not one of the instances run by nose). I tried making the decorator an instance member of a base class for TestTest, but then nose threw errors about too few arguments (no self) being passed to the unbound labelable_yielded_case. Is there any way I can make this work (short of hacking nose), or am I stuck choosing between not being able to have the yield target be an instance member and not having per-test labels for each yielded test?
Fixed it (at least for the case here, though I think I got it for all cases). I had to fiddle with case_wrapper_wrapper and case_wrapper to get the factory to return the wrapped cases attached to the correct class, but not bound to any given instance. I also had another code issue: I was building the argument dict in the wrapper but then not passing it to the case. Working code:
import inspect

def labelable_yielded_case(case):
    argspec = inspect.getargspec(case)
    if argspec.defaults is not None:
        defaults_list = [''] * (len(argspec.args) - len(argspec.defaults)) + argspec.defaults
    else:
        defaults_list = [''] * len(argspec.args)
    argument_defaults_list = zip(argspec.args, defaults_list)
    case_wrappers = []

    def add_description(wrapper_id, argument_dict):
        case_wrappers[wrapper_id].description = case.__doc__.format(**argument_dict)

    def case_factory(*factory_args, **factory_kwargs):
        def case_wrapper_wrapper():
            wrapper_id = len(case_wrappers)

            def case_wrapper(*args, **kwargs):
                argument_list = []
                for argument in argument_defaults_list:
                    argument_list.append(list(argument))
                for index, value in enumerate(args):
                    argument_list[index][1] = value
                argument_dict = dict(argument_list)
                argument_dict.update(kwargs)
                add_description(wrapper_id, argument_dict)
                return case(**argument_dict)

            case_wrappers.append(case_wrapper)
            case_name = case.__name__ + str(wrapper_id)
            case_wrapper.__name__ = case_name
            if factory_args:
                setattr(factory_args[0].__class__, case_name, case_wrapper)
                return getattr(factory_args[0].__class__, case_name)
            else:
                return case_wrapper
        return case_wrapper_wrapper()
    return case_factory

class TestTest(object):
    def __init__(self):
        self.data = None

    def setUp(self):
        self.data = (1, 2, 3)

    def test_all(self):
        for index, value in enumerate((1, 2, 3)):
            yield self.validate_equality(), index, value

    @labelable_yielded_case
    def validate_equality(self, index, value):
        '''element {index} equals {value}'''
        assert self.data[index] == value, 'expected %d got %d' % (value, self.data[index])
