Error in decorating a classmethod - python

I was working on a decorator that decorates a class. It works fine for instance methods but raises a TypeError for class methods. The code is as below:
def deco_method(fn):
    def wrapper(*arg, **kwarg):
        """
        Function: Wrapper
        """
        print "Calling function {}".format(fn.__name__)
        print arg, kwarg
        ret_val = fn(*arg, **kwarg)
        print "Executed function {}".format(fn.__name__)
        return ret_val
    return wrapper
def clsdeco(cls):
    attributes = cls.__dict__.keys()
    for attribute in attributes:
        # Do not decorate private methods
        if '__' in attribute:
            continue
        # Get the method
        value = getattr(cls, attribute)
        if not hasattr(value, '__call__'):
            continue
        # Check if method is a class method or normal method and decorate accordingly
        if value.im_self is None:  # non class method
            setattr(cls, attribute, deco_method(value))
        elif value.im_self is cls:  # class method
            setattr(cls, attribute, classmethod(deco_method(value)))
        else:
            assert False
    return cls  # return decorated class
@clsdeco
class Person:
    message = "Hi Man"

    def __init__(self, first_name, last_name):
        self.fname = first_name
        self.lname = last_name
        self.age = None

    def get_name(self):
        print "Name is '{} {}'".format(self.fname, self.lname)

    @classmethod
    def greet_person(cls):
        print cls.message

p = Person('John', 'snow')
p.greet_person()
It gives an error:
TypeError: greet_person() takes exactly 1 argument (2 given)
If I remove @clsdeco, it works perfectly fine.
Any idea what I am missing here?

If you add the line shown below, it will work. This is because the @classmethod decorator applied in the class definition changes what getattr(cls, attribute) returns: it will be a descriptor for the named method, which adds the cls argument and then calls the real method.
What you need to do is retrieve the "raw" value of the attribute, which is just a regular function, and then turn it back into a class method by explicitly calling classmethod. This needed "raw" value is stored in the class dictionary __dict__ under the same attribute name, hence the need for adding the value = cls.__dict__[attribute].__func__ line.
Something similar will also be required to handle static methods properly. How to do this for all the different types of methods is described in this answer to the question Decorating a method that's already a classmethod? Some of the other answers there also describe what's going on in more detail than I have here.
def clsdeco(cls):
    attributes = cls.__dict__.keys()
    for attribute in attributes:
        # Do not decorate private methods
        if '__' in attribute:
            continue
        # Get the method
        value = getattr(cls, attribute)
        if not hasattr(value, '__call__'):
            continue
        # Check if method is a class method or normal method and decorate accordingly
        if value.im_self is None:  # non class method
            setattr(cls, attribute, deco_method(value))
        elif value.im_self is cls:  # class method
            value = cls.__dict__[attribute].__func__  # ADDED
            setattr(cls, attribute, classmethod(deco_method(value)))
        else:
            assert False
    return cls  # return decorated class
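For reference, here is a minimal sketch (untested, Python 2 as in the question) of a clsdeco variant that also handles static methods, by inspecting the raw entries in cls.__dict__ rather than the bound attributes that getattr returns:
def clsdeco(cls):
    for attribute, raw in list(cls.__dict__.items()):
        # Do not decorate private methods
        if '__' in attribute:
            continue
        # raw is the undecorated entry: a plain function, or a
        # classmethod/staticmethod wrapper object
        if isinstance(raw, staticmethod):
            setattr(cls, attribute, staticmethod(deco_method(raw.__func__)))
        elif isinstance(raw, classmethod):
            setattr(cls, attribute, classmethod(deco_method(raw.__func__)))
        elif callable(raw):  # plain function, i.e. an instance method
            setattr(cls, attribute, deco_method(raw))
    return cls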

Related

Python: Dynamically add properties to class instance, properties return function value with inputs

I've been going through all the Stack Overflow answers on dynamic property setting, but for whatever reason I can't seem to get this to work.
I have a class, Evolution_Base, whose __init__ creates an instance of Value_Differences. Value_Differences should dynamically create properties, based on the list I pass, that return the function value from _get_df_change:
from pandas import DataFrame
from dataclasses import dataclass
import pandas as pd

class Evolution_Base():
    def __init__(self, res_date_0 : DataFrame, res_date_1 : DataFrame):
        @dataclass
        class Results_Data():
            res_date_0_df : DataFrame
            res_date_1_df : DataFrame

        self.res = Results_Data(res_date_0_df= res_date_0,
                                res_date_1_df= res_date_1)

        property_list = ['abc', 'xyz']
        self.difference = Value_Differences(parent=self, property_list=property_list)

    # Shared Functions
    def _get_df_change(self, df_name, operator='-'):
        df_0 = getattr(self.res.res_date_0_df, df_name.lower())
        df_1 = getattr(self.res.res_date_1_df, df_name.lower())
        return self._df_change(df_1, df_0, operator=operator)

    def _df_change(self, df_1 : pd.DataFrame, df_0 : pd.DataFrame, operator='-') -> pd.DataFrame:
        """
        Returns df_1 <operator | default = -> df_0
        """
        # is_numeric mask
        m_1 = df_1.select_dtypes('number')
        m_0 = df_0.select_dtypes('number')

        def label_me(x):
            x.columns = ['t_1', 't_0']
            return x

        if operator == '-':
            return label_me(df_1[m_1] - df_0[m_0])
        elif operator == '+':
            return label_me(df_1[m_1] + df_0[m_0])

class Value_Differences():
    def __init__(self, parent : Evolution_Base, property_list = []):
        self._parent = parent
        for name in property_list:
            def func(self, prop_name):
                return self._parent._get_df_change(name)

            # I've tried the following...
            setattr(self, name, property(fget = lambda cls_self: func(cls_self, name)))
            setattr(self, name, property(func(self, name)))
            setattr(self, name, property(func))
It's driving me nuts... Any help appreciated!
My desired outcome is for:
evolution = Evolution_Base(df_1, df_2)
evolution.difference.abc == evolution._df_change('abc')
evolution.difference.xyz == evolution._df_change('xyz')
EDIT: The simple question is really, how do I setattr for a property function?
As asked
how do I setattr for a property function?
To be usable as a property, the accessor function needs to be wrapped as a property and then assigned as an attribute of the class, not the instance.
That function, meanwhile, needs to have a single unbound parameter - which will be an instance of the class, but is not necessarily the current self. Its logic needs to use the current value of name, but late binding will be an issue because of the desire to create lambdas in a loop.
A clear and simple way to work around this is to define a helper function accepting the Value_Differences instance and the name to use, and then bind the name value eagerly.
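For reference, a standalone sketch of the late-binding pitfall being avoided here (a hypothetical example, not from the question):
# Each lambda closes over the variable name, so all of them see its final value.
fns = [lambda: name for name in ['abc', 'xyz']]
print([f() for f in fns])  # ['xyz', 'xyz']

# Binding eagerly via a default argument (or functools.partial) fixes this.
fns = [lambda name=name: name for name in ['abc', 'xyz']]
print([f() for f in fns])  # ['abc', 'xyz']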
Naively:
from functools import partial

def _get_from_parent(name, instance):
    return instance._parent._get_df_change(name)

class Value_Differences:
    def __init__(self, parent: Evolution_Base, property_list = []):
        self._parent = parent
        for name in property_list:
            setattr(Value_Differences, name, property(
                fget = partial(_get_from_parent, name)
            ))
However, this of course has the issue that every instance of Value_Differences will set properties on the class, thus modifying what properties are available for each other instance. Further, in the case where there are many instances that should have the same properties, the setup work will be repeated at each instance creation.
The apparent goal
It seems that what is really sought, is the ability to create classes dynamically, such that a list of property names is provided and a corresponding class pops into existence, with code filled in for the properties implementing a certain logic.
There are multiple approaches to this.
Factory A: Adding properties to an instantiated template
Just like how functions can be nested within each other and the inner function will be an object that can be modified and returned (as is common when creating a decorator), a class body can appear within a function and a new class object (with the same name) is created every time the function runs. (The code in the OP already does this, for the Results_Data dataclass.)
def example():
    class Template:
        pass
    return Template
>>> TemplateA, TemplateB = example(), example()
>>> TemplateA is TemplateB
False
>>> isinstance(TemplateA(), TemplateB)
False
>>> isinstance(TemplateB(), TemplateA)
False
So, a "factory" for value-difference classes could look like
from functools import partial

def _make_value_comparer(property_names, access_func):
    class ValueDifferences:
        def __init__(self, parent):
            self._parent = parent

    for name in property_names:
        setattr(ValueDifferences, name, property(
            fget = partial(access_func, name)
        ))
    return ValueDifferences
Notice that instead of hard-coding a helper, this factory expects to be provided with a function that implements the access logic. That function takes two parameters: a property name, and the ValueDifferences instance. (They're in that order because it's more convenient for functools.partial usage.)
Factory B: Using the type constructor directly
The built-in type in Python has two entirely separate functions.
With one argument, it discloses the type of an object.
With three arguments, it creates a new type. The class syntax is in fact syntactic sugar for a call to this builtin. The arguments are:
a string name (will be set as the __name__ attribute)
a tuple of classes to use as superclasses (will be set as __bases__)
a dict mapping attribute names to their values (including methods and properties - will become the __dict__, roughly)
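As a quick illustration (a hypothetical example, not from the original answer), the class statement and the three-argument type call below create equivalent classes:
class Point:
    dims = 2

PointDynamic = type('Point', (), {'dims': 2})

assert Point.dims == PointDynamic.dims == 2
assert type(Point) is type(PointDynamic) is type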
In this style, the same factory could look something like:
from functools import partial

def _make_value_comparer(property_names, access_func):
    methods = {
        name: property(fget = partial(access_func, name))
        for name in property_names
    }
    methods['__init__'] = lambda self, parent: setattr(self, '_parent', parent)
    return type('ValueDifferences', (), methods)
Using the factory
In either of the above cases, EvolutionBase would be modified in the same way.
Presumably, every EvolutionBase should use the same ValueDifferences class (i.e., the one that specifically defines abc and xyz properties), so the EvolutionBase class can cache that class as a class attribute, and use it later:
class Evolution_Base():
    def _get_from_parent(name, mvd):
        # mvd._parent will be an instance of Evolution_Base.
        return mvd._parent._get_df_change(name)

    _MyValueDifferences = _make_value_comparer(['abc', 'xyz'], _get_from_parent)

    def __init__(self, res_date_0 : DataFrame, res_date_1 : DataFrame):
        @dataclass
        class Results_Data():
            res_date_0_df : DataFrame
            res_date_1_df : DataFrame

        self.res = Results_Data(res_date_0_df= res_date_0,
                                res_date_1_df= res_date_1)
        # class attribute lookup via self
        self.difference = self._MyValueDifferences(parent=self)
Notice that the cached _MyValueDifferences class no longer requires a list of property names to be constructed. That's because it was already provided when the class was created. The actual thing that varies per instance of _MyValueDifferences, is the parent, so that's all that gets passed.
Simpler approaches
It seems that the goal is to have a class whose instances are tightly associated with instances of Evolution_Base, providing properties specifically named abc and xyz that are computed using the Evolution_Base's data.
That could just be hard-coded as a nested class:
class Evolution_Base:
    class EBValueDifferences:
        def __init__(self, parent):
            self._parent = parent

        @property
        def abc(self):
            return self._parent._get_df_change('abc')

        @property
        def xyz(self):
            return self._parent._get_df_change('xyz')

    def __init__(self, res_date_0 : DataFrame, res_date_1 : DataFrame):
        @dataclass
        class Results_Data():
            res_date_0_df : DataFrame
            res_date_1_df : DataFrame

        self.res = Results_Data(res_date_0_df = res_date_0,
                                res_date_1_df = res_date_1)
        # look up the nested class via self
        self.difference = self.EBValueDifferences(self)

    # _get_df_change etc. as before
Even simpler, provide corresponding properties directly on Evolution_Base:
class Evolution_Base:
    @property
    def abc_difference(self):
        return self._get_df_change('abc')

    @property
    def xyz_difference(self):
        return self._get_df_change('xyz')

    def __init__(self, res_date_0 : DataFrame, res_date_1 : DataFrame):
        @dataclass
        class Results_Data():
            res_date_0_df : DataFrame
            res_date_1_df : DataFrame

        self.res = Results_Data(res_date_0_df = res_date_0,
                                res_date_1_df = res_date_1)

    # _get_df_change etc. as before

# client code now calls my_evolution_base.abc_difference
# instead of my_evolution_base.difference.abc
If there are a lot of such properties, they could be attached using a much simpler dynamic approach (that would still be reusable for other classes that define a _get_df_change):
def add_df_change_property(name, cls):
    setattr(
        cls, f'{name}_difference',
        property(fget = lambda instance: instance._get_df_change(name))
    )
    return cls  # return the class so this can double as a decorator
which can also be adapted for use as a decorator:
from functools import partial

def exposes_df_change(name):
    return partial(add_df_change_property, name)

@exposes_df_change('abc')
@exposes_df_change('def')
class Evolution_Base:
    # `self.difference` can be removed, no other changes needed
This is quite the rabbit hole. Impossible is a big call, but I will say this: they don't intend you to do this. The 'Pythonic' way of achieving your example use case is the __getattr__ method. You could also override the __dir__ method to insert your custom attributes for discoverability.
This is the code for that:
class Value_Differences():
    def __init__(self, parent : Evolution_Base, property_list = []):
        self._parent = parent
        self._property_list = property_list

    def __dir__(self):
        return sorted(set(
            dir(super(Value_Differences, self)) +
            list(self.__dict__.keys()) + self._property_list))

    def __getattr__(self, __name: str):
        if __name in self._property_list:
            return self._parent._get_df_change(__name)
But that wasn't the question, and respect for a really, really interesting question. This is one of those things that you look at and say 'hmm, should be possible' and can get almost to a solution. I initially thought what you asked for was technically possible, just very hacky to achieve. But it turns out that it would be very, very weird hackery if it was possible.
Two small foundational things to start with:
Remind ourselves of the hierarchy of Python objects that the runtime is working with when defining and instantiating classes:
The metaclass (defaulting to type), which is used to build classes. I'm going to refer to this as the Metaclass Type Object (MTO).
The class definition, which is used to build objects. I'm going to refer to this as the Class Type Object (CTO).
And the class instance or object, which I'll refer to as the Class Instance Object (CIO).
MTOs are subclasses of type. CTOs are subclasses of object. CIOs are instances of CTOs, but instantiated by MTOs.
Python runs code inside class definitions as if it was running a function:
class Class1:
    print("1")
    def __init__(self, v1):
        print("4")
    print("2")
    print("3")

c1 = Class1("x")
print("5")
gives 1, 2, 3, 4, 5
Put these two things together with:
class Class1:
    def attr1_get(self):
        return 'attr1 value'
    attr1 = property(attr1_get)
we are defining a function attr1_get as part of the class definition. We are then running an inline piece of code that creates an object of type property. Note that this is just the name of the object's type - it isn't a property as you would describe it. Just an object with some attributes, being references to various functions. We then assign that object to an attribute in the class we are defining.
In the terms I used above, once that code is run we have a CTO instantiated as an object in memory that contains an attribute attr1 of type property (an object subclass, containing a bunch of attributes itself - one of which is a reference to the function attr1_get).
That can be used to instantiate an object, the CIO.
This is where the MTO comes in. You instantiate the property object while defining the CTO so that when the runtime applies the MTO to create the CIO from the CTO, an attribute on the CIO will be formed with a custom getter function for that attribute rather than the 'standard' getter function the runtime would use. The property object means something to the type object when it is building a new object.
So when we run:
c1 = Class1()
we don't get a CIO c1 with an attribute attr1 that is an object of type property. The metaclass of type type formed a set of references against the attribute's internal state to all the functions we stored in the property object. Note that this is happening inside the runtime, and you can't call this directly from your code - you just tell the type metaclass to do it by using the property wrapper object.
So if you directly assign a property() result to an attribute of a CIO, you have a Pythonic object assigned that references some functions, but the internal state for the runtime to use to reference the getter, setter, etc. is not set up. The getter of an attribute that contains a property object is the standard getter, and so returns the property object itself, not the result of the functions it wraps.
This next bit of code demonstrates how this flows:
print("Let's begin")
class MetaClass1(type):
print("Starting to define MetaClass1")
def __new__(cls, name, bases, dct):
x = super().__new__(cls, name, bases, dct)
print("Metaclass1 __new__({})".format(str(cls)))
return x
print("__new__ of MetaClass1 is defined")
def __init__(cls, name, bases, dct):
print("Metaclass1 __init__({})".format(str(cls)))
print("__init__ of MetaClass1 is defined")
print("Metaclass is defined")
class Class1(object,metaclass=MetaClass1):
print("Starting to define Class1")
def __new__(cls, *args, **kwargs):
print("Class1 __new__({})".format(str(cls)))
return super(Class1, cls).__new__(cls, *args, **kwargs)
print("__new__ of Class1 is defined")
def __init__(self):
print("Class1 __init__({})".format(str(self)))
print("__init__ of Class1 is defined")
def g1(self):
return 'attr1 value'
print("g1 of Class1 is defined")
attr1 = property(g1)
print("Class1.attr1 = ", attr1)
print("attr1 of Class1 is defined")
def addProperty(self, name, getter):
setattr(self, name, property(getter))
print("self.", name, " = ", getattr(self, name))
print("addProperty of Class1 is defined")
print("Class is defined")
c1 = Class1()
print("Instance is created")
print(c1.attr1)
def g2(cls):
return 'attr2 value'
c1.addProperty('attr2', g2)
print(c1.attr2)
I have put all those print statements there to demonstrate the order in which things happen very clearly.
In the middle, you see:
g1 of Class1 is defined
Class1.attr1 = <property object at 0x105115c10>
attr1 of Class1 is defined
We have created an object of type property and assigned it to a class attribute.
Continuing:
addProperty of Class1 is defined
Metaclass1 __new__(<class '__main__.MetaClass1'>)
Metaclass1 __init__(<class '__main__.Class1'>)
Class is defined
The metaclass got instantiated, being passed first itself (__new__) and then the class it will work on (__init__). This happened right as we stepped out of the class definition. I have only included the metaclass to show what will happen with the type metaclass by default.
Then:
Class1 __new__(<class '__main__.Class1'>)
Class1 __init__(<__main__.Class1 object at 0x105124c10>)
Instance is created
attr1 value
self. attr2 = <property object at 0x105115cb0>
<property object at 0x105115cb0>
Class1 is instantiated, providing first its type to __new__ and then its instance to __init__.
We see that attr1 is instantiated properly, but attr2 is not. That is because setattr is called once the class instance is already constructed: it merely stores a property instance in attr2, rather than defining attr2 as the actual runtime construct of a property.
Which is made more clear if we run:
print(c1.attr2.fget(c1))
print(c1.attr1.fget(c1))
attr2 (a property object) isn't aware of the class or instance of the containing attribute's parent. The function it wraps still needs to be given the instance to work on.
attr1 doesn't know what to do with that, because as far as it is concerned it is a string object, and has no concept of how the runtime is mapping its getter.
The fundamental reason why what you tried doesn't work is that a property, a use case of a descriptor, by design must be stored as a class variable, not as an instance attribute.
Excerpt from the documentation of descriptor:
To use the descriptor, it must be stored as a class variable in
another class:
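A minimal sketch (my illustration, not from the documentation) of what that requirement means in practice: the same property object only acts as a descriptor when it is stored on the class:
class C:
    on_class = property(lambda self: 'computed')

c = C()
setattr(c, 'on_instance', property(lambda self: 'computed'))

print(c.on_class)     # 'computed' -- the descriptor protocol fires
print(c.on_instance)  # <property object at 0x...> -- just a stored object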
To create a class with dynamically named properties that has access to a parent class, one elegant approach is to create the class within a method of the main class and use setattr to create class attributes with dynamic names and property objects. A class created in the closure of a method automatically has access to the self object of the parent instance, avoiding the need to manage a clunky _parent attribute as in your attempt:
class Evolution_Base:
    def __init__(self, property_list):
        self.property_list = property_list
        self._difference = None

    @property
    def difference(self):
        if not self._difference:
            class Value_Differences:
                pass
            for name in self.property_list:
                # use a default value to store the value of name in each iteration
                def func(obj, prop_name=name):
                    return self._get_df_change(prop_name)  # access self via closure
                setattr(Value_Differences, name, property(func))
            self._difference = Value_Differences()
        return self._difference

    def _get_df_change(self, df_name):
        return f'df change of {df_name}'  # simplified return value for demo purposes
so that:
evolution = Evolution_Base(['abc', 'xyz'])
print(evolution.difference.abc)
print(evolution.difference.xyz)
would output:
df change of abc
df change of xyz
Demo: https://replit.com/@blhsing/ExtralargeNaturalCoordinate
Responding directly to your question, you can create a class:
class FooBar:
    def __init__(self, props):
        def make_prop(name):
            return property(lambda accessor_self: self._prop_impl(name))

        self.accessor = type(
            'Accessor',
            tuple(),
            {p: make_prop(p) for p in props},
        )()

    def _prop_impl(self, arg):
        return arg

o = FooBar(['foo', 'bar'])
assert o.accessor.foo == o._prop_impl('foo')
assert o.accessor.bar == o._prop_impl('bar')
Further, it would be beneficial to cache the created class, to make equivalent objects more alike and to eliminate potential issues with equality comparison.
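A hedged sketch of that caching idea (names assumed; it reworks the FooBar example so the generated class no longer closes over a particular instance): memoize the accessor class on the tuple of property names, so equivalent configurations share one type:
from functools import lru_cache

@lru_cache(maxsize=None)
def _accessor_class(props):
    # props must be a hashable tuple for lru_cache to work
    def make_prop(name):
        return property(lambda acc: acc._owner._prop_impl(name))
    ns = {p: make_prop(p) for p in props}
    ns['__init__'] = lambda acc, owner: setattr(acc, '_owner', owner)
    return type('Accessor', (), ns)

class FooBar:
    def __init__(self, props):
        self.accessor = _accessor_class(tuple(props))(self)

    def _prop_impl(self, arg):
        return arg

o1, o2 = FooBar(['foo']), FooBar(['foo'])
assert type(o1.accessor) is type(o2.accessor)  # the generated class is shared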
That said, I am not sure this is desirable. There is little benefit to replacing method call syntax (o.f('a')) with property access (o.a). I believe it can be detrimental on multiple accounts: dynamic properties are confusing and harder to document, and finally, while none of this is strictly guaranteed in the crazy world of dynamic Python, they communicate the wrong message: that the access is cheap, does not involve computation, and that perhaps you can attempt to write to it.
When you define the function func in the loop, it closes over the name variable itself, not the value name has at the time the property is defined. To fix this, you can give the lambda a default argument that captures the value of name at the time the property is defined.
class Value_Differences():
    def __init__(self, parent : Evolution_Base, property_list = []):
        self._parent = parent
        for name in property_list:
            setattr(self, name, property(fget = lambda self, name=name: self._parent._get_df_change(name)))
Does this help you?
The simple question is really, how do I setattr for a property function?
In Python we can set dynamic attributes like this:
class DynamicProperties():
    def __init__(self, property_list):
        self.property_list = property_list

    def add_properties(self):
        for name in self.property_list:
            setattr(self.__class__, name, property(fget=lambda self: 1))

dync = DynamicProperties(['a', 'b'])
dync.add_properties()
print(dync.a)  # prints 1
print(dync.b)  # prints 1
Correct me if I am wrong, but from reviewing your code, you want to create dynamic attributes and set their value to a specific function call within the same class, where the passed-in data is set as attributes in the constructor __init__. This is achievable; an example:
class DynamicProperties():
    def __init__(self, property_list, data1, data2):
        self.property_list = property_list
        self.data1 = data1
        self.data2 = data2

    def add_properties(self):
        for name in self.property_list:
            setattr(self.__class__, name, property(fget=lambda self: self.change(self.data1, self.data2)))

    def change(self, data1, data2):
        return data1 - data2

dync = DynamicProperties(['a', 'b'], 1, 2)
dync.add_properties()
print(dync.a == dync.change(1, 2))  # prints True
print(dync.b == dync.change(1, 2))  # prints True
You just have to add more complexity to the member; __getattr__ / __setattr__ give you the attribute name as a string, so it can be interpreted as needed. The biggest "problem" with doing this is that the return might not be consistent, and piping it back to a library that expects an object to have a specific behavior can cause soft errors.
This example is not the same as yours, but it has the same concept: manipulate columns with members. To get a copy with changes, a setter is not needed; with a copy, modify and return, the new instance can be created with whatever is needed.
For example, the __getattr__ in this line will:
Check and interpret the string xyz_mull_0
Validate that the members and the operand exists
Make a copy of data_a
Modify the copy and return it
var = data_a.xyz_mull_0()
This looks more complex than it actually is; with the same instance members it is clear what it is doing. But the _of modifier needs a callback: because __getattr__ can only take one parameter, it needs to save the attribute and return a callback to be called with the other instance, which then calls back into __getattr__ and completes the rest of the function.
import re

class FlexibleFrame:
    operand_mod = {
        'sub': lambda a, b: a - b,
        'add': lambda a, b: a + b,
        'div': lambda a, b: a / b,
        'mod': lambda a, b: a % b,
        'mull': lambda a, b: a * b,
    }

    @staticmethod
    def add_operand(name, func):
        if name not in FlexibleFrame.operand_mod.keys():
            FlexibleFrame.operand_mod[name] = func

    # This makes this class subscriptable
    def __getitem__(self, item):
        return self.__dict__[item]

    # Uses:
    # -> object.value
    # -> object.member()
    # -> object.<name>_<operand>_<name|int>()
    # -> object.<name>_<operand>_<name|int>_<flow>()
    def __getattr__(self, attr):
        if re.match(r'^[a-zA-Z]+_[a-zA-Z]+_[a-zA-Z0-9]+(_of)?$', attr):
            seg = attr.split('_')
            var_a, operand, var_b = seg[0:3]

            # If there is a _of: the second operand is from the other
            # instance, the _of is removed and a callback is returned
            if len(seg) == 4:
                self.__attr_ref = '_'.join(seg[0:3])
                return self.__getattr_of

            # Checks if this was a _of attribute and resets it
            if self.__back_ref is not None:
                other = self.__back_ref
                self.__back_ref = None
                self.__attr_ref = None
            else:
                other = self

            if var_a not in self.__dict__:
                raise AttributeError(
                    f'No match of {var_a} in (primary) {__class__.__name__}'
                )
            if operand not in FlexibleFrame.operand_mod.keys():
                raise AttributeError(
                    f'No match of operand {operand}'
                )

            # The return is a copy of self, if not the instance
            # is getting modified making x = a.b() useless
            ret = FlexibleFrame(**self.__dict__)

            # Checks if the second operand is an int
            if re.match(r'^\d+$', var_b):
                ref_b_num = int(var_b)
                for i in range(len(self[var_a])):
                    ret[var_a][i] = FlexibleFrame.operand_mod[operand](
                        self[var_a][i], ref_b_num
                    )
            elif var_b in other.__dict__:
                for i in range(len(self[var_a])):
                    # out_index = operand[type](in_a_index, in_b_index)
                    ret[var_a][i] = FlexibleFrame.operand_mod[operand](
                        self[var_a][i], other[var_b][i]
                    )
            else:
                raise AttributeError(
                    f'No match of {var_b} in (secondary) {__class__.__name__}'
                )

            # This swaps the .member to a .member()
            # it also adds an extra () in __getattr_of
            return lambda: ret
            # return ret

        if attr in self.__dict__:
            return self[attr]

        raise AttributeError(
            f'No match of {attr} in {__class__.__name__}'
        )

    def __getattr_of(self, other):
        self.__back_ref = other
        return self.__getattr__(self.__attr_ref)()

    def __init__(self, **kwargs):
        self.__back_ref = None
        self.__attr_ref = None
        # TODO: Check if data columns match in size
        # if not, implement column_<name>_filler=<default>
        for i in kwargs:
            self.__dict__[i] = kwargs[i]

if __name__ == '__main__':
    data_a = FlexibleFrame(**{
        'abc': [i for i in range(10)],
        'nmv': [i for i in range(10)],
        'xyz': [i for i in range(10)],
    })
    data_b = FlexibleFrame(**{
        'fee': [i + 10 for i in range(10)],
        'foo': [i + 10 for i in range(10)],
    })

    FlexibleFrame.add_operand('set', lambda a, b: b)

    var = data_a.xyz_mull_0()
    var = var.abc_set_xyz()
    var = var.xyz_add_fee_of(data_b)
As an extra note, lambdas in Python close over variables rather than values (late binding), which can make them difficult to use when self changes.
It seems you're bending the language to do weird things. I'd take it as a smell that your code is probably getting convoluted, but I'm not saying there would never be a use case for it, so here is a minimal example of how to do it:
class Obj:
    def _df_change(self, arg):
        print('change', arg)

class DynAttributes(Obj):
    def __getattr__(self, name):
        return self._df_change(name)

class Something:
    difference = DynAttributes()

a = Something()
b = Obj()

assert a.difference.hello == b._df_change('hello')
When calling setattr, use self.__class__ instead of self.
Code sample:
from typing import List

class A:
    def __init__(self, names : List[str]):
        for name in names:
            setattr(self.__class__, name, property(fget=self.__create_getter(name)))

    def __create_getter(self, name: str):
        def inner(self):
            print(f"invoking {name}")
            return 10
        return inner

a = A(['x', 'y'])
print(a.x + 1)
print(a.y + 2)
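One caveat worth a quick sketch (my note, not part of the answer): because setattr targets self.__class__, the generated properties land on the class and are shared by every instance of A:
a1 = A(['x'])
a2 = A(['y'])
print(hasattr(a2, 'x'))  # True ("invoking x" is printed): a1's __init__ added x to the class itself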

How do I use same getter and setter properties and functions for different attributes of a class the pythonic way?

I've got this class that I'm working on that stores Employees details.
I want all attributes to be protected and to be set and retrieved with specific logic, but not each in a unique way. I would like the same logic to apply to my _f_name and my _l_name attributes, and perhaps the same logic to apply to attributes that take booleans and other general cases.
I've got this for the first attribute:
@property
def f_name(self):
    return self._f_name

@f_name.setter
def f_name(self, f_name):
    if f_name != str(f_name):
        raise TypeError("Name must be set to a string")
    else:
        self._f_name = self._clean_up_string(f_name)

@f_name.deleter
def available(self):
    raise AttributeError("Can't delete, you can only change this value.")
How can I apply the same functions and properties to other attributes?
Thaaaanks!
While it may seem like defining a subclass of property is possible, too many details of how a particular property works are left to the getter and setter to define, meaning it's more straightforward to define a custom property-like descriptor.
class CleanableStringProperty:
    def __set_name__(self, owner, name):
        self._private_name = "_" + name
        self.name = name

    def __get__(self, obj, objtype=None):
        # Boilerplate to handle accessing the property
        # via a class, rather than an instance of the class.
        if obj is None:
            return self
        return getattr(obj, self._private_name)

    def __set__(self, obj, value):
        if not isinstance(value, str):
            raise TypeError(f'{self.name} value must be a str')
        setattr(obj, self._private_name, obj._clean_up_string(value))

    def __delete__(self, obj):
        raise AttributeError("Can't delete, you can only change this value.")
__set_name__ constructs the name of the instance attribute that the getter and setter will use. __get__ acts as the getter, using getattr to retrieve the constructed attribute name from the given object. __set__ validates and modifies the value before using setattr to set the constructed attribute name. __delete__ simply raises an attribute error, independent of whatever object the caller is trying to remove the attribute from.
Here's a simple demonstration which causes all values assigned to the descriptor to be put into title case.
class Foo:
    f_name = CleanableStringProperty()
    l_name = CleanableStringProperty()

    def __init__(self, first, last):
        self.f_name = first
        self.l_name = last

    def _clean_up_string(self, v):
        return v.title()

f = Foo("john", "doe")
assert f.f_name == "John"
assert f.l_name == "Doe"

try:
    del f.f_name
except AttributeError:
    print("Prevented first name from being deleted")
It would also be possible for the cleaning function, rather than being something that obj is expected to provide, to be passed as an argument to CleanableStringProperty itself. __init__ and __set__ would be modified as
def __init__(self, cleaner):
    self.cleaner = cleaner

def __set__(self, obj, value):
    if not isinstance(value, str):
        raise TypeError(f'{self.name} value must be a str')
    setattr(obj, self._private_name, self.cleaner(value))
and the descriptor would be initialized with
class Foo:
    fname = CleanableStringProperty(str.title)
Note that Foo is no longer responsible for providing a cleaning method.
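A brief usage sketch of that variant (assuming the modified __init__ and __set__ are combined with the unchanged __set_name__, __get__ and __delete__ from above):
class Foo:
    fname = CleanableStringProperty(str.title)

    def __init__(self, first):
        self.fname = first

f = Foo("john")
assert f.fname == "John"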
A property is just an implementation of a descriptor, so to create a custom property, you need an object with a __get__, __set__, and/or __delete__ method.
In your case, you could do something like this:
from typing import Any, Callable, Tuple

class ValidatedProperty:
    def __set_name__(self, obj, name):
        self.name = name
        self.storage = f"_{name}"

    def __init__(self, validation: Callable[[Any], Tuple[str, Any]] = None):
        """Initializes a ValidatedProperty object

        Args:
            validation (Callable[[Any], Tuple[str, Any]], optional): A Callable that
                takes the given value and returns an error string (empty string if no
                error) and the cleaned-up value. Defaults to None.
        """
        self.validation = validation

    def __get__(self, instance, owner):
        return getattr(instance, self.storage)

    def __set__(self, instance, value):
        if self.validation:
            error, value = self.validation(value)
            if error:
                raise ValueError(f"Error setting property {self.name}: {error}")
        setattr(instance, self.storage, value)

    def __delete__(self, instance):
        raise AttributeError("Can't delete, you can only change this value.")
Let's define an example class to use this:
class User:
    def __name_validation(value):
        if not isinstance(value, str):
            return (f"Expected string value, received {type(value).__name__}", None)
        return ("", value.strip().title())

    f_name = ValidatedProperty(validation=__name_validation)
    l_name = ValidatedProperty(validation=__name_validation)

    def __init__(self, fname, lname):
        self.f_name = fname
        self.l_name = lname
and test:
u = User("Test", "User")
print(repr(u.f_name))  # 'Test'

u.f_name = 123  # ValueError: Error setting property f_name: Expected string value, received int

u.f_name = "robinson "  # Notice the trailing space
print(repr(u.f_name))  # 'Robinson'

u.l_name = "crusoe "
print(repr(u.l_name))  # 'Crusoe'

Python Register Class Methods with a Decorator and Inheritance

How do I register class methods with a decorator and use inheritance at the same time? I've found a lot of semi-helpful posts/tutorials online; however, I haven't been able to find exactly what I'm looking for. In the process, I think I've confused myself (class decorators vs. metaclasses). Here's the expected operation of what I am hoping to build. (FYI using Python 3.6)
class Parent:
    @property
    def tagged(self):
        return  # TODO: return list of "tagged" methods

    @property
    def parent1(self):
        return 'parent1'

    @tag
    @property
    def parent2(self):
        return 'parent2'

class Child1(Parent):
    @tag
    @property
    def child1_1(self):
        return 'child1_1'

    @tag
    @property
    def child1_2(self):
        return 'child1_2'

class Child2(Parent):
    @property
    def child2_1(self):
        return 'child2_1'

    @tag
    @property
    def child2_2(self):
        return 'child2_2'

c1 = Child1()
c2 = Child2()

print(c1.tagged)
# expect: ['parent2', 'child1_1', 'child1_2']
print(c2.tagged)
# expect: ['parent2', 'child2_2']
EDIT:
Here is what I have actually tried (originally omitted to try to keep the post size down).
In this attempt, I tried to use a class decorator. I was hoping to use the tag function as a decorator for the class methods to add an attribute to the method. My understanding is that everything in Python is an object (even functions), so adding an arbitrary attribute should be doable. My thought was then to use the class decorator (tagger) to add a _tagged attribute to the class that would contain a list of method names that were "tagged". This obviously does not work as I intended: the attribute added to the class methods is not "seen" by the class decorator.
def tagger(cls):
    cls._tagged = []
    for methodname in dir(cls):
        method = getattr(cls, methodname)
        if hasattr(method, 'tagged'):
            cls._tagged.append(methodname)
    return cls

def tag(f):
    def decorator():
        f.tagged = True
        return f
    return decorator

@tagger
class Parent:
    @property
    def tagged(self):
        return self._tagged

    def parent1(self):
        return 'parent1'

    @tag
    @property
    def parent2(self):
        return 'parent2'

@tagger
class Child1(Parent):
    @tag
    @property
    def child1_1(self):
        return 'child1_1'

    @tag
    @property
    def child1_2(self):
        return 'child1_2'

@tagger
class Child2(Parent):
    @property
    def child2_1(self):
        return 'child2_1'

    @tag
    @property
    def child2_2(self):
        return 'child2_2'

c1 = Child1()
c2 = Child2()

print(c1.tagged)
# expect: ['parent2', 'child1_1', 'child1_2']
# got: []
print(c2.tagged)
# expect: ['parent2', 'child2_2']
# got: []
In this attempt I tried to use a metaclass. Similar to the last approach, the idea was to add an attribute to the class method and then use that as a key for building the list of "tagged" methods. This didn't work either, as I ran into what I think is the same fundamental issue as before, where the attribute added to the function is not maintained. I was trying to shy away from this method to begin with because it requires a global variable (tagged); I'm hoping to find a cleaner solution.
from collections import defaultdict

tagged = defaultdict(dict)

class TaggableType(type):
    def __init__(cls, name, bases, attrs):
        for name, method in attrs.items():
            if hasattr(method, 'tagged'):
                tagged[cls.__name__][name] = method

def tag(f):
    def decorator():
        f.tagged = True
        return f
    return decorator

class Parent(metaclass=TaggableType):
    @property
    def tagged(self):
        return tagged[self.__class__.__name__]

    def parent1(self):
        return 'parent1'

    @tag
    @property
    def parent2(self):
        return 'parent2'

class Child1(Parent, metaclass=TaggableType):
    @tag
    @property
    def child1_1(self):
        return 'child1_1'

    @tag
    @property
    def child1_2(self):
        return 'child1_2'

class Child2(Parent, metaclass=TaggableType):
    @property
    def child2_1(self):
        return 'child2_1'

    @tag
    @property
    def child2_2(self):
        return 'child2_2'

c1 = Child1()
c2 = Child2()

print(c1.tagged)
# expect: ['parent2', 'child1_1', 'child1_2']
# got: {}
print(c2.tagged)
# expect: ['parent2', 'child2_2']
# got: {}
I dislike both the metaclass and class decorator approaches (provided that they even work) because they require some cooperation from the child class (either applying the decorator or explicitly setting the metaclass). I would ideally like a clean and simple solution where the child class does not need any special configuration beyond inheriting from the parent class.
As I mentioned in the comments, you are basically on the right track. You have a decorator tag that marks your attributes for addition to the list. Either a metaclass or a class decorator is a perfectly valid tool to compile the list and add it to your class as an attribute.
There are two main problems with your implementation.
You seem to have a misunderstanding of how function decorators work (which is strange given that you seem to understand class decorators perfectly, but totally understandable because I too have read the same misleading tutorials). As with class decorators, function decorators can return anything at all, not necessarily the nested function that is so often given in examples. For example, @property returns a data descriptor that is not even callable!
Remember that the result of the decorator replaces whatever the input was. So in your current implementation, you start with some function that returns a property value. You then replace it with a function of no arguments that sets an attribute on the original function object. But the original function object is basically discarded and can't be called. That makes no sense. What you probably want is a function that sets an attribute on the original function and just returns that original function, so it does not get replaced:
def tag(f):
    f.tagged = True
    return f
You can't decorate a property in the way you are thinking. The @property decorator does not return a function. It returns an immutable object, and you cannot set an attribute on it (except the docstring, as of Py3.6). The reason your code did not fail with AttributeError: 'property' object has no attribute 'tagged' is that your tag decorator does not do what you think it does.
A possible solution is to apply @tag before applying @property (given the version of tag shown above). Now, instead of just checking if each "method" is tagged, you also have to check if the "method" is really a property object, and check if its fget attribute is tagged. Alternatively, you can check if the input to tag is a property, and tag the fget attribute instead of the property itself. This will make your decorators order-independent again.
Fixing these two problems will give you the tagged items in each child class. It will not, however, give you the complete list including the parent classes. Luckily, the parents of your class object are fully known for both a class decorator and a metaclass, so you can create a complete list based on simple inheritance.
Before showing code, I also want to mention something about metaclasses vs. decorators for your code. Functionally, there will not be much difference between the two in the sense that they both look up attributes with tagged set and fill in a list on the class. There is going to be a usage difference though. You will only have to set the metaclass once for the parent to get the same behavior for all the children because the type of the parent is generally the type of the subclass (you do not need to have metaclass=TaggableType in all your child classes). A decorator would have to be applied to every child individually. I can see pros and cons for both approaches, so I will show both.
Using Class Decorators
def tagger(cls):
    cls._tagged = set()
    for methodname in dir(cls):
        method = getattr(cls, methodname)
        if isinstance(method, property):
            method = method.fget
        if hasattr(method, 'tagged'):
            cls._tagged.add(methodname)
    return cls

def tag(f):
    if isinstance(f, property):
        f.fget.tagged = True
    else:
        f.tagged = True
    return f
@tagger
class Parent:
    @property
    def tagged(self):
        tags = set()
        # Support multiple inheritance out of the box
        for t in type(self).__mro__:
            if hasattr(t, '_tagged'):
                tags.update(t._tagged)
        return tags

    @property
    def parent1(self):
        return 'parent1'

    @tag
    @property
    def parent2(self):
        return 'parent2'

@tagger
class Child1(Parent):
    @tag
    @property
    def child1_1(self):
        return 'child1_1'

    @tag
    @property
    def child1_2(self):
        return 'child1_2'

@tagger
class Child2(Parent):
    @property
    def child2_1(self):
        return 'child2_1'

    @tag
    @property
    def child2_2(self):
        return 'child2_2'

c1 = Child1()
c2 = Child2()

print(c1.tagged)
print(c2.tagged)
Results in
{'child1_2', 'parent2', 'child1_1'}
{'parent2', 'child2_2'}
There is a slight conceptual disconnect here, because the decorator needs to be applied to every child to gather the data, but only the parent needs to have a tagged property for it to work on all children.
Using a Metaclass
class TaggableType(type):
    def __init__(cls, name, bases, attrs):
        cls._tagged = set()
        for name, method in attrs.items():
            if isinstance(method, property):
                method = method.fget
            if hasattr(method, 'tagged'):
                cls._tagged.add(name)

def tag(f):
    if isinstance(f, property):
        f.fget.tagged = True
    else:
        f.tagged = True
    return f
class Parent(metaclass=TaggableType):
    @property
    def tagged(self):
        tags = set()
        for t in type(self).__mro__:
            if hasattr(t, '_tagged'):
                tags.update(t._tagged)
        return tags

    def parent1(self):
        return 'parent1'

    @tag
    @property
    def parent2(self):
        return 'parent2'

class Child1(Parent):
    @tag
    @property
    def child1_1(self):
        return 'child1_1'

    @tag
    @property
    def child1_2(self):
        return 'child1_2'

class Child2(Parent):
    @property
    def child2_1(self):
        return 'child2_1'

    @tag
    @property
    def child2_2(self):
        return 'child2_2'

c1 = Child1()
c2 = Child2()

print(c1.tagged)
print(c2.tagged)
Results in:
{'child1_1', 'child1_2', 'parent2'}
{'child2_2', 'parent2'}
Another thing to keep in mind is that in the metaclass option, you have the MRO available up front, so you could construct a complete list of tags right then and there. I do not like redundant information because it usually ends up being a maintenance burden. A better alternative is to define the property in the metaclass, and assign it to each class you create as a bonus.
Update
In fact, my last suggestion makes the metaclass option objectively better because it allows you to circumvent explicitly using MRO, instead delegating the resolution to super, as you should:
class TaggableType(type):
    def __init__(cls, name, bases, attrs):
        # named tagged_names so the closure is not shadowed by the property below
        tagged_names = set()
        for name, method in attrs.items():
            if isinstance(method, property):
                method = method.fget
            if hasattr(method, 'tagged'):
                tagged_names.add(name)

        @property
        def tagged(self):
            tags = tagged_names.copy()
            try:
                tags.update(super(cls, self).tagged)
            except AttributeError:
                pass
            return tags

        cls.tagged = tagged
Now none of the classes need to explicitly define a tagged property at all.
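A quick usage sketch of that updated metaclass (my example, reusing the tag decorator defined earlier; set ordering in the output is arbitrary):
class Parent(metaclass=TaggableType):
    @tag
    @property
    def parent2(self):
        return 'parent2'

class Child1(Parent):
    @tag
    @property
    def child1_1(self):
        return 'child1_1'

print(Child1().tagged)  # expected: {'parent2', 'child1_1'}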
The main issue with your two attempted implementations is that your tag decorator isn't working the way you intend. For a simple decorator that doesn't need to be passed any arguments (unlike, say, @foo(x)) and returns the original function, you don't need a nested function at all:
def tag(f):
    f.tagged = True
    return f
You still have a problem though, since not all objects allow you to set arbitrary attributes on them. Functions do, but instances of property do not.
One way to work around that would be to change the order of the decorators on your property methods, so that tag gets applied first (on the function, where it will work), and the property decorator applies afterwards. Then you'd just need to check each property in the class to see if its getter is tagged later on.
class Child1(Parent):
    @property  # swap decorator order!
    @tag       # this gets applied first, then property applies to the result
    def child1_1(self):
        return 'child1_1'

    @property  # same here (and in the other classes)
    @tag
    def child1_2(self):
        return 'child1_2'
Now to address the question of how best to gather up the list of tagged methods. Either of the approaches you tried can be made to work with just a little tweaking (such as looking inside of property instances to see the tag on the getter). But another option would be to make the tagged method in the Parent class do all the work. This might be slow the first time you checked for tags, but you could cache the result for later calls:
class Parent:
    @property
    def tagged(self):
        cls = type(self)
        if '_tagged' not in cls.__dict__:  # check the dict directly to avoid an inherited _tagged
            tagged = []
            for name in dir(cls):
                var = getattr(cls, name)
                if (hasattr(var, 'tagged') or
                        isinstance(var, property) and hasattr(var.fget, 'tagged')):
                    tagged.append(name)
            cls._tagged = tagged
        return cls._tagged

    ...
If you want to go with metaclasses, you don't need to worry about every class needing the metaclass=... declaration. Metaclasses are inherited; you only need to explicitly declare the metaclass in Parent. All classes that inherit from Parent will use its metaclass too. The only downside to this is that they can't declare some other metaclass of their own, as you'll get a metaclass conflict (though you might be able to work around it by making the other metaclass inherit from TaggableType, or even creating a new metaclass that inherits from both TaggableType and the other desired metaclass).
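A hedged sketch of that workaround (abc.ABCMeta is chosen purely as an example of another metaclass; names follow the code above):
from abc import ABCMeta

# A combined metaclass inherits from both, resolving the conflict.
class TaggableABCMeta(TaggableType, ABCMeta):
    pass

class Child3(Parent, metaclass=TaggableABCMeta):
    pass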

__getattr__ of meta class not being called

As the title says: it seems no matter what I do, __getattr__ will not be called. I also tried it on an instance (absurd, I know), with predictably no response. As if __getattr__ were banned in metaclasses.
I'd appreciate any pointer to documentation about this.
The code:
class PreinsertMeta(type):
    def resolvedField(self):
        if isinstance(self.field, basestring):
            tbl, fld = self.field.split(".")
            self.field = (tbl, fld)
        return self.field
    Field = property(resolvedField)

    def __getattr__(self, attrname):
        if attrname == "field":
            if isinstance(self.field, basestring):
                tbl, fld = self.field.split(".")
                self.field = (tbl, fld)
            return self.field
        else:
            return super(PreinsertMeta, self).__getattr__(attrname)

    def __setattr__(self, attrname, value):
        super(PreinsertMeta, self).__setattr__(attrname, value)

class Test(object):
    __metaclass__ = PreinsertMeta
    field = "test.field"

print Test.field  # Should already print the tuple
Test.field = "another.field"  # __setattr__ gets called nicely
print Test.field  # Again with the string?
print Test.Field  # note the capital 'F', this actually calls resolvedField() and prints the tuple
Thanks to BrenBarn, here's the final working implementation:
class PreinsertMeta(type):
    def __getattribute__(self, attrname):
        if attrname == "field" and isinstance(object.__getattribute__(self, attrname), basestring):
            tbl, fld = object.__getattribute__(self, attrname).split(".")
            self.field = (tbl, fld)
        return object.__getattribute__(self, attrname)
As documented, __getattr__ is only called if the attribute does not exist. Since your class has a field attribute, that blocks __getattr__. You can use __getattribute__ if you really want to intercept all attribute access, although it's not clear from your example why you need to do this. Note that this has nothing to do with metaclasses; you would see the same behavior if you created an instance of an ordinary class and gave it some attribute.
Even assuming you used __getattribute__, so it was called when the attribute exists, your implementation doesn't make much sense. Inside __getattr__ you try to get a value for self.field. But if __getattribute__ was called in the first place, it will be called again for this access, creating an infinite recursion: in order to get self.field, it has to call __getattribute__, which again tries to get self.field, which again calls __getattribute__, etc. See the documentation for __getattribute__ for how to get around this.

How do I directly mock a superclass with python mock?

I am using the python mock framework for testing (http://www.voidspace.org.uk/python/mock/) and I want to mock out a superclass and focus on testing the subclasses' added behavior.
(For those interested I have extended pymongo.collection.Collection and I want to only test my added behavior. I do not want to have to run mongodb as another process for testing purposes.)
For this discussion, A is the superclass and B is the subclass. Furthermore, I define direct and indirect superclass calls as shown below:
class A(object):
    def method(self):
        ...
    def another_method(self):
        ...

class B(A):
    def direct_superclass_call(self):
        ...
        A.method(self)

    def indirect_superclass_call(self):
        ...
        super(B, self).another_method()
Approach #1
Define a mock class for A called MockA and use mock.patch to substitute it for the test at runtime. This handles direct superclass calls. Then manipulate B.__bases__ to handle indirect superclass calls. (see below)
The issue that arises is that I have to write MockA and in some cases (as in the case for pymongo.collection.Collection) this can involve a lot of work to unravel all of the internal calls to mock out.
Approach #2
The desired approach is to somehow use a mock.Mock() class to handle calls on the mock just in time, as well as define return_value or side_effect in place in the test. In this manner, I do less work by avoiding the definition of MockA.
The issue I am having is that I cannot figure out how to alter B.__bases__ so that an instance of mock.Mock() can be put in place as a superclass (I must need to somehow do some direct binding here). Thus far I have determined that super() examines the MRO and then calls the first class that defines the method in question. I cannot figure out how to get that lookup to succeed when it comes across a mock class; __getattr__ does not seem to be used in this case. I want super to think that the method is defined at this point and then use the mock.Mock() functionality as usual.
How does super() discover what attributes are defined within the class in the MRO sequence? And is there a way for me to interject here and to somehow get it to utilize a mock.Mock() on the fly?
import mock

class A(object):
    def __init__(self, value):
        self.value = value
    def get_value_direct(self):
        return self.value
    def get_value_indirect(self):
        return self.value

class B(A):
    def __init__(self, value):
        A.__init__(self, value)
    def get_value_direct(self):
        return A.get_value_direct(self)
    def get_value_indirect(self):
        return super(B, self).get_value_indirect()

# approach 1 - use a defined MockA
class MockA(object):
    def __init__(self, value):
        pass
    def get_value_direct(self):
        return 0
    def get_value_indirect(self):
        return 0

B.__bases__ = (MockA, )  # - mock superclass
with mock.patch('__main__.A', MockA):
    b2 = B(7)
    print '\nApproach 1'
    print 'expected result = 0'
    print 'direct =', b2.get_value_direct()
    print 'indirect =', b2.get_value_indirect()
B.__bases__ = (A, )  # - original superclass

# approach 2 - use mock module to mock out superclass
# what does XXX need to be below to use mock.Mock()?
# B.__bases__ = (XXX, )
with mock.patch('__main__.A') as mymock:
    b3 = B(7)
    mymock.get_value_direct.return_value = 0
    mymock.get_value_indirect.return_value = 0
    print '\nApproach 2'
    print 'expected result = 0'
    print 'direct =', b3.get_value_direct()
    print 'indirect =', b3.get_value_indirect()  # FAILS HERE as the old superclass is called
# B.__bases__ = (A, )  # - original superclass
is there a way for me to interject here and to somehow get it to utilize a mock.Mock() on the fly?
There may be better approaches, but you can always write your own super() and inject it into the module that contains the class you're mocking. Have it return whatever it should based on what's calling it.
You can either just define super() in the current namespace (in which case the redefinition only applies to the current module after the definition), or you can import __builtin__ and apply the redefinition to __builtin__.super, in which case it will apply globally in the Python session.
You can capture the original super function (if you need to call it from your implementation) using a default argument:
def super(type, obj=None, super=super):
    # inside the function, super refers to the built-in
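A minimal sketch of that idea (Python 2, matching the question; A and B are assumed to be the classes from the question's example, and mock_a is a name I introduce here), shadowing the built-in super and restoring it afterwards:
import __builtin__
import mock

real_super = super
mock_a = mock.MagicMock()
mock_a.get_value_indirect.return_value = 0

def fake_super(typ, obj=None, real_super=real_super):
    # Route B's indirect superclass calls to the mock;
    # leave every other super() call untouched.
    if typ is B:
        return mock_a
    return real_super(typ, obj)

__builtin__.super = fake_super
try:
    b = B(7)
    assert b.get_value_indirect() == 0
finally:
    __builtin__.super = real_super  # always restore the built-in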
I played around with mocking out super() as suggested by kindall. Unfortunately, after a great deal of effort it became quite complicated to handle complex inheritance cases.
After some work I realized that super() accesses the __dict__ of classes directly when resolving attributes through the MRO (it does not do a getattr type of call). The solution is to extend a mock.MagicMock() object and wrap it with a class to accomplish this. The wrapped class can then be placed in the __bases__ variable of a subclass.
The wrapped object reflects all defined attributes of the target class to the __dict__ of the wrapping class so that super() calls resolve to the properly patched in attributes within the internal MagicMock().
The following code is the solution that I have found to work thus far. Note that I actually implement this within a context handler. Also, care has to be taken to patch in the proper namespaces if importing from other modules.
This is a simple example illustrating the approach:
from mock import MagicMock
import inspect

class _WrappedMagicMock(MagicMock):
    def __init__(self, *args, **kwds):
        object.__setattr__(self, '_mockclass_wrapper', None)
        super(_WrappedMagicMock, self).__init__(*args, **kwds)

    def wrap(self, cls):
        # get defined attributes of spec class that need to be preset
        base_attrs = dir(type('Dummy', (object,), {}))
        attrs = inspect.getmembers(self._spec_class)
        new_attrs = [a[0] for a in attrs if a[0] not in base_attrs]

        # pre set mocks for attributes in the target mock class
        for name in new_attrs:
            setattr(cls, name, getattr(self, name))

        # eat up any attempts to initialize the target mock class
        setattr(cls, '__init__', lambda *args, **kwds: None)

        object.__setattr__(self, '_mockclass_wrapper', cls)

    def unwrap(self):
        object.__setattr__(self, '_mockclass_wrapper', None)

    def __setattr__(self, name, value):
        super(_WrappedMagicMock, self).__setattr__(name, value)
        # be sure to reflect changes to the wrapper class if activated
        if self._mockclass_wrapper is not None:
            setattr(self._mockclass_wrapper, name, value)

    def _get_child_mock(self, **kwds):
        # child mocks created along the way need only be MagicMocks
        return MagicMock(**kwds)

class A(object):
    x = 1
    def __init__(self, value):
        self.value = value
    def get_value_direct(self):
        return self.value
    def get_value_indirect(self):
        return self.value

class B(A):
    def __init__(self, value):
        super(B, self).__init__(value)
    def f(self):
        return 2
    def get_value_direct(self):
        return A.get_value_direct(self)
    def get_value_indirect(self):
        return super(B, self).get_value_indirect()

# nominal behavior
b = B(3)
assert b.get_value_direct() == 3
assert b.get_value_indirect() == 3
assert b.f() == 2
assert b.x == 1

# using mock class
MockClass = type('MockClassWrapper', (), {})
mock = _WrappedMagicMock(A)
mock.wrap(MockClass)

# patch the mock in
B.__bases__ = (MockClass, )
A = MockClass

# set values within the mock
mock.x = 0
mock.get_value_direct.return_value = 0
mock.get_value_indirect.return_value = 0

# mocked behavior
b = B(7)
assert b.get_value_direct() == 0
assert b.get_value_indirect() == 0
assert b.f() == 2
assert b.x == 0
