I'd like to bind a specific property/descriptor on an instance of one class to an attribute on an instance of another class (a dynamic property?). Something like:
class Base(object):
    def __init__(self, value=None):
        self._value = value

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, value):
        self._value = value
class Child(Base):
    pass

class Parent(Base):
    def __init__(self, *args, **kwargs):
        super(Parent, self).__init__(*args, **kwargs)
        self.children = []

    def add_attrs(self, attr_child):
        for attr, child in attr_child:
            self.children.append(child)
            setattr(self, attr, child.value)
av = [("id_", Child(123)), ("name", Child("test"))]
p = Parent()
p.add_attrs(av)
assert p.name == p.children[-1].value
# at this point p.name == "test"
p.name = "abc"
# would like above to also set the child.value to "abc"
assert p.name == p.children[-1].value
Ultimately I could just assign the Child instance to name and access p.name.value, but I was wondering whether the binding above is possible, as I think it's a bit nicer. I tried something like:
def get_dict_attr(obj, attr):
    for obj in [obj] + obj.__class__.mro():
        if attr in obj.__dict__:
            return obj.__dict__[attr]
    raise AttributeError

class Parent(Base):
    def __init__(self, *args, **kwargs):
        super(Parent, self).__init__(*args, **kwargs)
        self.children = []

    def add_attrs(self, attr_child):
        for attr, child in attr_child:
            self.children.append(child)
            val = get_dict_attr(child, "value")
            setattr(self, attr, val)
but then p.name is <property object at ...>. I'm wondering whether something like this is possible; I haven't come across anything suggesting it is so far. Thanks!
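One approach that seems to get this behaviour (a sketch, not a full solution; the _bound mapping is my own invention, building on the Base/Child classes above) is to skip properties entirely and route attribute access through __getattr__ and __setattr__:

class Parent(Base):
    def __init__(self, *args, **kwargs):
        super(Parent, self).__init__(*args, **kwargs)
        self._bound = {}  # attr name -> Child
        self.children = []

    def add_attrs(self, attr_child):
        for attr, child in attr_child:
            self.children.append(child)
            self._bound[attr] = child

    def __getattr__(self, attr):
        # only called when normal lookup fails
        bound = self.__dict__.get('_bound', {})
        if attr in bound:
            return bound[attr].value
        raise AttributeError(attr)

    def __setattr__(self, attr, val):
        bound = self.__dict__.get('_bound', {})
        if attr in bound:
            bound[attr].value = val  # forwards to the Child's property setter
        else:
            object.__setattr__(self, attr, val)

With this, p.name = "abc" updates p.children[-1].value, so both asserts pass.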
I'd like to cache objects in the __new__ method, so that the cache can be consulted whenever a new object is constructed, but the following code raises an exception:
RecursionError: maximum recursion depth exceeded while calling a Python object
I have no idea how to break the recursion.
import pickle

class Cache:
    def __init__(self):
        self.d = {}

    def __setitem__(self, obj, val):
        self.d[obj] = pickle.dumps(val)

    def __getitem__(self, obj):
        return pickle.loads(self.d[obj])

class Car:
    cache = Cache()

    def __reduce__(self):
        return (self.__class__, (self.name,))

    def __new__(cls, name):
        try:
            return cls.cache[name]
        except KeyError:
            return cls.new(name)

    @classmethod
    def new(cls, name):
        car = object.__new__(cls)
        car.init(name)
        cls.cache[name] = car
        return car

    def init(self, name):
        self.name = name

    def __repr__(self):
        return self.name

a = Car('audi')
b = Car('audi')
Have a try: this fixed it for me, though it may not be the proper solution; if anyone has a better idea, feel free to leave comments. The recursion comes from __reduce__: it tells pickle to rebuild the object by calling Car(self.name), so unpickling a cache hit calls __new__ again, which finds the name in the cache and unpickles it again, and so on forever.
Just remove the __reduce__ method.
Then implement __getnewargs__ and __getnewargs_ex__, so that unpickling forces a fresh instance instead of hitting the cache:
import pickle

class Cache:
    def __init__(self):
        self.d = {}

    def __setitem__(self, obj, val):
        self.d[obj] = pickle.dumps(val)

    def __getitem__(self, obj):
        return pickle.loads(self.d[obj])

    def __contains__(self, x):
        return x in self.d

class Car:
    cache = Cache()

    def __new__(cls, name, extra=None, _FORCE_CREATE=False):
        if _FORCE_CREATE or name not in cls.cache:
            car = object.__new__(cls)
            car.init(name)
            car.extra = extra
            cls.cache[name] = car
            return car
        else:
            return cls.cache[name]

    def init(self, name):
        self.name = name

    def __repr__(self):
        return self.name

    # override __getnewargs__ and __getnewargs_ex__ to provide args for __new__
    def __getnewargs__(self):
        return (self.name, None, True)

    def __getnewargs_ex__(self):
        return (self.name,), {"_FORCE_CREATE": True}
a = Car('audi', extra="extra_attr")
b = Car('audi')
print(id(a), a.extra) # 1921399938016 extra_attr
print(id(b), b.extra) # 1921399937728 extra_attr
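A quick round-trip check (my addition, not from the original answer; it assumes Python 3.6+, where the default pickle protocol honours __getnewargs_ex__):

dumped = pickle.dumps(a)
c = pickle.loads(dumped)  # no RecursionError: __new__ is called with _FORCE_CREATE=True
print(c is a, c.extra)    # False extra_attr (the pickled state is restored after __new__)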
I am using a class (MainClass) over which I have no control. I want to base my own class (SuperClass) on MainClass and add extra functionality. I have added an attribute (index) to my class (SuperClass), but when I try to convert index to a property, the @index.setter seems to be ignored. What is wrong here?
class MainClass(object):
    def __init__(self):
        self.name = 'abc'

class SuperClass(object):
    def __init__(self, main, *args, **kwargs):
        super(SuperClass, self).__init__(*args, **kwargs)
        self.__main = main
        self._index = 0

    def __getattr__(self, attr):
        return getattr(self.__main, attr)

    def __setattr__(self, attr, val):
        if attr == '_SuperClass__main':
            object.__setattr__(self, attr, val)
        return setattr(self.__main, attr, val)

    @property
    def index(self):
        return self._index

    @index.setter
    def index(self, value):
        self._index = value

main_object = MainClass()
super_object = SuperClass(main_object)
print('x', super_object.index, super_object.name)
super_object.index = 3
print('y', super_object.index)
super_object.index += 2
print('z', super_object.index)
__getattr__ is only used when the normal lookup mechanism fails.
__setattr__, however, is called for all attempts to set an attribute. This means your current definition creates an attribute named index on the
MainClass instance, rather than accessing the property's setter.
>>> super_object._SuperClass__main.index
2
Because __setattr__ always calls setattr(self.__main, attr, val), += is effectively treated as =.
__setattr__ has to handle three cases:
The attribute _SuperClass__main itself, for when you assign to self.__main in __init__.
Assignments to attributes that exist on self.__main
Assignments to attributes specific to SuperClass.
With that in mind, try
def __setattr__(self, attr, val):
    if attr == '_SuperClass__main':
        super().__setattr__(attr, val)
    elif hasattr(self.__main, attr):
        setattr(self.__main, attr, val)
    else:
        super().__setattr__(attr, val)
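A quick check of the three cases (my example, not from the original answer):

main_object = MainClass()
super_object = SuperClass(main_object)
super_object.index = 3       # no such attribute on MainClass -> property setter runs
super_object.name = 'xyz'    # exists on MainClass -> forwarded to it
print(super_object.index)            # 3
print(main_object.name)              # xyz
print('index' in vars(main_object))  # False: no stray attribute on main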
The __setattr__ method you have defined is taking precedence over the @index.setter.
Simplify the code and it should work:
class MainClass(object):
    def __init__(self):
        self.name = 'abc'

class SuperClass(object):
    def __init__(self, main, *args, **kwargs):
        super(SuperClass, self).__init__(*args, **kwargs)
        self.__main = main
        self._index = 0

    @property
    def name(self):
        return self.__main.name

    @name.setter
    def name(self, value):
        self.__main.name = value

    @property
    def index(self):
        return self._index

    @index.setter
    def index(self, value):
        self._index = value

main_object = MainClass()
super_object = SuperClass(main_object)
print('x', super_object.index, super_object.name)
super_object.index = 3
print('y', super_object.index)
super_object.index += 2
print('z', super_object.index)
Output:
x 0 abc
y 3
z 5
I would also suggest the simpler option of just inheriting from MainClass instead of using composition and delegation:
class SuperClass(MainClass):
    def __init__(self):
        super().__init__()
        self._index = 0

    @property
    def index(self):
        return self._index

    @index.setter
    def index(self, value):
        self._index = value
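Usage is then the same, minus the wrapped instance (my example):

super_object = SuperClass()
super_object.index = 3
super_object.index += 2
print(super_object.index, super_object.name)  # 5 abc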
Let's take the following example of a class decorator (origin: http://www.informit.com/articles/article.aspx?p=1309289&seqNum=4):
class GenericDescriptor:
    def __init__(self, getter, setter):
        self.getter = getter
        self.setter = setter

    def __get__(self, instance, owner=None):
        if instance is None:
            return self
        return self.getter(instance)

    def __set__(self, instance, value):
        return self.setter(instance, value)

def valid_string(attr_name, empty_allowed=True, regex=None,
                 acceptable=None):
    def decorator(cls):
        name = "__" + attr_name
        def getter(self):
            return getattr(self, name)
        def setter(self, value):
            assert isinstance(value, str), (attr_name +
                                            " must be a string")
            if not empty_allowed and not value:
                raise ValueError("{0} may not be empty".format(
                    attr_name))
            if ((acceptable is not None and value not in acceptable) or
                    (regex is not None and not regex.match(value))):
                raise ValueError("{attr_name} cannot be set to "
                                 "{value}".format(**locals()))
            setattr(self, name, value)
        setattr(cls, attr_name, GenericDescriptor(getter, setter))
        return cls
    return decorator
@valid_string("name", empty_allowed=False)
class StockItem:
    name = None

    def __init__(self, **kwargs):
        if kwargs.get('second_call'):
            pass
            # proceed normally without calling @valid_string's setter
        self.name = kwargs.get('name', None)
        self.price = kwargs.get('price', None)
        self.quantity = kwargs.get('quantity', None)

if __name__ == "__main__":
    import doctest
    doctest.testmod()

    # valid value for name
    cameras1 = StockItem(name="Camera", price=45.99, quantity=2)
    # invalid value for name according to @valid_string, but I need it to be valid too on 'second_call'
    cameras2 = StockItem(name=67, price=45.99, quantity=2, second_call=True)
The StockItem class constructor is invoked twice, and on the second call I want the @valid_string decorator's validation to be somehow canceled (I don't want the name attribute's value to be altered anymore).
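One approach that might work (a sketch of mine, not an authoritative fix): the decorator stores the real value on the instance under the key "__" + attr_name (here the literal string '__name'), so on the second call you can write to that storage key directly through __dict__, bypassing GenericDescriptor.__set__ and its validation:

@valid_string("name", empty_allowed=False)
class StockItem:
    def __init__(self, **kwargs):
        if kwargs.get('second_call'):
            # write straight into the storage slot the decorator uses,
            # skipping GenericDescriptor.__set__ and the setter's checks
            self.__dict__['__name'] = kwargs.get('name', None)
        else:
            self.name = kwargs.get('name', None)  # validated as usual
        self.price = kwargs.get('price', None)
        self.quantity = kwargs.get('quantity', None)

With this, cameras2.name comes back as 67, untouched by validation; whether relying on the decorator's private storage key is acceptable is a design call.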
I read the Python Cookbook and saw descriptors, particularly the example for enforcing types when using class attributes. I am writing a few classes where that would be useful, but I would also like to enforce immutability. How can I do it? The type-checking descriptor, adapted from the book:
class Descriptor(object):
    def __init__(self, name=None, **kwargs):
        self.name = name
        for key, value in kwargs.items():
            setattr(self, key, value)

    def __set__(self, instance, value):
        instance.__dict__[self.name] = value

# by default allows None
class Typed(Descriptor):
    def __init__(self, expected_types=None, **kwargs):
        self.expected_types = expected_types
        super().__init__(**kwargs)

    def __set__(self, instance, value):
        if value is not None and not isinstance(value, self.expected_types):
            raise TypeError('Expected: {}'.format(str(self.expected_types)))
        super(Typed, self).__set__(instance, value)

class T(object):
    v = Typed(int, name='v')

    def __init__(self, v):
        self.v = v
Attempt #1: add a self.is_set attribute to Typed
# by default allows None
class ImmutableTyped(Descriptor):
    def __init__(self, expected_types=None, **kwargs):
        self.expected_types = expected_types
        self.is_set = False
        super().__init__(**kwargs)

    def __set__(self, instance, value):
        if self.is_set:
            raise ImmutableException(...)
        if value is not None and not isinstance(value, self.expected_types):
            raise TypeError('Expected: {}'.format(str(self.expected_types)))
        self.is_set = True
        super().__set__(instance, value)
This is wrong, because the descriptor is effectively 'global': a single ImmutableTyped instance is shared by all instances of the class. When t2 is instantiated, is_set is already True from the previous object.
class T(object):
    v = ImmutableTyped(int, name='v')

    def __init__(self, v):
        self.v = v

t1 = T(1)
t2 = T(2)  # fails when instantiating: is_set is already True
Attempt #2: I thought instance in __set__ referred to the class containing the attribute, so I tried to check whether instance.__dict__[self.name] was still a Typed. That is also wrong.
Idea #3: make Typed be used more like @property by accepting an fget method that returns the __dict__ of T instances. This would require defining a function in T similar to:

@Typed
def v(self):
    return self.__dict__

which seems wrong.
How to implement immutability AND type checking as a descriptor?
Now this is my approach to the problem:
class ImmutableTyped:
    def __set_name__(self, owner, name):
        self.name = name

    def __init__(self, *, immutable=False, types=None):
        self.immutable = immutable is True
        self.types = types if types else []

    def __get__(self, instance, owner):
        return instance.__dict__[self.name]

    def __set__(self, instance, value):
        if self.immutable is True:
            raise TypeError('read-only attribute')
        elif self.types and not any(isinstance(value, cls)
                                    for cls in self.types):
            raise TypeError('invalid argument type')
        else:
            instance.__dict__[self.name] = value
Side note: __set_name__ can be used so that you don't have to specify the attribute name at initialisation. This means you can just do:

class Foo:
    bar = ImmutableTyped()

and the ImmutableTyped instance will automatically have its name attribute set to 'bar', because __set_name__ assigns it when the class body is executed.
I could not succeed in making such a descriptor; perhaps it is also unnecessarily complicated. The following method + property combination suffices.
# this also allows None to go through
def check_type(data, expected_types):
    if data is not None and not isinstance(data, expected_types):
        raise TypeError('Expected: {}'.format(str(expected_types)))
    return data

class A():
    def __init__(self, value=None):
        self._value = check_type(value, (str, bytes))

    @property
    def value(self):
        return self._value

foo = A()
print(foo.value)   # None
foo.value = 'bla'  # AttributeError

bar = A('goosfraba')
print(bar.value)   # goosfraba
bar.value = 'bla'  # AttributeError
class ImmutableTyped(object):
    def __set_name__(self, owner, name):
        self.name = name

    def __init__(self, *, types=None):
        self.types = tuple(types or [])
        self.instances = {}

    def __get__(self, instance, owner):
        return instance.__dict__[self.name]

    def __set__(self, instance, value):
        is_set = self.instances.setdefault(id(instance), False)
        if is_set:
            raise AttributeError("read-only attribute '%s'" % (self.name))
        if self.types:
            if not isinstance(value, self.types):
                raise TypeError("invalid argument type '%s' for '%s'" % (type(value), self.name))
        self.instances[id(instance)] = True
        instance.__dict__[self.name] = value
Examples:
class Something(object):
    prop1 = ImmutableTyped(types=[int])

something = Something()
something.prop1 = "1"
Will give:
TypeError: invalid argument type '<class 'str'>' for 'prop1'
And:
something = Something()
something.prop1 = 1
something.prop1 = 2
Will give:
AttributeError: read-only attribute 'prop1'
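One caveat with the id(instance) bookkeeping: ids can be reused after an instance is garbage collected, so a recycled id could make a brand-new object look "already set". A variant sketch (my own) keys the flags on weak references instead, so entries disappear along with their instances:

import weakref

class ImmutableTyped(object):
    def __set_name__(self, owner, name):
        self.name = name

    def __init__(self, *, types=None):
        self.types = tuple(types or [])
        self._seen = weakref.WeakSet()  # instances whose attribute is already set

    def __get__(self, instance, owner):
        if instance is None:
            return self
        return instance.__dict__[self.name]

    def __set__(self, instance, value):
        if instance in self._seen:
            raise AttributeError("read-only attribute '%s'" % self.name)
        if self.types and not isinstance(value, self.types):
            raise TypeError("invalid argument type '%s' for '%s'"
                            % (type(value), self.name))
        self._seen.add(instance)
        instance.__dict__[self.name] = value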
I am trying to override the __setattr__ method of a Python class, since I want to call another function each time an instance attribute changes its value. However, I don't want this behaviour in the __init__ method, because during initialization I set some attributes which are going to be used later.
So far I have this solution, without overriding __setattr__ at runtime:
class Foo(object):
    def __init__(self, a, b):
        object.__setattr__(self, 'a', a)
        object.__setattr__(self, 'b', b)
        result = self.process(a)
        for key, value in result.items():
            object.__setattr__(self, key, value)

    def __setattr__(self, name, value):
        print(self.b)  # call to a function using self.b
        object.__setattr__(self, name, value)
However, I would like to avoid all of these object.__setattr__(...) calls and instead override __setattr__ at the end of the __init__ method:
class Foo(object):
    def __init__(self, a, b):
        self.a = a
        self.b = b
        result = self.process(a)
        for key, value in result.items():
            setattr(self, key, value)
        # override self.__setattr__ here

    def aux(self, name, value):
        print(self.b)
        object.__setattr__(self, name, value)
I have tried self.__dict__['__setattr__'] = self.aux and object.__setattr__(self, '__setattr__', self.aux), but neither attempt has any effect. I have read this section of the data model reference, but it looks like assigning your own __setattr__ is a bit tricky.
How could I override __setattr__ at the end of __init__, or at least find a pythonic solution where attribute assignment behaves in the default way only inside the constructor?
Unfortunately, there's no way to "override after __init__" Python's special methods, as a side effect of how their lookup works. The crux of the problem is that Python doesn't actually look at the instance, except to get its class, before it starts looking up the special method; so there's no way for the object's state to affect which method is looked up.
If you don't like the special behavior in __init__, you could refactor your code to put the special knowledge in __setattr__ instead. Something like:
class Foo(object):
    __initialized = False

    def __init__(self, a, b):
        try:
            self.a = a
            self.b = b
            # ...
        finally:
            self.__initialized = True

    def __setattr__(self, attr, value):
        if self.__initialized:
            print(self.b)
        super(Foo, self).__setattr__(attr, value)
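A quick check (my example): assignments inside __init__ stay silent because the class-level __initialized flag is still False at that point; assignments afterwards see the instance-level True and print self.b first.

foo = Foo(1, 2)  # silent: still initializing
foo.a = 10       # prints: 2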
Edit: actually, there is a way to change which special method is looked up, so long as you change the instance's class after it has been initialized. This approach will send you far into the weeds of metaclasses, so without further explanation, here's how that looks:
class AssignableSetattr(type):
    def __new__(mcls, name, bases, attrs):
        def __setattr__(self, attr, value):
            object.__setattr__(self, attr, value)

        init_attrs = dict(attrs)
        init_attrs['__setattr__'] = __setattr__

        init_cls = super(AssignableSetattr, mcls).__new__(mcls, name, bases, init_attrs)
        real_cls = super(AssignableSetattr, mcls).__new__(mcls, name, (init_cls,), attrs)
        init_cls.__real_cls = real_cls

        return init_cls

    def __call__(cls, *args, **kwargs):
        self = super(AssignableSetattr, cls).__call__(*args, **kwargs)
        print("Created", self)
        real_cls = cls.__real_cls
        self.__class__ = real_cls
        return self

class Foo(metaclass=AssignableSetattr):
    def __init__(self, a, b):
        self.a = a
        self.b = b
        for key, value in process(a).items():
            setattr(self, key, value)

    def __setattr__(self, attr, value):
        frob(self.b)
        super(Foo, self).__setattr__(attr, value)

def process(a):
    print("processing")
    return {'c': 3 * a}

def frob(x):
    print("frobbing", x)

myfoo = Foo(1, 2)
myfoo.d = myfoo.c + 1
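If I have traced the metaclass correctly, running this prints "processing" (from process in __init__), then "Created <Foo object ...>", and "frobbing 2" only for the post-init assignment of myfoo.d; the assignments inside __init__ go through the plain __setattr__ of the hidden init class.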
@SingleNegationElimination's answer is great, but it does not work with inheritance, since the child class's __mro__ stores the original class of the superclass. Inspired by his answer, with a little change:
The idea is simple: switch __setattr__ before __init__, and restore it back after __init__ has completed.
class CleanSetAttrMeta(type):
    def __call__(cls, *args, **kwargs):
        real_setattr = cls.__setattr__
        cls.__setattr__ = object.__setattr__
        self = super(CleanSetAttrMeta, cls).__call__(*args, **kwargs)
        cls.__setattr__ = real_setattr
        return self

class Foo(metaclass=CleanSetAttrMeta):
    def __init__(self):
        super(Foo, self).__init__()
        self.a = 1
        self.b = 2

    def __setattr__(self, key, value):
        print('after __init__', self.b)
        super(Foo, self).__setattr__(key, value)

class Bar(Foo):
    def __init__(self):
        super(Bar, self).__init__()
        self.c = 3
>>> f = Foo()
>>> f.a = 10
after __init__ 2
>>>
>>> b = Bar()
>>> b.c = 30
after __init__ 2