I want to make a data object:
class GameData:
    def __init__(self, data={}):
        self.data = data

    def __getitem__(self, item):
        return self.data[item]

    def __setitem__(self, key, value):
        self.data[key] = value

    def __getattr__(self, item):
        return self.data[item]

    def __setattr__(self, key, value):
        self.data[key] = value

    def __repr__(self):
        return str(self.data)
When I create a GameData object, I get a RecursionError. How can I stop these methods from calling each other recursively?
In the assignment self.data = data, your __setattr__ is called (it is invoked for every attribute assignment). Its body reads self.data, but the instance has no attribute data yet, so __getattr__ is called. __getattr__ also reads self.data, which calls __getattr__ again, and so on: infinite recursion.
Use object.__setattr__(self, 'data', data) in __init__ to create the underlying dictionary without going through your own __setattr__.
class GameData:
    def __init__(self, data=None):
        # bypass our own __setattr__ while the 'data' attribute does not exist yet
        object.__setattr__(self, 'data', {} if data is None else data)

    def __getitem__(self, item):
        return self.data[item]

    def __setitem__(self, key, value):
        self.data[key] = value

    def __getattr__(self, item):
        return self.data[item]

    def __setattr__(self, key, value):
        self.data[key] = value

    def __repr__(self):
        return str(self.data)
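A quick check that the fixed class behaves as intended (the keys used here are only for illustration):

gd = GameData()
gd['hp'] = 100            # __setitem__
gd.mana = 50              # __setattr__ stores into the dict
print(gd.hp, gd['mana'])  # 100 50  (__getattr__ / __getitem__)
print(gd)                 # {'hp': 100, 'mana': 50}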
For details, see the __getattr__ documentation in the Python data model reference.
Additionally, do not use a mutable object such as {} as a default parameter value: the default is created only once, so the same dictionary would be shared between all GameData instances created without an explicit argument.
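A minimal sketch of that pitfall, assuming the class above but with the default argument changed back to data={}:

a = GameData()     # no argument, so both instances get the single default dict
b = GameData()
a['score'] = 1
print(b['score'])  # 1 -- b sees a's data because {} was created only once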
I am using a class (MainClass) over which I have no control. I want to base my class on MainClass but add extra functionality. I have added an attribute (index) to my class (SuperClass), but when I try to convert index to a property, the @index.setter seems to be ignored. What is wrong here?
class MainClass(object):
    def __init__(self):
        self.name = 'abc'


class SuperClass(object):
    def __init__(self, main, *args, **kwargs):
        super(SuperClass, self).__init__(*args, **kwargs)
        self.__main = main
        self._index = 0

    def __getattr__(self, attr):
        return getattr(self.__main, attr)

    def __setattr__(self, attr, val):
        if attr == '_SuperClass__main':
            object.__setattr__(self, attr, val)
        return setattr(self.__main, attr, val)

    @property
    def index(self):
        return self._index

    @index.setter
    def index(self, value):
        self._index = value


main_object = MainClass()
super_object = SuperClass(main_object)

print('x', super_object.index, super_object.name)
super_object.index = 3
print('y', super_object.index)
super_object.index += 2
print('z', super_object.index)
__getattr__ is only used when the normal lookup mechanism fails.
__setattr__, however, is called for all attempts to set an attribute. This means your current definition creates an attribute named index on the
MainClass instance, rather than accessing the property's setter.
>>> super_object._SuperClass__main.index
2
Because __setattr__ always calls setattr(self.__main, attr, val), += is effectively treated as =.
__setattr__ has to handle three cases:
The attribute _SuperClass__main itself, for when you assign to self.__main in __init__.
Assignments to attributes that exist on self.__main.
Assignments to attributes specific to SuperClass.
With that in mind, try
def __setattr__(self, attr, val):
    if attr == '_SuperClass__main':
        super().__setattr__(attr, val)
    elif hasattr(self.__main, attr):
        setattr(self.__main, attr, val)
    else:
        super().__setattr__(attr, val)
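A quick check of the three cases, assuming the rest of SuperClass stays as in the question:

main_object = MainClass()
super_object = SuperClass(main_object)

super_object.index = 3       # SuperClass-specific: handled by the property setter
super_object.name = 'xyz'    # exists on the wrapped object: forwarded to main_object
print(super_object.index)                # 3
print(main_object.name)                  # xyz
print('index' in vars(main_object))      # False -- nothing leaked onto MainClass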
The __setattr__ method you have defined is taking precedence over the @index.setter.
Simplify the code and it should work:
class MainClass(object):
    def __init__(self):
        self.name = 'abc'


class SuperClass(object):
    def __init__(self, main, *args, **kwargs):
        super(SuperClass, self).__init__(*args, **kwargs)
        self.__main = main
        self._index = 0

    @property
    def name(self):
        return self.__main.name

    @name.setter
    def name(self, value):
        self.__main.name = value

    @property
    def index(self):
        return self._index

    @index.setter
    def index(self, value):
        self._index = value


main_object = MainClass()
super_object = SuperClass(main_object)

print('x', super_object.index, super_object.name)
super_object.index = 3
print('y', super_object.index)
super_object.index += 2
print('z', super_object.index)
Output:
x 0 abc
y 3
z 5
I would also suggest the simpler option of just inheriting from MainClass instead of using composition and delegation:
class SuperClass(MainClass):
    def __init__(self):
        super().__init__()
        self._index = 0

    @property
    def index(self):
        return self._index

    @index.setter
    def index(self, value):
        self._index = value
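With inheritance there is no separate MainClass instance to pass in; a short check of this version (same expected output as the test above):

super_object = SuperClass()
print('x', super_object.index, super_object.name)  # x 0 abc
super_object.index = 3
print('y', super_object.index)                     # y 3
super_object.index += 2
print('z', super_object.index)                     # z 5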
I read the Python Cookbook and saw descriptors, particularly the example for enforcing types on class attributes. I am writing a few classes where that would be useful, but I would also like to enforce immutability. How can I do that? Here is the type-checking descriptor, adapted from the book:
class Descriptor(object):
    def __init__(self, name=None, **kwargs):
        self.name = name
        for key, value in kwargs.items():
            setattr(self, key, value)

    def __set__(self, instance, value):
        instance.__dict__[self.name] = value


# by default allows None
class Typed(Descriptor):
    def __init__(self, expected_types=None, **kwargs):
        self.expected_types = expected_types
        super().__init__(**kwargs)

    def __set__(self, instance, value):
        if value is not None and not isinstance(value, self.expected_types):
            raise TypeError('Expected: {}'.format(str(self.expected_types)))
        super(Typed, self).__set__(instance, value)


class T(object):
    v = Typed(int)

    def __init__(self, v):
        self.v = v
Attempt #1: add a self.is_set attribute to Typed
# by default allows None
class ImmutableTyped(Descriptor):
    def __init__(self, expected_types=None, **kwargs):
        self.expected_types = expected_types
        self.is_set = False
        super().__init__(**kwargs)

    def __set__(self, instance, value):
        if self.is_set:
            raise ImmutableException(...)
        if value is not None and not isinstance(value, self.expected_types):
            raise TypeError('Expected: {}'.format(str(self.expected_types)))
        self.is_set = True
        super().__set__(instance, value)
This is wrong because, as in the following, the ImmutableTyped instance is a class attribute and therefore a single object shared by all instances of T. When t2 is instantiated, is_set is already True from the previous object.
class T(object):
    v = ImmutableTyped(int)

    def __init__(self, v):
        self.v = v


t1 = T(1)
t2 = T(2)  # fails: is_set is already True on the shared descriptor
Attempt #2: I thought instance in __set__ referred to the class containing the attribute, so I tried to check whether instance.__dict__[self.name] was still a Typed. That is also wrong.
Idea #3: make Typed usable more like @property by accepting an fget method that returns the __dict__ of T instances. This would require defining a function in T similar to:
@Typed
def v(self):
    return self.__dict__
which seems wrong.
How to implement immutability AND type checking as a descriptor?
Now this is my approach to the problem:
class ImmutableTyped:
    def __set_name__(self, owner, name):
        self.name = name

    def __init__(self, *, immutable=False, types=None):
        self.immutable = immutable is True
        self.types = types if types else []

    def __get__(self, instance, owner):
        return instance.__dict__[self.name]

    def __set__(self, instance, value):
        if self.immutable is True:
            raise TypeError('read-only attribute')
        elif not any(isinstance(value, cls)
                     for cls in self.types):
            raise TypeError('invalid argument type')
        else:
            instance.__dict__[self.name] = value
Side note: __set_name__ can be used to allow you to not specify the attribute name in initialisation. This means you can just do:
class Foo:
    bar = ImmutableTyped()
and the ImmutableTyped instance will automatically get bar as its name attribute, because that is what the __set_name__ method stores.
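A minimal usage sketch of the descriptor above (the class Point and its fields are made up for illustration):

class Point:
    x = ImmutableTyped(types=[int, float])
    version = ImmutableTyped(immutable=True)


p = Point()
p.x = 3.5         # passes the isinstance check and is stored in p.__dict__
p.x = 'oops'      # raises TypeError: invalid argument type
p.version = 2     # raises TypeError: read-only attribute

Note that with immutable=True the attribute can never be assigned through the descriptor at all; the flag is a blanket write guard rather than a write-once switch.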
I could not succeed in making such a descriptor. Perhaps it is also unnecessarily complicated; the following combination of a checking function and a read-only property suffices.
# this also allows None to go through
def check_type(data, expected_types):
    if data is not None and not isinstance(data, expected_types):
        raise TypeError('Expected: {}'.format(str(expected_types)))
    return data


class A():
    def __init__(self, value=None):
        self._value = check_type(value, (str, bytes))

    @property
    def value(self):
        return self._value


foo = A()
print(foo.value)    # None
foo.value = 'bla'   # AttributeError

bar = A('goosfraba')
print(bar.value)    # goosfraba
bar.value = 'bla'   # AttributeError
class ImmutableTyped(object):
    def __set_name__(self, owner, name):
        self.name = name

    def __init__(self, *, types=None):
        self.types = tuple(types or [])
        self.instances = {}
        return None

    def __get__(self, instance, owner):
        return instance.__dict__[self.name]

    def __set__(self, instance, value):
        is_set = self.instances.setdefault(id(instance), False)
        if is_set:
            raise AttributeError("read-only attribute '%s'" % (self.name))
        if self.types:
            if not isinstance(value, self.types):
                raise TypeError("invalid argument type '%s' for '%s'" % (type(value), self.name))
        self.instances[id(instance)] = True
        instance.__dict__[self.name] = value
        return None
Examples:
class Something(object):
    prop1 = ImmutableTyped(types=[int])

something = Something()
something.prop1 = "1"
Will give:
TypeError: invalid argument type '<class 'str'>' for 'prop1'
And:
something = Something()
something.prop1 = 1
something.prop1 = 2
Will give:
AttributeError: read-only attribute 'prop1'
If I have a dictionary of several Object: value pairs, how can I retrieve a value by using the Object's wrapped data as the key?
For example
class Obj():
    def __init__(self, value):
        self.value = value


dct = {Obj(foo): foo_value, Obj(bar): bar_value}

# How to do something like
# >>> dct[foo]
# foo_value
Suppose that foo_value cannot be assigned as an attribute of Obj.
So far, this is what I have (abstracted):
class Obj():
    def __init__(self, name):
        self.name = name

    def __hash__(self):
        return hash(tuple(sorted(self.__dict__.items())))

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        else:
            return False

    def __repr__(self):
        return str(self.name)


dct = {Obj('item1'): 1, Obj('item2'): 2}
print(dct.keys())
dct['item1']
And the output
dict_keys([item1, item2])
Traceback (most recent call last):
File "C:\Users\ivan\Desktop\multi_e.py", line 197, in <module>
dct['item1']
KeyError: 'item1'
What about using a custom implementation of dict?
class FieldDict(dict):
    def __getitem__(self, item):
        return dict.__getitem__(self, Obj(item))

dct = FieldDict({Obj('item1'):1, Obj('item2'):2})
print(dct.keys())
print(dct['item1']) # prints 1
That will not work, as the keys are not the strings but objects of type Obj, even though each of those objects holds such a string.
You could do this: store the key objects in variables and use those references as keys.
x = Obj("item1")
y = Obj("item2")
dct = {x: 1, y: 2}
And to retrieve a value you do:
>>> dct[x]
1
You could roll out your own dict subclass with a custom __getitem__ and __setitem__. You really don't need any complicated __hash__ method on your Obj class after this; just __init__ will suffice.
class Obj:
    def __init__(self, value):
        self.name = value


class MyDict(dict):
    def __setitem__(self, key, value):
        if isinstance(key, Obj):
            dict.__setitem__(self, key.name, value)
        else:
            dict.__setitem__(self, key, value)

    def __getitem__(self, key):
        if isinstance(key, Obj):
            return dict.__getitem__(self, key.name)
        return dict.__getitem__(self, key)
Demo:
>>> dct = MyDict()
>>> dct[Obj('item1')] = 1
>>> dct[Obj('item2')] = 2
>>> dct
{'item1': 1, 'item2': 2}
>>> dct['item1']
1
>>> dct[Obj('item1')]
1
I was trying for a little while, but I think I got what you want. Look (edited thanks to @user2357112):
class Obj():
    def __init__(self, value):
        self.value = value

    def __eq__(self, other):
        """Override the default equals behavior."""
        if isinstance(other, self.__class__):
            return self.value == other.value
        return False

    def __ne__(self, other):
        """Define a non-equality test."""
        return not self.__eq__(other)

    def __hash__(self):
        # equal objects must hash equally, so hash the wrapped value
        return hash(self.value)


class Custom_dict(dict):
    def __getitem__(self, item):
        return dict.__getitem__(self, Obj(item))


x = Custom_dict()
x[Obj('asdf')] = 5
print(x['asdf'])

dct = Custom_dict({Obj('item1'): 1, Obj('item2'): 2})
print([key.value for key in dct.keys()])
print(dct['item1'])

Output:
5
['item1', 'item2']
1
I have two classes A and B, where B inherits from A and overrides a property. A is not under my control so I cannot change it.
The code looks as follows:
class A():
    def __init__(self, value):
        self.value = value

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, value):
        self._value = value


class B(A):
    def __init__(self, value):
        super(B, self).__init__(value)

    @property
    def value(self):
        return super(B, self).value

    @value.setter
    def value(self, value):
        raise AttributeError("can't set attribute")
When I try to call B(1) I obviously get AttributeError: can't set attribute.
I would like to have a different behaviour when value is set from inside class methods:
@value.setter
def value(self, value):
    if set from inside class hierarchy:
        pass
    else:
        raise AttributeError("can't set attribute")
The module inspect does not seem to give me enough information to do this, except checking against a list of known functions.
You can inspect the stack to determine who called the setter and whether the caller is inside the class hierarchy, and then decide whether or not to allow it:
import inspect


def who_called():
    frame = inspect.stack()[2][0]
    if 'self' not in frame.f_locals:
        return None, None
    cls = frame.f_locals['self'].__class__
    method = frame.f_code.co_name
    return cls, method


class A(object):
    def __init__(self, value):
        self._value = value

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, value):
        self._value = value

    # Assuming this existed it would also work
    def change_value(self, value):
        self.value = value
Class B, now with the check:
class B(A):
    def __init__(self, value):
        super(B, self).__init__(value)

    @property
    def value(self):
        return super(B, self).value

    @value.setter
    def value(self, value):
        cls, method = who_called()
        if cls in B.__mro__ and method in A.__dict__:
            self._value = value
        else:
            raise AttributeError("can't set attribute")
Proof:
b = B('does not raise error')
b.change_value('does not raise error')
b.value = 'raises error'
You could look at the source line that made the call to determine whether the call came from inside the class, and only throw an exception if the call did not start with self.value =.
import re
import traceback


class A(object):
    def __init__(self, value):
        self.value = value

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, value):
        self._value = value


class B(A):
    def __init__(self, value):
        super(B, self).__init__(value)

    @property
    def value(self):
        return super(B, self).value

    @value.setter
    def value(self, value):
        call = traceback.extract_stack(limit=2)[0][3]
        if re.match(r'self.value\s*=', call):
            self._value = value  # the assignment came from inside the class: allow it
        else:
            raise AttributeError("can't set attribute")


b = B(1)     # OK
b.value = 3  # Exception
Of course, this breaks as soon as you start calling your variables self:
self = B(1) # OK
self.value = 3 # Meh, doesn't fail
New to Python... I have the following class Key, which extends dict:
class Key( dict ):
    def __init__( self ):
        self = { some dictionary stuff... }

    def __getstate__(self):
        state = self.__dict__.copy()
        return state

    def __setstate__(self, state):
        self.__dict__.update( state )
I want to save an instance of the class with its data using pickle.dump and then retrieve the data using pickle.load. I understand that I am supposed to somehow change __getstate__ and __setstate__, but I am not entirely clear on how to do that. Any help would be greatly appreciated!
I wrote a subclass of dict that does this; here it is:
class AttrDict(dict):
    """A dictionary with attribute-style access. It maps attribute access to
    the real dictionary."""

    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)

    def __getstate__(self):
        # convert to a plain tuple so the state itself is picklable
        return tuple(self.__dict__.items())

    def __setstate__(self, items):
        for key, val in items:
            self.__dict__[key] = val

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, dict.__repr__(self))

    def __setitem__(self, key, value):
        return super(AttrDict, self).__setitem__(key, value)

    def __getitem__(self, name):
        return super(AttrDict, self).__getitem__(name)

    def __delitem__(self, name):
        return super(AttrDict, self).__delitem__(name)

    __getattr__ = __getitem__
    __setattr__ = __setitem__

    def copy(self):
        return AttrDict(self)
It basically converts the state to a basic tuple, and takes that back again to unpickle.
But be aware that you have to have the original source file available to unpickle: pickling does not save the class itself, only the instance state, so Python needs the original class definition to re-create the object.
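A quick round-trip sketch, assuming the AttrDict definition above is importable when you unpickle (the keys are made up for illustration):

import pickle

d = AttrDict(name='abc', hp=100)
d.level = 3                # attribute assignment is mapped onto the dict

restored = pickle.loads(pickle.dumps(d))
print(restored)            # AttrDict({'name': 'abc', 'hp': 100, 'level': 3})
print(restored.level)      # 3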