Classmethod Item Assignment - python

So I have essentially created a dictionary class that uses classmethods for all of its magic methods; it looks like this:
class ClassDict(object):
    _items = {}

    @classmethod
    def __getitem__(cls, key):
        return cls._items[key]

    @classmethod
    def __setitem__(cls, key, val):
        cls._items[key] = val

    @classmethod
    def __len__(cls):
        return len(cls._items)

    @classmethod
    def __delitem__(cls, key):
        cls._items.__delitem__(key)

    @classmethod
    def __iter__(cls):
        return iter(cls._items)
And so when I try to assign an item to it:
ClassDict['item'] = 'test'
I get an error saying TypeError: 'type' object does not support item assignment. But if I call the actual method __setitem__ directly, like so, it works fine:
ClassDict.__setitem__('item', 'test')
And this also works:
ClassDict().__setitem__('item', 'test')
Is there anything I am doing wrong here that would prevent the first example from working? Is there any way I can fix this issue?

To get the desired behavior of being able to do:
ClassDict['item'] = 'test'
I had to implement the special methods on a metaclass instead, as Martijn pointed out.
So my final implementation looks like this:
class MetaClassDict(type):
    _items = {}

    @classmethod
    def __getitem__(cls, key):
        return cls._items[key]

    @classmethod
    def __setitem__(cls, key, val):
        cls._items[key] = val

    @classmethod
    def __len__(cls):
        return len(cls._items)

    @classmethod
    def __delitem__(cls, key):
        cls._items.__delitem__(key)

    @classmethod
    def __iter__(cls):
        return iter(cls._items)

class ClassDict(object):
    __metaclass__ = MetaClassDict
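For reference, here is the same idea in Python 3 syntax, where the metaclass keyword argument replaces __metaclass__. Special methods are looked up on the type of the object, which for a class is its metaclass, and on the metaclass they already receive the class as their first argument, so the classmethod decorators can be dropped. A minimal sketch, not a drop-in replacement:

class MetaClassDict(type):
    _items = {}

    def __getitem__(cls, key):
        return cls._items[key]

    def __setitem__(cls, key, val):
        cls._items[key] = val

    def __len__(cls):
        return len(cls._items)

    def __delitem__(cls, key):
        del cls._items[key]

    def __iter__(cls):
        return iter(cls._items)

class ClassDict(metaclass=MetaClassDict):
    pass

ClassDict['item'] = 'test'
print(ClassDict['item'])  # test
print(len(ClassDict))     # 1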

Related

How do I define setter, getter for dynamically added attributes

I have a class as follows:
class A:
    def __init__(self):
        pass

    def add_attr(self, name):
        setattr(self, name, 'something')
How do I define custom setter, getter for self.name? I cannot use __setattr__, __getattribute__ because that will change the behaviour of add_attr too.
EDIT: the users of this class will add an arbitrary number of attributes with arbitrary names:
a = A()
a.add_attr('attr1')
a.add_attr('attr2')
I want custom behavior for only these user added attributes.
Building off @Devesh Kumar Singh’s answer, I would implement it something like this:
class A:
    def __init__(self):
        self.attrs = {}

    def __setattr__(self, key, value):
        if key in self.__dict__.get('attrs', {}):
            self.set_attr(key, value)
        else:
            object.__setattr__(self, key, value)

    def __getattribute__(self, key):
        # use object.__getattribute__ for the lookup itself to avoid recursion
        attrs = object.__getattribute__(self, '__dict__').get('attrs', {})
        if key in attrs:
            return object.__getattribute__(self, 'get_attr')(key)
        return object.__getattribute__(self, key)

    def get_attr(self, key):
        r = self.attrs[key]
        # custom get logic here
        return r

    def set_attr(self, key, value):
        # custom set logic here
        self.attrs[key] = value

    def add_attr(self, key, value=None):
        self.attrs[key] = value
add_attr is only used to initialise the attribute the first time. You could also edit __setattr__ to store all new attributes in self.attrs rather than self.__dict__.
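A quick usage sketch of the class above (the attribute names are just examples):

a = A()
a.add_attr('attr1')
a.attr1 = 'value 1'   # routed through set_attr
print(a.attr1)        # routed through get_attr -> value 1
a.other = 5           # not user-added, stored normally on the instance
print(a.other)        # 5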
Custom getter and setter logic? That's what a property is made for. Usually these are used to magically mask function calls and make them look like attribute access:
class MyDoubler(object):
    def __init__(self, x):
        self._x = x

    @property
    def x(self):
        return self._x * 2

    @x.setter
    def x(self, value):
        self._x = value
>>> md = MyDoubler(10)
>>> md.x
20
>>> md.x = 20
>>> md.x
40
>>> md._x
20
But there's no rule saying you can't abuse that power to add custom behavior to your getters and setters.
class A(object):
    def __init__(self):
        pass

    @staticmethod
    def default_getter_factory(name):
        def default_getter(self):
            return getattr(self, name)
        return default_getter

    @staticmethod
    def default_setter_factory(name):
        def default_setter(self, value):
            setattr(self, name, value)
        return default_setter

    def add_attr(self, name, getterfactory=None, setterfactory=None):
        private_name = f"_{name}"
        if getterfactory is None:
            getterfactory = self.__class__.default_getter_factory
        if setterfactory is None:
            setterfactory = self.__class__.default_setter_factory
        getter, setter = getterfactory(private_name), setterfactory(private_name)
        getter = property(getter)
        setattr(self.__class__, name, getter)
        setattr(self.__class__, name, getter.setter(setter))
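For illustration, a brief usage sketch of the factory-based version above (the attribute name foo is just an example):

a = A()
a.add_attr('foo')
a.foo = 42      # goes through the generated setter, stored as a._foo
print(a.foo)    # 42, via the generated getter
print(a._foo)   # 42

Note that because the property is attached to self.__class__, it ends up visible on every instance of A, not just the one add_attr was called on.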
That said, this is all a bit silly, and chances are that whatever you're trying to do is a thing that shouldn't be done. Dynamic programming is all well and good, but if I were to review code that did this, I would think very long and hard about alternative solutions before approving it. This reeks of technical debt to me.
One possibility I could think of is to have a dictionary of dynamic attributes, and set and get the dynamic attributes using the dictionary
class A:
    def __init__(self):
        # dictionary of dynamic attributes
        self.attrs = {}

    # set attribute
    def set_attr(self, name):
        self.attrs[name] = 'something'

    # get attribute
    def get_attr(self, name):
        return self.attrs.get(name)

a = A()
a.set_attr('var')
print(a.get_attr('var'))
The output will be something, the value stored by set_attr.
An alternative is to use property objects to add attributes explicitly outside the class, as described here. Note that properties only take effect when assigned to the class, not to an instance:
class A:
    def __init__(self):
        pass

a = A()
# Add attributes via property objects on the class
A.attr_1 = property(lambda self: self._attr_1,
                    lambda self, value: setattr(self, '_attr_1', value))
A.attr_2 = property(lambda self: self._attr_2,
                    lambda self, value: setattr(self, '_attr_2', value))
# Assign them values and print them
a.attr_1 = 4
a.attr_2 = 6
print(a.attr_1, a.attr_2)
The output will be 4 6
I am going to answer my own question just for reference. This is based on the other answers here. The idea is to fall back to the default __setattr__ and __getattribute__ for attributes not added through add_attr.
class A:
    def __init__(self):
        self.attrs = {}

    def add_attr(self, name):
        self.attrs[name] = 'something'

    def __getattribute__(self, name):
        try:
            val = object.__getattribute__(self, 'attrs')[name]  # valid only if added by user
            # custom logic here, then return
            return val
        except (KeyError, AttributeError):
            return object.__getattribute__(self, name)

    def __setattr__(self, name, val):
        # similar to __getattribute__
        try:
            object.__getattribute__(self, 'attrs')[name]  # valid only if added by user
            # custom logic here
            object.__getattribute__(self, 'attrs')[name] = val
        except (KeyError, AttributeError):
            object.__setattr__(self, name, val)
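And a small usage check of this sketch (with the __setattr__ branch filled in to mirror __getattribute__, as the comment suggests):

a = A()
a.add_attr('attr1')
print(a.attr1)    # something (comes from the custom branch)
a.attr1 = 'else'
print(a.attr1)    # else
a.plain = 1       # not user-added, handled by object.__setattr__
print(a.plain)    # 1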

Calling property setter using event handler

I use the following class to define an event:
class Event(object):
    def __init__(self):
        self.handlers = set()

    def handle(self, handler):
        self.handlers.add(handler)
        return self

    def unhandle(self, handler):
        try:
            self.handlers.remove(handler)
        except:
            raise ValueError("Handler is not handling this event, so cannot unhandle it.")
        return self

    def fire(self, *args, **kwargs):
        for handler in self.handlers:
            print(handler)
            handler(*args, **kwargs)

    def getHandlerCount(self):
        return len(self.handlers)

    __iadd__ = handle
    __isub__ = unhandle
    __call__ = fire
    __len__ = getHandlerCount
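For context, a minimal usage sketch of this Event helper:

def on_change(value):
    print("changed to", value)

ev = Event()
ev += on_change     # __iadd__ -> handle
ev(42)              # __call__ -> fire; prints the handler and then "changed to 42"
print(len(ev))      # 1
ev -= on_change     # __isub__ -> unhandle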
I have some model class defined like this:
class SomeModel(object):
    def __init__(self):
        self._foo = 0
        self.fooChanged = Event()

    @property
    def foo(self):
        return self._foo

    @foo.setter
    def foo(self, value):
        self._foo = value
        self.fooChanged(value)
Now, suppose that I want to change foo like this:
model = SomeModel()
other_model = SomeModel()
model.fooChanged += other_model.foo
model.foo = 1
After model.foo = 1, I get the following error:
TypeError: 'int' object is not callable
Now, suppose that I use this code to define the model:
class SomeModel(object):
    def __init__(self):
        self._foo = 0
        self.fooChanged = Event()

    def get_foo(self):
        return self._foo

    def set_foo(self, value):
        self._foo = value
        self.fooChanged(value)

    foo = property(get_foo, set_foo)
and this code to change the value of foo:
model = SomeModel()
other_model = SomeModel()
model.fooChanged += other_model.set_foo
model.foo = 1
The second version works fine; however, it seems a little un-Pythonic to me. I have to define a get_foo method, which I'd like to avoid (since properties are available). Is there some other workaround here, so the first version of the code could run?
Note: the error will depend on the type of self._foo. If it's None, the error will state that NoneType is not callable; if it's a string, it will state that a str object is not callable.
After a lot of digging, I found this answer to be very informative and it pushed me in the right direction.
Using this knowledge, I was able to solve this problem by using:
model.fooChanged += lambda value: type(other_model).foo.__set__(other_model, value)
or
model.fooChanged += lambda value: type(other_model).foo.fset(other_model, value)
The latter line looks more Pythonic to me, since no calls to double-underscore functions are made.
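Putting it together, a short end-to-end check of this approach using the classes from the question:

model = SomeModel()
other_model = SomeModel()
model.fooChanged += lambda value: type(other_model).foo.fset(other_model, value)
model.foo = 1           # fires model.fooChanged, which sets other_model.foo
print(other_model.foo)  # 1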
When you write model.fooChanged += other_model.foo, I guess what you actually want is its setter method; but since foo is a property object on the class, you have to get it from the class as other_model.__class__.foo.fset and write:
model.fooChanged += lambda value: other_model.__class__.foo.fset(other_model, value)
OTOH, your second version looks Pythonic to me, as:
Explicit is better than implicit.

Proxy class that isn't yet defined

Consider a registry with a dict-like interface. Each key is a string name and each value is a class. Using it in this order works:
registry['foo'] = FooClass
cls = registry['foo']
instance = cls()
But in this order it wouldn't work, of course:
cls = registry['foo']
registry['foo'] = FooClass
instance = cls()
To support the second use case, I implemented a class-constructor wrapper in a function, but it "denatures" the class. I mean that this won't work:
cls = registry['foo']
registry['foo'] = FooClass
issubclass(cls, FooClass)
I'd like to support that third case, so I'm looking for a better way to proxy the class registry items.
Interesting problem, I would try something like this:
from abc import ABCMeta

class Registry(object):
    def __init__(self):
        self._proxies = {}
        self._classes = {}

    def resolve(self, name):
        try:
            return self._classes[name]
        except KeyError:
            raise KeyError('Cannot resolve "%s".'
                           ' Class not registered yet.' % name)

    def __getitem__(self, name):
        """Return a proxy class bound to `name`."""
        if name not in self._proxies:
            self._proxies[name] = make_proxy(lambda: self.resolve(name))
        return self._proxies[name]

    def __setitem__(self, name, val):
        """Store a class for `name`."""
        self._classes[name] = val

def make_proxy(resolve):
    """
    Return a proxy class.

    :param resolve: a function that returns the actual class
    """
    class ProxyMeta(ABCMeta):
        """
        Custom metaclass based on ABCMeta that forwards various checks
        to the resolved class.
        """
        def __eq__(self, y):
            return resolve() == y

        def __repr__(self):
            return repr(resolve())

        def __str__(self):
            return str(resolve())

    class Proxy(object):
        """
        The actual proxy class.
        """
        __metaclass__ = ProxyMeta

        def __new__(cls, *args, **kwargs):
            """Calling this class returns an instance of the resolved class."""
            return resolve()(*args, **kwargs)

        @classmethod
        def __subclasshook__(cls, subclass):
            """issubclass() overwrite."""
            return issubclass(resolve(), subclass)

    return Proxy
>>> registry = Registry()
>>> List = registry['list']
>>> List
KeyError: 'Cannot resolve "list". Class not registered yet.'
>>> registry['list'] = list
>>> List
<type 'list'>
>>> issubclass(List, List)
True
>>> issubclass(list, List)
True
>>> List == list
True
>>> List()
[]
>>> registry['list'] = tuple
>>> List()
()

Implementing Python persistent properties

In a class, I want to define N persistent properties. I can implement them as follows:
@property
def prop1(self):
    return self.__prop1

@prop1.setter
def prop1(self, value):
    self.__prop1 = value
    persistenceManagement()

@property
def prop2(self):
    return self.__prop2

@prop2.setter
def prop2(self, value):
    self.__prop2 = value
    persistenceManagement()

[...]

@property
def propN(self):
    return self.__propN

@propN.setter
def propN(self, value):
    self.__propN = value
    persistenceManagement()
Of course, the only thing that differs between these blocks is the property name (prop1, prop2, ..., propN). persistenceManagement() is a function that has to be called when the value of one of these properties changes.
Since these blocks of code are identical except for a single piece of information (i.e., the property name), I suppose there must be some way to replace each of these blocks with a single line declaring the existence of a persistent property with a given name. Something like:
def someMagicalPatternFunction(...):
    [...]

someMagicalPatternFunction("prop1")
someMagicalPatternFunction("prop2")
[...]
someMagicalPatternFunction("propN")
...or maybe some decorating trick that I cannot see at the moment. Does someone have an idea how this could be done?
Properties are just descriptors under the hood, and you can create your own descriptor class and use it:
class MyDescriptor(object):
    def __init__(self, name, func):
        self.func = func
        self.attr_name = '__' + name

    def __get__(self, instance, owner):
        return getattr(instance, self.attr_name)

    def __set__(self, instance, value):
        setattr(instance, self.attr_name, value)
        self.func(self.attr_name)

def postprocess(attr_name):
    print 'postprocess called after setting', attr_name

class Example(object):
    prop1 = MyDescriptor('prop1', postprocess)
    prop2 = MyDescriptor('prop2', postprocess)

obj = Example()
obj.prop1 = 'answer'  # prints 'postprocess called after setting __prop1'
obj.prop2 = 42        # prints 'postprocess called after setting __prop2'
Optionally you can make it a little easier to use with something like this:
def my_property(name, postprocess=postprocess):
return MyDescriptor(name, postprocess)
class Example(object):
prop1 = my_property('prop1')
prop2 = my_property('prop2')
If you like the decorator @ syntax, you could do it this way (which also saves having to type the name of the property twice); however, the dummy functions it requires seem a little weird...
def my_property(method):
    name = method.__name__
    return MyDescriptor(name, postprocess)

class Example(object):
    @my_property
    def prop1(self): pass

    @my_property
    def prop2(self): pass
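A quick check that the decorator-style spelling behaves the same as the explicit version above:

obj = Example()
obj.prop1 = 'answer'  # prints 'postprocess called after setting __prop1'
print(obj.prop1)      # answer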
The property class (yes it's a class) is just one possible implementation of the descriptor protocol (which is fully documented here: http://docs.python.org/2/howto/descriptor.html). Just write your own custom descriptor and you'll be done.

How to use __setattr__ correctly, avoiding infinite recursion

I want to define a class containing read and write methods, which can be called as follows:
instance.read
instance.write
instance.device.read
instance.device.write
To avoid using nested ("interlaced") classes, my idea was to override the __getattr__ and __setattr__ methods and check whether the given name is device, redirecting it back to self. But I ran into infinite recursion. The example code is as follows:
class MyTest(object):
    def __init__(self, x):
        self.x = x

    def __setattr__(self, name, value):
        if name == "device":
            print "device test"
        else:
            setattr(self, name, value)

test = MyTest(1)
As __init__ tries to create a new attribute x, it calls __setattr__, which calls setattr, which calls __setattr__ again, and so on. How do I need to change this code so that, in this case, a new attribute x of self is created, holding the value 1?
Or is there any better way to handle calls like instance.device.read to be 'mapped' to instance.read?
As there are always questions about the why: I need to create abstractions of XML-RPC calls, for which very easy methods like myxmlrpc.instance.device.read and similar can be created. I need to 'mock' this up to mimic such multi-dot method calls.
You must call the parent class __setattr__ method:
class MyTest(object):
    def __init__(self, x):
        self.x = x

    def __setattr__(self, name, value):
        if name == "device":
            print "device test"
        else:
            super(MyTest, self).__setattr__(name, value)
            # in python3+ you can omit the arguments to super:
            # super().__setattr__(name, value)
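A quick check of the behaviour of the class above:

test = MyTest(1)
print(test.x)        # 1, stored normally via the parent __setattr__
test.device = "foo"  # intercepted: prints "device test" and stores nothing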
Regarding best practice, since you plan to use this via XML-RPC, I think this is probably better done inside the _dispatch method.
A quick and dirty way is to simply do:
class My(object):
    def __init__(self):
        self.device = self
Or you can modify self.__dict__ from inside __setattr__():
class SomeClass(object):
    def __setattr__(self, name, value):
        print(name, value)
        self.__dict__[name] = value

    def __init__(self, attr1, attr2):
        self.attr1 = attr1
        self.attr2 = attr2

sc = SomeClass(attr1=1, attr2=2)
sc.attr1 = 3
You can also call object.__setattr__ directly:
class TestClass:
    def __init__(self):
        self.data = 'data'

    def __setattr__(self, name, value):
        print("Attempt to edit the attribute %s" % (name))
        object.__setattr__(self, name, value)
Or you can just use @property:
class MyTest(object):
    def __init__(self, x):
        self.x = x

    @property
    def device(self):
        return self
If you don't want to specify which attributes can or cannot be set, you can split the class to delay the get/set hooks until after initialization:
class MyTest(object):
    def __init__(self, x):
        self.x = x
        self.__class__ = _MyTestWithHooks

class _MyTestWithHooks(MyTest):
    def __setattr__(self, name, value):
        ...
    def __getattr__(self, name):
        ...

if __name__ == '__main__':
    a = MyTest(12)
    ...
As noted in the code you'll want to instantiate MyTest, since instantiating _MyTestWithHooks will result in the same infinite recursion problem as before.
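For completeness, a minimal filled-in version of that sketch, assuming the same device-redirect behaviour as the question; the hook bodies are illustrative only:

class MyTest(object):
    def __init__(self, x):
        self.x = x                          # no hooks active yet, plain attribute set
        self.__class__ = _MyTestWithHooks   # switch on the hooks from now on

class _MyTestWithHooks(MyTest):
    def __setattr__(self, name, value):
        if name == "device":
            print("device test")
        else:
            object.__setattr__(self, name, value)

    def __getattr__(self, name):
        # only called for attributes not found through normal lookup
        if name == "device":
            return self
        raise AttributeError(name)

if __name__ == '__main__':
    a = MyTest(12)
    print(a.device.x)   # 12: a.device is a itself
    a.y = 3             # stored via object.__setattr__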
