Python how to get __qualname__ of method wrapped with property.setter - python

I have an instance attribute that I made a property using Python's property decorator.
I then made a setter for the property using the @property_name.setter decorator.
How can I get the __qualname__ of the original method definition, decorated with @property.setter?
Where I Have Looked
Python: __qualname__ of function with decorator
I don't think property uses @functools.wraps()
Python @property.setter
I realize property is actually a descriptor
Decorating a class method after @property
Tells me I may want to use __get__, but I can't figure out the syntax
Example Code
This was written in Python 3.6.
#!/usr/bin/env python3
def print_qualname():
"""Wraps a method, printing its qualified name."""
def print_qualname_decorator(func):
# print(f"func = {func} and dir(): {dir(func)}")
if hasattr(func, "__qualname__"):
print(f"Qualified name = {func.__qualname__}.")
else:
print("Doesn't have qualified name.")
return print_qualname_decorator
class SomeClass:
def __init__(self):
self._some_attr = 0
self._another_attr = 0
@property
def some_attr(self) -> int:
return self._some_attr
@print_qualname()
@some_attr.setter
def some_attr(self, val: int) -> None:
self._some_attr = val
@print_qualname()
def get_another_attr(self) -> int:
return self._another_attr
Output:
Doesn't have qualified name.
Qualified name = SomeClass.get_another_attr.
How can I get the __qualname__ for some_attr from inside the print_qualname decorator? In other words, how do I get SomeClass.some_attr to be output?

You could flip the ordering of the decorators for the setter. Note I've adjusted the print_qualname_decorator to call the underlying function and return it (otherwise the setter will not execute).
from functools import wraps
def print_qualname(func):
"""Wraps a method, printing its qualified name."""
@wraps(func)
def print_qualname_decorator(*args):
if hasattr(func, "__qualname__"):
print(f"Qualified name = {func.__qualname__}.")
else:
print("Doesn't have qualified name.")
return func(*args)
return print_qualname_decorator
class SomeClass:
def __init__(self):
self._some_attr = 0
self._another_attr = 0
@property
def some_attr(self) -> int:
return self._some_attr
@some_attr.setter
@print_qualname
def some_attr(self, val: int) -> None:
self._some_attr = val
@print_qualname
def get_another_attr(self) -> int:
return self._another_attr
Use
In [46]: foo = SomeClass()
In [47]: foo.get_another_attr()
Qualified name = SomeClass.get_another_attr.
Out[47]: 0
In [48]: foo.some_attr = 5
Qualified name = SomeClass.some_attr.
In [49]: foo._some_attr
Out[49]: 5
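As an aside, the property object itself keeps references to its accessor functions, so the qualified name can also be read directly off the descriptor without any decorator. A minimal sketch, assuming the SomeClass from the answer above:
print(SomeClass.some_attr.fget.__qualname__)  # SomeClass.some_attr
print(SomeClass.some_attr.fset.__qualname__)  # SomeClass.some_attr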

Related

ClassVar for variable type defined on __init_subclass__

class School:
def __init__(self) -> None:
self.number = 0
def test(self) -> None:
self.number = 0
class Sophism(School):
def test(self) -> None:
self.number = 1
class Epicureanism(School):
def test(self) -> None:
self.number = 2
PhilosophySchool = TypeVar("PhilosophySchool", bound=School)
class Philosopher:
school: ClassVar[PhilosophySchool] # Type variable "PhilosophySchool" is unbound [valid-type]
def __init_subclass__(cls, /, school: type[PhilosophySchool], **kwargs: object) -> None:
super().__init_subclass__(**kwargs)
cls.school = school()
class Sophist(Philosopher, school=Sophism):
pass
s1 = Sophist()
s2 = Sophist()
s1.school.test() # PhilosophySchool? has no attribute "test"
s1.school.number == s2.school.number == Sophist.school.number # True # PhilosophySchool? has no attribute "number"
s1.school == s2.school == Sophist.school # True # Unsupported left operand type for == (PhilosophySchool?)
I am trying to make a class that automatically instantiates some properties on definition. I get multiple warnings from mypy, but I cannot understand any of them, because this code works in the interpreter.
How can I tell mypy that Philosopher's "school" variable, which I define on subclassing, is always a subclass of School, the very same subclass that I pass on school=Sophism?
In the line s1.school.test(), mypy cannot even tell that school is an instance of Sophism(), not School(), and somehow it thinks it doesn't have test or number, despite School itself having them defined.
There are numerous problems when trying to link a metaclass to an instance, which I believe are due to the implementation immaturity of metaclass type checking. For example, unlike typing.ClassVars on a class (which are accessible from class instances), metaclass instance variables are not accessible from class instances in mypy:
from typing import TYPE_CHECKING
class M(type):
# *not* `ClassVar[int]`, which would definitely break at runtime
# if accessing from the class instance later
var: int
def __init__(
cls, name: str, bases: tuple[type, ...], namespace: dict[str, object]
) -> None:
super().__init__(name, bases, namespace)
cls.var = 0
class C(metaclass=M):
pass
if TYPE_CHECKING:
reveal_type(C.var) # mypy: Revealed type is "builtins.int"
reveal_type(C().var) # mypy: "C" has no attribute "var" [attr-defined]
>>> print(f"{C.var=}")
C.var=0
>>> print(f"{C().var=}")
C().var=0
Instead of relying on things like __init_subclass__ or metaclass __init__ / __new__, which don't (yet?) transfer type variable genericity to classes due to implementation immaturity, I would just implement a read-only descriptor for Philosopher.school which is accessible from both the class and the instance:
from __future__ import annotations
import typing as t
if t.TYPE_CHECKING:
class _schooldescriptor:
def __init__(
self, func: t.Callable[[Philosopher[PhilosophySchool]], PhilosophySchool], /
) -> None: ...
# `obj: object` handles both `obj: None` (accessing descriptor from the class)
# and `obj: Philosopher[PhilosophySchool]` (accessing descriptor from the instance).
def __get__(
self, obj: object, class_: type[Philosopher[PhilosophySchool]], /
) -> PhilosophySchool: ...
# Do not implement `__set__` or `__delete__`; implementing
# these would mean the descriptor is no longer read-only.
Define your schools of thought as usual,
class School:
def __init__(self) -> None:
self.number = 0
def test(self) -> None:
self.number = 0
class Sophism(School):
def test(self) -> None:
self.number = 1
class Epicureanism(School):
def test(self) -> None:
self.number = 2
PhilosophySchool = t.TypeVar("PhilosophySchool", bound=School)
Then,
@_schooldescriptor can be used to imitate a read-only class property at type-checking time;
__class_getitem__ can be used to set the school instance at runtime. Despite the discouraging messages in the Python documentation, this override of __class_getitem__ still fulfils the primary purpose of resolving generic types - it just adds additional functionality on top.
class Philosopher(t.Generic[PhilosophySchool]):
if t.TYPE_CHECKING:
@_schooldescriptor
def school(self) -> PhilosophySchool:
...
else:
def __class_getitem__(
cls, school: type[PhilosophySchool], /
) -> type[Philosopher[PhilosophySchool]]:
ConcretePhilosopher: type[
Philosopher[PhilosophySchool]
] = super().__class_getitem__(school)
ConcretePhilosopher.school = school()
return ConcretePhilosopher
Choosing __class_getitem__ as the type resolver (instead of keyword arguments to __init_subclass__ or metaclass __init__ / __new__, which don't work) then allows the following syntactic sugar and complete type-safety when accessing school from any concrete Philosopher class or subclass, or any of their instances:
class Sophist(Philosopher[Sophism]):
pass
if t.TYPE_CHECKING:
reveal_type(Sophist.school) # mypy: revealed type is "Sophism"
reveal_type(Philosopher[Epicureanism].school) # mypy: revealed type is "Epicureanism"
>>> # No typing issues in any of the following!
>>> s1 = Sophist()
>>> s2 = Sophist()
>>> s1.school.test()
>>> s1.school.number == s2.school.number == Sophist.school.number
>>> s1.school == s2.school == Sophist.school
This gets you pretty close. The sticking point seems to be that you can't use a TypeVar as the parameter to a ClassVar -- the best workaround I can think of for that is to have a @classmethod that casts School to PhilosophySchool. (Sadly, @classproperty isn't a thing and the workarounds to that are really annoying so I won't try to replicate them here; having to call school as a method seems like the lesser evil.)
from typing import ClassVar, Generic, TypeVar, cast
class School:
def __init__(self) -> None:
self.number = 0
def test(self) -> None:
self.number = 0
class Sophism(School):
def test(self) -> None:
self.number = 1
class Epicureanism(School):
def test(self) -> None:
self.number = 2
PhilosophySchool = TypeVar("PhilosophySchool", bound=School)
class Philosopher(Generic[PhilosophySchool]):
__school: ClassVar[School]
@classmethod
def school(cls) -> PhilosophySchool:
return cast(PhilosophySchool, cls.__school)
def __init_subclass__(cls, /, school: type[PhilosophySchool], **kwargs: object) -> None:
super().__init_subclass__(**kwargs)
cls.__school = school()
class Sophist(Philosopher[Sophism], school=Sophism):
pass
s1 = Sophist()
s2 = Sophist()
s1.school().test()
assert s1.school().number == s2.school().number == Sophist.school().number
assert s1.school() == s2.school() == Sophist.school()
reveal_type(s1.school()) # revealed type is "Sophism"

How do I use the same getter and setter properties and functions for different attributes of a class in a Pythonic way?

I've got this class that I'm working on that stores Employees details.
I want all attributes to be protected and to be set and retrieved with specific logic, but not each in a unique way. I would like the same logic to apply to my _f_name and _l_name attributes, and perhaps the same logic to be applied to attributes that take booleans and other general cases.
I've got this for the first attribute:
@property
def f_name(self):
return self._f_name
@f_name.setter
def f_name(self, f_name):
if f_name != str(f_name):
raise TypeError("Name must be set to a string")
else:
self._f_name = self._clean_up_string(f_name)
@f_name.deleter
def available(self):
raise AttributeError("Can't delete, you can only change this value.")
How can I apply the same functions and properites to other attributes?
Thaaaanks!
While it may seem like defining a subclass of property is possible, too many details of how a particular property works are left to the getter and setter to define, so it's more straightforward to define a custom property-like descriptor.
class CleanableStringProperty:
def __set_name__(self, owner, name):
self._private_name = "_" + name
self.name = name
def __get__(self, obj, objtype=None):
# Boilerplate to handle accessing the property
# via a class, rather than an instance of the class.
if obj is None:
return self
return getattr(obj, self._private_name)
def __set__(self, obj, value):
if not isinstance(value, str):
raise TypeError(f'{self.name} value must be a str')
setattr(obj, self._private_name, obj._clean_up_string(value))
def __delete__(self, obj):
raise AttributeError("Can't delete, you can only change this value.")
__set_name__ constructs the name of the instance attribute that the getter and setter will use. __get__ acts as the getter, using getattr to retrieve the constructed attribute name from the given object. __set__ validates and modifies the value before using setattr to set the constructed attribute name. __delete__ simply raises an AttributeError, independent of whatever object the caller is trying to remove the attribute from.
Here's a simple demonstration which causes all values assigned to the descriptor to be put into title case.
class Foo:
f_name = CleanableStringProperty()
l_name = CleanableStringProperty()
def __init__(self, first, last):
self.f_name = first
self.l_name = last
def _clean_up_string(self, v):
return v.title()
f = Foo("john", "doe")
assert f.f_name == "John"
assert f.l_name == "Doe"
try:
del f.f_name
except AttributeError:
print("Prevented first name from being deleted")
It would also be possible for the cleaning function, rather than being something that obj is expected to provide, to be passed as an argument to CleanableStringProperty itself. __init__ and __set__ would be modified as
def __init__(self, cleaner):
self.cleaner = cleaner
def __set__(self, obj, value):
if not isinstance(value, str):
raise TypeError(f'{self.name} value must be a str')
setattr(obj, self._private_name, self.cleaner(value))
and the descriptor would be initialized with
class Foo:
fname = CleanableStringProperty(str.title)
Note that Foo is no longer responsible for providing a cleaning method.
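For illustration, a small usage sketch of this cleaner-as-argument variant (assuming the modified __init__ and __set__ above; the str.upper cleaner is just an arbitrary choice for the demo):
class Foo:
    f_name = CleanableStringProperty(str.title)
    l_name = CleanableStringProperty(str.upper)

f = Foo()
f.f_name = "john"
f.l_name = "doe"
print(f.f_name, f.l_name)  # John DOE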
A property is just an implementation of a descriptor, so to create a custom property, you need an object with a __get__, __set__, and/or __delete__ method.
In your case, you could do something like this:
from typing import Any, Callable, Tuple
class ValidatedProperty:
def __set_name__(self, obj, name):
self.name = name
self.storage = f"_{name}"
def __init__(self, validation: Callable[[Any], Tuple[str, Any]]=None):
"""Initializes a ValidatedProperty object
Args:
validation (Callable[[Any], Tuple[str, Any]], optional): A Callable that takes the given value and returns an error string (empty string if no error) and the cleaned-up value. Defaults to None.
"""
self.validation = validation
def __get__(self, instance, owner):
return getattr(instance, self.storage)
def __set__(self, instance, value):
if self.validation:
error, value = self.validation(value)
if error:
raise ValueError(f"Error setting property {self.name}: {error}")
setattr(instance, self.storage, value)
def __delete__(self, instance):
raise AttributeError("Can't delete, you can only change this value.")
Let's define an example class to use this:
class User:
def __name_validation(value):
if not isinstance(value, str):
return (f"Expected string value, received {type(value).__name__}", None)
return ("", value.strip().title())
f_name = ValidatedProperty(validation=__name_validation)
l_name = ValidatedProperty(validation=__name_validation)
def __init__(self, fname, lname):
self.f_name = fname
self.l_name = lname
and test:
u = User("Test", "User")
print(repr(u.f_name)) # 'Test'
u.f_name = 123 # ValueError: Error setting property f_name: Expected string value, received int
u.f_name = "robinson " # Notice the trailing space
print(repr(u.f_name)) # 'Robinson'
u.l_name = "crusoe "
print(repr(u.l_name)) # 'Crusoe'

Dataclasses and property decorator

I've been reading up on Python 3.7's dataclass as an alternative to namedtuples (what I typically use when having to group data in a structure). I was wondering if dataclass is compatible with the property decorator to define getter and setter functions for the data elements of the dataclass. If so, is this described somewhere? Or are there examples available?
It sure does work:
from dataclasses import dataclass
@dataclass
class Test:
_name: str="schbell"
@property
def name(self) -> str:
return self._name
@name.setter
def name(self, v: str) -> None:
self._name = v
t = Test()
print(t.name) # schbell
t.name = "flirp"
print(t.name) # flirp
print(t) # Test(_name='flirp')
In fact, why should it not? In the end, what you get is just a good old class, an instance of type:
print(type(t)) # <class '__main__.Test'>
print(type(Test)) # <class 'type'>
Maybe that's why properties are nowhere mentioned specifically. However, PEP 557's Abstract mentions the general usability of well-known Python class features:
Because Data Classes use normal class definition syntax, you are free
to use inheritance, metaclasses, docstrings, user-defined methods,
class factories, and other Python class features.
TWO VERSIONS THAT SUPPORT DEFAULT VALUES
Most published approaches don't provide a readable way to set a default value for the property, which is quite an important part of a dataclass. Here are two possible ways to do that.
The first way is based on the approach referenced by @JorenV. It defines the default value in _name = field() and utilises the observation that if no initial value is specified, then the setter is passed the property object itself:
from dataclasses import dataclass, field
@dataclass
class Test:
name: str
_name: str = field(init=False, repr=False, default='baz')
@property
def name(self) -> str:
return self._name
@name.setter
def name(self, value: str) -> None:
if type(value) is property:
# initial value not specified, use default
value = Test._name
self._name = value
def main():
obj = Test(name='foo')
print(obj) # displays: Test(name='foo')
obj = Test()
obj.name = 'bar'
print(obj) # displays: Test(name='bar')
obj = Test()
print(obj) # displays: Test(name='baz')
if __name__ == '__main__':
main()
The second way is based on the same approach as @Conchylicultor: bypassing the dataclass machinery by overwriting the field outside the class definition.
Personally I think this way is cleaner and more readable than the first because it follows the normal dataclass idiom to define the default value and requires no 'magic' in the setter.
Even so I'd prefer everything to be self-contained... perhaps some clever person can find a way to incorporate the field update in dataclass.__post_init__() or similar?
from dataclasses import dataclass
@dataclass
class Test:
name: str = 'foo'
@property
def _name(self):
return self._my_str_rev[::-1]
@_name.setter
def _name(self, value):
self._my_str_rev = value[::-1]
# --- has to be called at module level ---
Test.name = Test._name
def main():
obj = Test()
print(obj) # displays: Test(name='foo')
obj = Test()
obj.name = 'baz'
print(obj) # displays: Test(name='baz')
obj = Test(name='bar')
print(obj) # displays: Test(name='bar')
if __name__ == '__main__':
main()
A solution with minimal additional code and no hidden variables is to override the __setattr__ method to do any checks on the field:
from dataclasses import dataclass
@dataclass
class Test:
x: int = 1
def __setattr__(self, prop, val):
if prop == "x":
self._check_x(val)
super().__setattr__(prop, val)
@staticmethod
def _check_x(x):
if x <= 0:
raise ValueError("x must be greater than zero")
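A quick usage sketch of this validation (assuming the Test class above):
t = Test()       # the default x=1 also goes through __setattr__ and passes the check
t.x = 5          # valid assignment
try:
    t.x = 0
except ValueError as exc:
    print(exc)   # x must be greater than zero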
A @property is typically used to store a seemingly public argument (e.g. name) into a private attribute (e.g. _name) through getters and setters, while dataclasses generate the __init__() method for you.
The problem is that this generated __init__() method should interface through the public argument name, while internally setting the private attribute _name.
This is not done automatically by dataclasses.
In order to have the same interface (through name) for setting values and creation of the object, the following strategy can be used (Based on this blogpost, which also provides more explanation):
from dataclasses import dataclass, field
@dataclass
class Test:
name: str
_name: str = field(init=False, repr=False)
@property
def name(self) -> str:
return self._name
@name.setter
def name(self, name: str) -> None:
self._name = name
This can now be used as one would expect from a dataclass with a data member name:
my_test = Test(name='foo')
my_test.name = 'bar'
my_test.name = 'foobar'
print(my_test.name)  # foobar
The above implementation does the following things:
The name class member will be used as the public interface, but it actually does not really store anything
The _name class member stores the actual content. The assignment with field(init=False, repr=False) makes sure that the @dataclass decorator ignores it when constructing the __init__() and __repr__() methods.
The getter/setter for name actually returns/sets the content of _name
The initializer generated through the @dataclass will use the setter that we just defined. It will not initialize _name explicitly, because we told it not to do so (a quick check of this follows below).
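A minimal check of that last point, assuming the Test class above: the generated __init__ assigns through the property setter, so the value ends up in _name only.
my_test = Test(name='foo')
print(my_test.name)   # foo  (read through the property getter)
print(vars(my_test))  # {'_name': 'foo'} -- only the private attribute is stored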
Currently, the best way I found was to overwrite the dataclass fields with properties in a separate child class.
from dataclasses import dataclass, field
@dataclass
class _A:
x: int = 0
class A(_A):
@property
def x(self) -> int:
return self._x
@x.setter
def x(self, value: int):
self._x = value
The class behaves like a regular dataclass, and will correctly define the __repr__ and __init__ fields (A(x=4) instead of A(_x=4)). The drawback is that the properties cannot be read-only.
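A short usage sketch of this subclass approach:
a = A(x=4)
print(a)      # A(x=4) -- the inherited dataclass __repr__ reads through the property
a.x = 7
print(a._x)   # 7
print(A())    # A(x=0), using the default declared on the parent dataclass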
This blog post tries to overwrite the wheels dataclass attribute with a property of the same name.
However, the @property overwrites the default field, which leads to unexpected behavior.
from dataclasses import dataclass, field
@dataclass
class A:
x: int
# same as: `x = property(x) # Overwrite any field() info`
@property
def x(self) -> int:
return self._x
@x.setter
def x(self, value: int):
self._x = value
A() # `A(x=<property object at 0x7f0cf64e5fb0>)` Oops
print(A.__dataclass_fields__) # {'x': Field(name='x',type=<class 'int'>,default=<property object at 0x>,init=True,repr=True}
One way to solve this, while avoiding inheritance, would be to overwrite the field outside the class definition, after the @dataclass decorator has been applied.
@dataclass
class A:
x: int
def x_getter(self):
return self._x
def x_setter(self, value):
self._x = value
A.x = property(x_getter)
A.x = A.x.setter(x_setter)
print(A(x=1))
print(A()) # missing 1 required positional argument: 'x'
It should probably be possible to overwrite this automatically by creating some custom metaclass and setting some field(metadata={'setter': _x_setter, 'getter': _x_getter}).
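As a rough sketch of that idea (my own, not the answer's code), a class decorator that scans field metadata can do the same wiring without a custom metaclass; with_field_properties, _x_getter and _x_setter are hypothetical names:
from dataclasses import dataclass, field, fields

def _x_getter(self):
    return self._x

def _x_setter(self, value):
    self._x = value

def with_field_properties(cls):
    # After @dataclass has processed the class, replace every field that
    # declared getter/setter callables in its metadata with a property.
    for f in fields(cls):
        getter = f.metadata.get("getter")
        if getter is not None:
            setattr(cls, f.name, property(getter, f.metadata.get("setter")))
    return cls

@with_field_properties
@dataclass
class A:
    x: int = field(metadata={"getter": _x_getter, "setter": _x_setter})

print(A(x=1))  # A(x=1) -- __init__ assigns through the property setter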
Here's what I did to define the field as a property in __post_init__. This is a total hack, but it works with dataclasses' dict-based initialization and even with marshmallow_dataclasses.
from dataclasses import dataclass, field, asdict
@dataclass
class Test:
name: str = "schbell"
_name: str = field(init=False, repr=False)
def __post_init__(self):
# Just so that we don't create the property a second time.
if not isinstance(getattr(Test, "name", False), property):
self._name = self.name
Test.name = property(Test._get_name, Test._set_name)
def _get_name(self):
return self._name
def _set_name(self, val):
self._name = val
if __name__ == "__main__":
t1 = Test()
print(t1)
print(t1.name)
t1.name = "not-schbell"
print(asdict(t1))
t2 = Test("llebhcs")
print(t2)
print(t2.name)
print(asdict(t2))
This would print:
Test(name='schbell')
schbell
{'name': 'not-schbell', '_name': 'not-schbell'}
Test(name='llebhcs')
llebhcs
{'name': 'llebhcs', '_name': 'llebhcs'}
I actually started off from the blog post mentioned elsewhere in this thread, but ran into the issue that the dataclass field was being set to type property because the decorator is applied to the class. That is,
@dataclass
class Test:
name: str = field(default='something')
_name: str = field(init=False, repr=False)
@property
def name(self):
return self._name
@name.setter
def name(self, val):
self._name = val
would make name be of type property and not str. So, the setter will actually receive the property object as the argument instead of the field default.
Some wrapping could be good:
# DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
# Version 2, December 2004
#
# Copyright (C) 2020 Xu Siyuan <inqb@protonmail.com>
#
# Everyone is permitted to copy and distribute verbatim or modified
# copies of this license document, and changing it is allowed as long
# as the name is changed.
#
# DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
# TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
#
# 0. You just DO WHAT THE FUCK YOU WANT TO.
from dataclasses import dataclass, field
MISSING = object()
__all__ = ['property_field', 'property_dataclass']
class property_field:
def __init__(self, fget=None, fset=None, fdel=None, doc=None, **kwargs):
self.field = field(**kwargs)
self.property = property(fget, fset, fdel, doc)
def getter(self, fget):
self.property = self.property.getter(fget)
return self
def setter(self, fset):
self.property = self.property.setter(fset)
return self
def deleter(self, fdel):
self.property = self.property.deleter(fdel)
return self
def property_dataclass(cls=MISSING, /, **kwargs):
if cls is MISSING:
return lambda cls: property_dataclass(cls, **kwargs)
remembers = {}
for k in dir(cls):
if isinstance(getattr(cls, k), property_field):
remembers[k] = getattr(cls, k).property
setattr(cls, k, getattr(cls, k).field)
result = dataclass(**kwargs)(cls)
for k, p in remembers.items():
setattr(result, k, p)
return result
You can use it like this:
@property_dataclass
class B:
x: int = property_field(default_factory=int)
@x.getter
def x(self):
return self._x
@x.setter
def x(self, value):
self._x = value
Here's another way which allows you to have fields without a leading underscore:
from dataclasses import dataclass
@dataclass
class Person:
name: str = property
@name
def name(self) -> str:
return self._name
@name.setter
def name(self, value) -> None:
self._name = value
def __post_init__(self) -> None:
if isinstance(self.name, property):
self.name = 'Default'
The result is:
print(Person().name) # Prints: 'Default'
print(Person('Joel').name) # Prints: 'Joel'
print(repr(Person('Jane'))) # Prints: Person(name='Jane')
This method of using properties in dataclasses also works with asdict and is simpler too. Why? Fields that are typed with ClassVar are ignored by the dataclass, but we can still use them in our properties.
from dataclasses import dataclass
from typing import ClassVar
@dataclass
class SomeData:
uid: str
_uid: ClassVar[str]
@property
def uid(self) -> str:
return self._uid
@uid.setter
def uid(self, uid: str) -> None:
self._uid = uid
Ok, so this is my first attempt at having everything self-contained within the class.
I tried a couple different approaches, including having a class decorator right next to #dataclass above the class definition. The issue with the decorator version is that my IDE complains if I decide to use it, and then I lose most of the type hints that the dataclass decorator provides. For example, if I'm trying to pass a field name into the constructor method, it doesn't auto-complete anymore when I add a new class decorator. I suppose that makes sense since the IDE assumes a decorator overwrites the original definition in some important way, however that succeeded in convincing me not to try with the decorator approach.
I ended up adding a metaclass to update the properties associated with dataclass fields to check if the value passed to the setter is a property object, as mentioned by a few other solutions, and that seems to be working well enough now. Either of the two approaches below should work for testing (based on @Martin CR's solution).
from dataclasses import dataclass, field
@dataclass
class Test(metaclass=dataclass_property_support):
name: str = property
_name: str = field(default='baz', init=False, repr=False)
@name
def name(self) -> str:
return self._name
@name.setter
def name(self, value: str) -> None:
self._name = value
# --- other properties like these should not be affected ---
@property
def other_prop(self) -> str:
return self._other_prop
@other_prop.setter
def other_prop(self, value):
self._other_prop = value
And here is an approach which (implicitly) maps the property _name that begins with an underscore to the dataclass field name:
@dataclass
class Test(metaclass=dataclass_property_support):
name: str = 'baz'
@property
def _name(self) -> str:
return self._name[::-1]
@_name.setter
def _name(self, value: str):
self._name = value[::-1]
I personally prefer the latter approach, because it looks a little cleaner in my opinion and also the field _name doesn't show up when invoking the dataclass helper function asdict for example.
The below should work for testing purposes with either of the approaches above. The best part is my IDE doesn't complain about any of the code either.
def main():
obj = Test(name='foo')
print(obj) # displays: Test(name='foo')
obj = Test()
obj.name = 'bar'
print(obj) # displays: Test(name='bar')
obj = Test()
print(obj) # displays: Test(name='baz')
if __name__ == '__main__':
main()
Finally, here is the definition for the metaclass dataclass_property_support that now seems to be working:
from dataclasses import MISSING, Field
from functools import wraps
from typing import Dict, Any, get_type_hints
def dataclass_property_support(*args, **kwargs):
"""Adds support for using properties with default values in dataclasses."""
cls = type(*args, **kwargs)
# the args passed in to `type` will be a tuple of (name, bases, dict)
cls_dict: Dict[str, Any] = args[2]
# this accesses `__annotations__`, but should also work with sub-classes
annotations = get_type_hints(cls)
def get_default_from_annotation(field_: str):
"""Get the default value for the type annotated on a field"""
default_type = annotations.get(field_)
try:
return default_type()
except TypeError:
return None
for f, val in cls_dict.items():
if isinstance(val, property):
public_f = f.lstrip('_')
if val.fset is None:
# property is read-only, not settable
continue
if f not in annotations and public_f not in annotations:
# adding this to check if it's a regular property (not
# associated with a dataclass field)
continue
try:
# Get the value of the field named without a leading underscore
default = getattr(cls, public_f)
except AttributeError:
# The public field is probably type-annotated but not defined
# i.e. my_var: str
default = get_default_from_annotation(public_f)
else:
if isinstance(default, property):
# The public field is a property
# Check if the value of underscored field is a dataclass
# Field. If so, we can use the `default` if one is set.
f_val = getattr(cls, '_' + f, None)
if isinstance(f_val, Field) \
and f_val.default is not MISSING:
default = f_val.default
else:
default = get_default_from_annotation(public_f)
def wrapper(fset, initial_val):
"""
Wraps the property `setter` method to check if we are passed
in a property object itself, which will be true when no
initial value is specified (thanks to @Martin CR).
"""
@wraps(fset)
def new_fset(self, value):
if isinstance(value, property):
value = initial_val
fset(self, value)
return new_fset
# Wraps the `setter` for the property
val = val.setter(wrapper(val.fset, default))
# Replace the value of the field without a leading underscore
setattr(cls, public_f, val)
# Delete the property if the field name starts with an underscore
# This is technically not needed, but it supports cases where we
# define an attribute with the same name as the property, i.e.
# @property
# def _wheels(self):
# return self._wheels
if f.startswith('_'):
delattr(cls, f)
return cls
Update (10/2021):
I've managed to encapsulate the above logic - including support for additional edge cases - into the helper library dataclass-wizard, in case this is of interest to anyone. You can find out more about using field properties in the linked documentation as well. Happy coding!
Update (11/2021):
A more performant approach is to use a metaclass to generate a __post_init__() on the class that only runs once to fix field properties so it works with dataclasses. You can check out the gist here which I added. I was able to test it out and when creating multiple class instances, this approach is optimized as it sets everything up properly the first time __post_init__() is run.
Following a very thorough post about data classes and properties that can be found here, this is the TL;DR version, which solves some very ugly cases where you have to call MyClass(_my_var=2) and get strange __repr__ outputs:
from dataclasses import field, dataclass
@dataclass
class Vehicle:
wheels: int
_wheels: int = field(init=False, repr=False)
def __init__(self, wheels: int):
self._wheels = wheels
@property
def wheels(self) -> int:
return self._wheels
@wheels.setter
def wheels(self, wheels: int):
self._wheels = wheels
Just put the field definition after the property:
@dataclasses.dataclass
class Test:
@property
def driver(self):
print("In driver getter")
return self._driver
@driver.setter
def driver(self, value):
print("In driver setter")
self._driver = value
_driver: typing.Optional[str] =\
dataclasses.field(init=False, default=None, repr=False)
driver: typing.Optional[str] =\
dataclasses.field(init=False, default=driver)
>>> t = Test(1)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: __init__() takes 1 positional argument but 2 were given
>>> t = Test()
>>> t._driver is None
True
>>> t.driver is None
In driver getter
True
>>> t.driver = "asdf"
In driver setter
>>> t._driver == "asdf"
True
>>> t
In driver getter
Test(driver='asdf')
I'm surprised this isn't already an answer but I question its wisdom. The only reason for this answer is to include the property in the representation - because the property's backing store (_driver) is already included in comparison tests and equality tests and so on. For example, this is a common idiom:
class Test:
def __init__(self):
self._driver = "default"
@property
def driver(self):
if self._driver == "default":
self._driver = "new"
return self._driver
>>> t = Test()
>>> t
<__main__.Test object at 0x6fffffec11f0>
>>> t._driver
'default'
>>> t.driver
'new'
Here is the dataclass equivalent - except that it adds the property to the representation. In the standard class, the result of (t._driver,t.driver) is ("default","new"). Notice that the result from the dataclass is instead ("new","new"). This is a very simple example but you must recognize that including properties with possible side effects in special methods may not be the best idea.
@dataclasses.dataclass
class Test:
@property
def driver(self):
print("In driver getter")
if self._driver == "default":
self._driver = "new"
return self._driver
_driver: typing.Optional[str] =\
dataclasses.field(init=False, default="default", repr=False)
driver: typing.Optional[str] =\
dataclasses.field(init=False, default=driver)
>>> t = Test()
>>> t
In driver getter
Test(driver='new')
>>> t._driver
'new'
>>> t.driver
In driver getter
'new'
So I would recommend just using:
@dataclasses.dataclass
class Test:
_driver: typing.Optional[str] =\
dataclasses.field(init=False, default="default", repr=False)
@property
def driver(self):
print("In driver getter")
if self._driver == "default":
self._driver = "new"
return self._driver
>>> t
Test()
>>> t._driver
'default'
>>> t.driver
In driver getter
'new'
And you can sidestep the entire issue, avoiding dataclasses for initialization, by simply using hasattr in the property getter.
@dataclasses.dataclass
class Test:
@property
def driver(self):
print("In driver getter")
if not hasattr(self, "_driver"):
self._driver = "new"
return self._driver
Or by using __post_init__:
@dataclasses.dataclass
class Test:
def __post_init__(self):
self._driver = None
@property
def driver(self):
print("In driver getter")
if self._driver is None:
self._driver = "new"
return self._driver
Why do this? Because init=False dataclass defaults are stored only on the class and not the instance.
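A tiny demonstration of that last point (Demo is a hypothetical class used only for this check):
import dataclasses

@dataclasses.dataclass
class Demo:
    y: str = dataclasses.field(init=False, default="default")

d = Demo()
print("y" in vars(d))  # False: the default lives on the class, not the instance
d.y = "changed"
print("y" in vars(d))  # True: assignment creates the instance attribute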
From the ideas above, I created a class decorator function resolve_abc_prop that creates a new class containing the getter and setter functions, as suggested by @shmee.
def resolve_abc_prop(cls):
def gen_abstract_properties():
""" search for abstract properties in super classes """
for class_obj in cls.__mro__:
for key, value in class_obj.__dict__.items():
if isinstance(value, property) and value.__isabstractmethod__:
yield key, value
abstract_prop = dict(gen_abstract_properties())
def gen_get_set_properties():
""" for each matching data and abstract property pair,
create a getter and setter method """
for class_obj in cls.__mro__:
if '__dataclass_fields__' in class_obj.__dict__:
for key, value in class_obj.__dict__['__dataclass_fields__'].items():
if key in abstract_prop:
def get_func(self, key=key):
return getattr(self, f'__{key}')
def set_func(self, val, key=key):
return setattr(self, f'__{key}', val)
yield key, property(get_func, set_func)
get_set_properties = dict(gen_get_set_properties())
new_cls = type(
cls.__name__,
cls.__mro__,
{**cls.__dict__, **get_set_properties},
)
return new_cls
Here we define a data class AData and a mixin AOpMixin implementing operations
on the data.
from dataclasses import dataclass, field, replace
from abc import ABC, abstractmethod
class AOpMixin(ABC):
@property
@abstractmethod
def x(self) -> int:
...
def __add__(self, val):
return replace(self, x=self.x + val)
Finally, the decorator resolve_abc_prop is then used to create a new class
with the data from AData and the operations from AOpMixin.
@resolve_abc_prop
@dataclass
class A(AOpMixin):
x: int
A(x=4) + 2 # A(x=6)
EDIT #1: I created a python package that makes it possible to overwrite abstract properties with a dataclass: dataclass-abc
After trying different suggestions from this thread I've come up with a slightly modified version of @Samsara Apathika's answer. In short: I removed the "underscore" field variable from the __init__ (so it is available for internal use, but not seen by asdict() or by __dataclass_fields__).
from dataclasses import dataclass, InitVar, field, asdict
@dataclass
class D:
a: float = 10. # Normal attribute with a default value
b: InitVar[float] = 20. # init-only attribute with a default value
c: float = field(init=False) # an attribute that will be defined in __post_init__
def __post_init__(self, b):
if not isinstance(getattr(D, "a", False), property):
print('setting `a` to property')
self._a = self.a
D.a = property(D._get_a, D._set_a)
print('setting `c`')
self.c = self.a + b
self.d = 50.
def _get_a(self):
print('in the getter')
return self._a
def _set_a(self, val):
print('in the setter')
self._a = val
if __name__ == "__main__":
d1 = D()
print(asdict(d1))
print('\n')
d2 = D()
print(asdict(d2))
Gives:
setting `a` to property
setting `c`
in the getter
in the getter
{'a': 10.0, 'c': 30.0}
in the setter
setting `c`
in the getter
in the getter
{'a': 10.0, 'c': 30.0}
I use this idiom to get around the problem of the default value during __init__. Returning None from __set__ if a property object is passed in (as is the case during __init__) will keep the initial default value untouched. Defining the default value of the private attribute as that of the previously defined public attribute ensures the private attribute is available. Type hints are shown with the correct default value, and the comments silence the pylint and mypy warnings:
from dataclasses import dataclass, field
from pprint import pprint
from typing import Any
class dataclass_property(property): # pylint: disable=invalid-name
def __set__(self, __obj: Any, __value: Any) -> None:
if isinstance(__value, self.__class__):
return None
return super().__set__(__obj, __value)
@dataclass
class Vehicle:
wheels: int = 1
_wheels: int = field(default=wheels, init=False, repr=False)
@dataclass_property # type: ignore
def wheels(self) -> int:
print("Get wheels")
return self._wheels
@wheels.setter # type: ignore
def wheels(self, val: int):
print("Set wheels to", val)
self._wheels = val
if __name__ == "__main__":
pprint(Vehicle())
pprint('#####')
pprint(Vehicle(wheels=4))
Output:
└─ $ python wheels.py
Get wheels
Vehicle(wheels=1)
'#####'
Set wheels to 4
Get wheels
Vehicle(wheels=4)
The IDE's type hint now shows the correct default value.
I went through the previous answers, and although most of them answer the need, they tweak the dataclass itself. I came up with an approach using a decorator which I think is more concise:
from dataclasses import dataclass
import wrapt
def dataclass_properties(cls, property_starts='_'):
@wrapt.decorator
def wrapper(wrapped, instance, args, kwargs):
properties = [prop for prop in dir(cls) if isinstance(getattr(cls, prop), property)]
new_kwargs = {f"{property_starts}{k}" if k in properties else k: v for k, v in kwargs.items()}
return wrapped(*args, **new_kwargs)
return wrapt.FunctionWrapper(cls, wrapper)()
@dataclass_properties
@dataclass
class State:
_a: int
b: int
_c: int
@property
def a(self):
return self._a
@a.setter
def a(self, value):
self._a = value
if __name__=='__main__':
s = State(b=1,a=2,_c=1)
print(s) # returns: State(_a=2, b=1, _c=1)
print(s.a) # returns: 2
It can distinguish between properties and those variables that are not properties but start with "_".
It also supports instantiation using the property's true name, in this case "_a".
if __name__=='__main__':
s = State(b=1,_a=2,_c=1)
print(s) # returns: State(_a=2, b=1, _c=1)
It does not solve the problem of the representation, though.
For the use case that brought me to this page, namely to have a dataclass that is immutable, there is a simple option to use @dataclass(frozen=True). This removes all the rather verbose explicit definition of getters and setters. The option eq=True is helpful too.
Credit: a reply from joshorr to this post, linked in a comment to the accepted answer. Also a bit of a classical case of RTFM.
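For reference, a minimal sketch of what frozen=True buys you (Point is a hypothetical example class):
from dataclasses import FrozenInstanceError, dataclass

@dataclass(frozen=True)
class Point:
    x: int = 0
    y: int = 0

p = Point(1, 2)
try:
    p.x = 5
except FrozenInstanceError:
    print("Point is immutable")  # frozen dataclasses reject attribute assignment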

Error in decorating a classmethod

I was working on a decorator that decorates the class. It works fine for instance methods but gives a TypeError for class methods. The code is as below:
def deco_method(fn):
def wrapper(*arg, **kwarg):
"""
Function: Wrapper
"""
print "Calling function {}".format(fn.__name__)
print arg, kwarg
ret_val = fn(*arg, **kwarg)
print "Executed function {}".format(fn.__name__)
return ret_val
return wrapper
def clsdeco(cls):
attributes = cls.__dict__.keys()
for attribute in attributes:
# Do not decorate private methods
if '__' in attribute:
continue
# Get the method
value = getattr(cls, attribute)
if not hasattr(value, '__call__'):
continue
# Check if method is a class method or normal method and decorate accordingly
if value.im_self is None:# non class method
setattr(cls, attribute, deco_method(value))
elif value.im_self is cls: # Check if the method is a class method
setattr(cls, attribute, classmethod(deco_method(value)))
else:
assert False
return cls # return decorated class
@clsdeco
class Person:
message = "Hi Man"
def __init__(self, first_name, last_name):
self.fname = first_name
self.lname = last_name
self.age = None
def get_name(self):
print "Name is '{} {}'".format(self.fname, self.lname)
@classmethod
def greet_person(cls):
print cls.message
p = Person('John', 'snow')
p.greet_person()
It gives an error:
TypeError: greet_person() takes exactly 1 argument (2 given)
If I remove @clsdeco, it works perfectly fine.
Any idea what I am missing here?
If you add the line shown it will work. This is because the @classmethod decorator applied in the class definition changes what getattr(cls, attribute) returns: it will be a descriptor for the named method which adds the cls argument and then calls the real method.
What you need to do is retrieve the "raw" value of the attribute which is just a regular function and then turn it back into a class method by explicitly calling classmethod. This needed "raw" value is stored in the class dictionary __dict__ associated with the same attribute name, hence the need for adding the value = cls.__dict__[attribute].__func__ line.
Something similar will also be required to handle static methods properly. How to do this for all the different types of methods is described in this answer to the question Decorating a method that's already a classmethod? Some of the other answers also describe what's going on in more detail than I have here.
def clsdeco(cls):
attributes = cls.__dict__.keys()
for attribute in attributes:
# Do not decorate private methods
if '__' in attribute:
continue
# Get the method
value = getattr(cls, attribute)
if not hasattr(value, '__call__'):
continue
# Check if method is a class method or normal method and decorate accordingly
if value.im_self is None:# non class method
setattr(cls, attribute, deco_method(value))
elif value.im_self is cls: # Check if the method is class method
value = cls.__dict__[attribute].__func__ # ADDED
setattr(cls, attribute, classmethod(deco_method(value)))
else:
assert False
return cls # return decorated class
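For completeness, a hedged sketch (not part of the original answer) of how the same loop could also cover static methods; it assumes Python 2.7, where classmethod and staticmethod objects expose the wrapped function via __func__, and clsdeco_with_statics is a name introduced here:
def clsdeco_with_statics(cls):
    for attribute, raw in cls.__dict__.items():
        # Do not decorate private/dunder attributes
        if '__' in attribute:
            continue
        if isinstance(raw, staticmethod):
            # Unwrap, decorate, then re-wrap so it stays a static method
            setattr(cls, attribute, staticmethod(deco_method(raw.__func__)))
        elif isinstance(raw, classmethod):
            setattr(cls, attribute, classmethod(deco_method(raw.__func__)))
        elif callable(raw):
            setattr(cls, attribute, deco_method(raw))
    return cls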

Wrap calls to methods of a Python class

I would like to wrap a number of class methods in Python with the same wrapper.
Conceptually it would look something like this in the simplest scenario:
x = 0 # some arbitrary context
class Base(object):
def a(self):
print "a x: %s" % x
def b(self):
print "b x: %s" % x
class MixinWithX(Base):
"""Wrap"""
def a(self):
global x
x = 1
super(MixinWithX, self).a()
x = 0
def b(self):
global x
x = 1
super(MixinWithX, self).b()
x = 0
Of course, when there are more methods than a and b, this becomes a mess. It seems like there ought to be something simpler. Obviously x could be modified in a decorator but one still ends up having a long list of garbage, which instead of the above looks like:
from functools import wraps
def withx(f):
@wraps(f) # good practice
def wrapped(*args, **kwargs):
global x
x = 1
f(*args, **kwargs)
x = 0
return wrapped
class MixinWithX(Base):
"""Wrap"""
@withx
def a(self):
super(MixinWithX, self).a()
@withx
def b(self):
super(MixinWithX, self).b()
I thought about using __getattr__ in the mixin, but of course since methods such as a and b are already defined this is never called.
I also thought about using __getattribute__ but it returns the attribute, not wrapping the call. I suppose __getattribute__ could return a closure (example below) but I am not sure how sound a design that is. Here is an example:
class MixinWithX(Base):
# a list of the methods of our parent class (Base) that are wrapped
wrapped = ['a', 'b']
# application of the wrapper around the methods specified
def __getattribute__(self, name):
original = object.__getattribute__(self, name)
if name in MixinWithX.wrapped:
def wrapper(*args, **kwargs):
global x
x = 1 # in this example, a context manager would be handy.
ret = original(*args, **kwargs)
x = 0
return ret
return wrapper
return original
It has occurred to me that there may be something built into Python that may alleviate the need to manually reproduce every method of the parent class that is to be wrapped. Or maybe a closure in __getattribute__ is the proper way to do this. I would be grateful for thoughts.
Here's my attempt, which allows for a more terse syntax...
x = 0 # some arbitrary context
# Define a simple function to return a wrapped class
def wrap_class(base, towrap):
class ClassWrapper(base):
def __getattribute__(self, name):
original = base.__getattribute__(self, name)
if name in towrap:
def func_wrapper(*args, **kwargs):
global x
x = 1
try:
return original(*args, **kwargs)
finally:
x = 0
return func_wrapper
return original
return ClassWrapper
# Our existing base class
class Base(object):
def a(self):
print "a x: %s" % x
def b(self):
print "b x: %s" % x
# Create a wrapped class in one line, without needing to define a new class
# for each class you want to wrap.
Wrapped = wrap_class(Base, ('a',))
# Now use it
m = Wrapped()
m.a()
m.b()
# ...or do it in one line...
m = wrap_class(Base, ('a',))()
...which outputs...
a x: 1
b x: 0
You can do this using decorators and inspect:
from functools import wraps
import inspect
def withx(f):
@wraps(f)
def wrapped(*args, **kwargs):
print "decorator"
x = 1
f(*args, **kwargs)
x = 0
return wrapped
class MyDecoratingBaseClass(object):
def __init__(self, *args, **kwargs):
for member in inspect.getmembers(self, predicate=inspect.ismethod):
if member[0] in self.wrapped_methods:
setattr(self, member[0], withx(member[1]))
class MyDecoratedSubClass(MyDecoratingBaseClass):
wrapped_methods = ['a', 'b']
def a(self):
print 'a'
def b(self):
print 'b'
def c(self):
print 'c'
if __name__ == '__main__':
my_instance = MyDecoratedSubClass()
my_instance.a()
my_instance.b()
my_instance.c()
Output:
decorator
a
decorator
b
c
There are two general directions I can think of which are useful in your case.
One is using a class decorator. Write a function which takes a class, and returns a class with the same set of methods, decorated (either by creating a new class by calling type(...), or by changing the input class in place).
EDIT: (the actual wrapping/inspecting code I had in mind is similar to
what @girasquid has in his answer, but connecting is done using decoration instead of mixin/inheritance, which I think is more flexible and robust.)
Which brings me to the second option, which is to use a metaclass, which may be cleaner (yet trickier if you're not used to working with metaclasses). If you don't have access to the definition of the original class, or don't want to change the original definition, you can subclass the original class, setting the metaclass on the derived.
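A minimal sketch of that metaclass direction (my own illustration, reusing the question's Base class and withx decorator; make_wrapping_meta and wrapped_methods are names introduced here):
def make_wrapping_meta(wrapper):
    class WrappingMeta(type):
        def __new__(mcls, name, bases, namespace):
            cls = type.__new__(mcls, name, bases, namespace)
            for attr in namespace.get('wrapped_methods', ()):
                # getattr also finds methods inherited from Base, so the
                # subclass does not need to restate a() and b() at all.
                setattr(cls, attr, wrapper(getattr(cls, attr)))
            return cls
    return WrappingMeta

class WithX(Base):
    __metaclass__ = make_wrapping_meta(withx)  # Python 2 metaclass syntax, matching the question
    wrapped_methods = ('a', 'b')

WithX().a()  # prints "a x: 1" because withx sets x around the call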
There is a solution, and it's called a decorator. Google "python decorators" for lots of information.
The basic concept is that a decorator is a function which takes a function as a parameter, and returns a function:
def decorate_with_x(f):
def inner(self):
self.x = 1 #you must always use self to refer to member variables, even if you're not decorating
f(self)
self.x = 0
return inner
class Foo(object):
@decorate_with_x # the @-syntax passes the function defined on the next line
# to the named function, so that it is equivalent to
# foo_func = decorate_with_x(foo_func)
def foo_func(self):
pass
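For completeness, a quick usage check of the decorator above (assuming the Foo class just defined):
f = Foo()
f.foo_func()  # inside the call, f.x is 1
print f.x     # 0 -- the wrapper resets it after the call returns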
