Defining constants in python class, is self really needed?

I want to define a set of constants in a class like:
class Foo(object):
    (NONEXISTING, VAGUE, CONFIRMED) = (0, 1, 2)

    def __init__(self):
        self.status = VAGUE
However, I get
NameError: global name 'VAGUE' is not defined
Is there a way of defining these constants to be visible inside the class without resorting to global or self.NONEXISTING = 0 etc.?

When you assign to names in the class body, you're creating attributes of the class. You can't refer to them without referring to the class either directly or indirectly. You can use Foo.VAGUE as the other answers say, or you can use self.VAGUE. You do not have to assign to attributes of self.
Usually, using self.VAGUE is what you want because it allows subclasses to redefine the attribute without having to reimplement all the methods that use them -- not that that seems like a sensible thing to do in this particular example, but who knows.
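For example, a minimal sketch of what this looks like (reusing the question's names):
class Foo(object):
    NONEXISTING, VAGUE, CONFIRMED = 0, 1, 2

    def __init__(self):
        self.status = self.VAGUE    # class attribute reached through the instance

class StricterFoo(Foo):
    VAGUE = 99                      # a subclass can override the class attribute

print(Foo().status, StricterFoo().status)   # 1 99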

Instead of:
self.status = VAGUE
try this one:
self.status = Foo.VAGUE
You MUST specify the class.

This one is NOT RECOMMENDED FOR ANY CODE by any means, but an ugly hack like below can be done.
I did this just to have better understanding of Python AST API, so anyone who uses this in real-world code should be shot before it does any harm :-)
#!/usr/bin/python
# -*- coding: utf-8-unix -*-
#
# AST hack to replace symbol reference in instance methods,
# so it will be resolved as a reference to class variables.
#
import inspect, types, ast

def trim(src):
    lines = src.split("\n")
    start = lines[0].lstrip()
    n = lines[0].index(start)
    src = "\n".join([line[n:] for line in lines])
    return src

#
# Method decorator that replaces symbol reference in a method
# so it will use symbols in belonging class instead of the one
# in global namespace.
#
def nsinclude(*args):
    # usecase: @nsinclude()
    # use classname in calling frame as a fallback
    stack = inspect.stack()
    opts = [stack[1][3]]

    def wrap(func):
        if func.func_name == "tempfunc":
            return func

        def invoke(*args, **kw):
            base = eval(opts[0])
            src = trim(inspect.getsource(func))
            basenode = ast.parse(src)

            class hackfunc(ast.NodeTransformer):
                def visit_Name(self, node):
                    try:
                        # if base class (set in @nsinclude) can resolve
                        # given name, modify AST node to use that instead
                        val = getattr(base, node.id)
                        newnode = ast.parse("%s.%s" % (opts[0], node.id))
                        newnode = next(ast.iter_child_nodes(newnode))
                        newnode = next(ast.iter_child_nodes(newnode))
                        ast.copy_location(newnode, node)
                        return ast.fix_missing_locations(newnode)
                    except:
                        return node

            class hackcode(ast.NodeVisitor):
                def visit_FunctionDef(self, node):
                    if func.func_name != "tempfunc":
                        node.name = "tempfunc"
                        hackfunc().visit(node)

            hackcode().visit(basenode)
            newmod = compile(basenode, '<ast>', 'exec')
            eval(newmod)
            newfunc = eval("tempfunc")
            newfunc(*args, **kw)
        return invoke

    # usecase: @nsinclude
    if args and isinstance(args[0], types.FunctionType):
        return wrap(args[0])

    # usecase: @nsinclude("someclass")
    if args and args[0]:
        opts[0] = args[0]
    return wrap

class Bar:
    FOO = 987
    BAR = 876

class Foo:
    FOO = 123
    BAR = 234

    # import from belonging class
    @nsinclude
    def dump1(self, *args):
        print("dump1: FOO = " + str(FOO))

    # import from specified class (Bar)
    @nsinclude("Bar")
    def dump2(self, *args):
        print("dump2: BAR = " + str(BAR))

Foo().dump1()
Foo().dump2()

The only way is to access it through the class name such as
Foo.VAGUE
If you want to access just VAGUE inside __init__ or another method, it would have to be defined inside that function to be accessed that way.
Using self goes through the instance of the class instead.

In Python3, you can also reference VAGUE as:
type(self).VAGUE
This way, you are clearly referencing it as a class attribute and not an object attribute, yet this way is robust against a name change of the class. Also if you override VAGUE in a subclass, the value from the subclass will be used, just like if you were to use self.VAGUE.
Note that this method does not appear to work in Python2, at least not in my tests, where type(self) returned instance instead of the class I instantiated. Therefore Thomas Wouters's answer is probably preferable, considering how widespread Python2 still is.
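As a minimal sketch of that lookup (assumed example, reusing the question's names):
class Foo:
    VAGUE = 1

    def __init__(self):
        self.status = type(self).VAGUE   # resolved on the actual class of the instance

class SubFoo(Foo):
    VAGUE = 5

print(Foo().status, SubFoo().status)   # 1 5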


Python: Dynamically add properties to class instance, properties return function value with inputs

I've been going through all the Stackoverflow answers on dynamic property setting, but for whatever reason I can't seem to get this to work.
I have a class, Evolution_Base, that in its init creates an instance of Value_Differences. Value_Differences should be dynamically creating properties, based on the list I pass, that returns the function value from _get_df_change:
from pandas import DataFrame
from dataclasses import dataclass
import pandas as pd

class Evolution_Base():
    def __init__(self, res_date_0 : DataFrame , res_date_1 : DataFrame):
        @dataclass
        class Results_Data():
            res_date_0_df : DataFrame
            res_date_1_df : DataFrame

        self.res = Results_Data(res_date_0_df= res_date_0,
                                res_date_1_df= res_date_1)

        property_list = ['abc', 'xyz']
        self.difference = Value_Differences(parent = self, property_list=property_list)

    # Shared Functions
    def _get_df_change(self, df_name, operator = '-'):
        df_0 = getattr(self.res.res_date_0_df, df_name.lower())
        df_1 = getattr(self.res.res_date_1_df, df_name.lower())
        return self._df_change(df_1, df_0, operator=operator)

    def _df_change(self, df_1 : pd.DataFrame, df_0 : pd.DataFrame, operator = '-') -> pd.DataFrame:
        """
        Returns df_1 <operator | default = -> df_0
        """
        # is_numeric mask
        m_1 = df_1.select_dtypes('number')
        m_0 = df_0.select_dtypes('number')

        def label_me(x):
            x.columns = ['t_1', 't_0']
            return x

        if operator == '-':
            return label_me(df_1[m_1] - df_0[m_0])
        elif operator == '+':
            return label_me(df_1[m_1] + df_0[m_0])

class Value_Differences():
    def __init__(self, parent : Evolution_Base, property_list = []):
        self._parent = parent

        for name in property_list:

            def func(self, prop_name):
                return self._parent._get_df_change(name)

            # I've tried the following...
            setattr(self, name, property(fget = lambda cls_self: func(cls_self, name)))
            setattr(self, name, property(func(self, name)))
            setattr(self, name, property(func))
It's driving me nuts... Any help appreciated!
My desired outcome is for:
evolution = Evolution_Base(df_1, df_2)
evolution.difference.abc == evolution._df_change('abc')
evolution.difference.xyz == evolution._df_change('xyz')
EDIT: The simple question is really, how do I setattr for a property function?
As asked
how do I setattr for a property function?
To be usable as a property, the accessor function needs to be wrapped as a property and then assigned as an attribute of the class, not the instance.
That function, meanwhile, needs to have a single unbound parameter - which will be an instance of the class, but is not necessarily the current self. Its logic needs to use the current value of name, but late binding will be an issue because of the desire to create lambdas in a loop.
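To illustrate the late-binding pitfall itself, here is a minimal sketch (not from the original post):
# Every lambda closes over the loop variable `name`, not its value at definition
# time, so all of them end up seeing the last value.
funcs = [lambda: name for name in ['abc', 'xyz']]
print([f() for f in funcs])               # ['xyz', 'xyz']

# Binding eagerly (e.g. via a default argument) captures each value separately.
funcs = [lambda name=name: name for name in ['abc', 'xyz']]
print([f() for f in funcs])               # ['abc', 'xyz']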
A clear and simple way to work around this is to define a helper function accepting the Value_Differences instance and the name to use, and then bind the name value eagerly.
Naively:
from functools import partial

def _get_from_parent(name, instance):
    return instance._parent._get_df_change(name)

class Value_Differences:
    def __init__(self, parent: Evolution_Base, property_list = []):
        self._parent = parent
        for name in property_list:
            setattr(Value_Differences, name, property(
                fget = partial(_get_from_parent, name)
            ))
However, this of course has the issue that every instance of Value_Differences will set properties on the class, thus modifying what properties are available for each other instance. Further, in the case where there are many instances that should have the same properties, the setup work will be repeated at each instance creation.
The apparent goal
It seems that what is really sought, is the ability to create classes dynamically, such that a list of property names is provided and a corresponding class pops into existence, with code filled in for the properties implementing a certain logic.
There are multiple approaches to this.
Factory A: Adding properties to an instantiated template
Just like how functions can be nested within each other and the inner function will be an object that can be modified and returned (as is common when creating a decorator), a class body can appear within a function and a new class object (with the same name) is created every time the function runs. (The code in the OP already does this, for the Results_Data dataclass.)
def example():
    class Template:
        pass
    return Template

>>> TemplateA, TemplateB = example(), example()
>>> TemplateA is TemplateB
False
>>> isinstance(TemplateA(), TemplateB)
False
>>> isinstance(TemplateB(), TemplateA)
False
So, a "factory" for value-difference classes could look like
from functools import partial

def _make_value_comparer(property_names, access_func):
    class ValueDifferences:
        def __init__(self, parent):
            self._parent = parent

    for name in property_names:
        setattr(ValueDifferences, name, property(
            fget = partial(access_func, name)
        ))
    return ValueDifferences
Notice that instead of hard-coding a helper, this factory expects to be provided with a function that implements the access logic. That function takes two parameters: a property name, and the ValueDifferences instance. (They're in that order because it's more convenient for functools.partial usage.)
Factory B: Using the type constructor directly
The built-in type in Python has two entirely separate functions.
With one argument, it discloses the type of an object.
With three arguments, it creates a new type. The class syntax is in fact syntactic sugar for a call to this builtin. The arguments are:
a string name (will be set as the __name__ attribute)
a list of classes to use as superclasses (will be set as __bases__)
a dict mapping attribute names to their values (including methods and properties - will become the __dict__, roughly)
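To make the equivalence concrete, here is a minimal sketch (the Point class is just an assumed example):
Point = type('Point', (object,), {'x': 0, 'y': 0})

# ...is roughly what this class statement does behind the scenes:
# class Point(object):
#     x = 0
#     y = 0

print(Point, Point().x)   # <class '__main__.Point'> 0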
In this style, the same factory could look something like:
from functools import partial

def _make_value_comparer(property_names, access_func):
    methods = {
        name: property(fget = partial(access_func, name))
        for name in property_names
    }
    methods['__init__'] = lambda self, parent: setattr(self, '_parent', parent)
    return type('ValueDifferences', (), methods)
Using the factory
In either of the above cases, EvolutionBase would be modified in the same way.
Presumably, every EvolutionBase should use the same ValueDifferences class (i.e., the one that specifically defines abc and xyz properties), so the EvolutionBase class can cache that class as a class attribute, and use it later:
class Evolution_Base():
    def _get_from_parent(name, mvd):
        # mvd._parent will be an instance of Evolution_Base.
        return mvd._parent._get_df_change(name)

    _MyValueDifferences = _make_value_comparer(['abc', 'xyz'], _get_from_parent)

    def __init__(self, res_date_0 : DataFrame , res_date_1 : DataFrame):
        @dataclass
        class Results_Data():
            res_date_0_df : DataFrame
            res_date_1_df : DataFrame

        self.res = Results_Data(res_date_0_df= res_date_0,
                                res_date_1_df= res_date_1)

        self.difference = self._MyValueDifferences(parent = self)
Notice that the cached _MyValueDifferences class no longer requires a list of property names to be constructed. That's because it was already provided when the class was created. The actual thing that varies per instance of _MyValueDifferences, is the parent, so that's all that gets passed.
Simpler approaches
It seems that the goal is to have a class whose instances are tightly associated with instances of Evolution_Base, providing properties specifically named abc and xyz that are computed using the Evolution_Base's data.
That could just be hard-coded as a nested class:
class Evolution_Base:
    class EBValueDifferences:
        def __init__(self, parent):
            self._parent = parent

        @property
        def abc(self):
            return self._parent._get_df_change('abc')

        @property
        def xyz(self):
            return self._parent._get_df_change('xyz')

    def __init__(self, res_date_0 : DataFrame , res_date_1 : DataFrame):
        @dataclass
        class Results_Data():
            res_date_0_df : DataFrame
            res_date_1_df : DataFrame

        self.res = Results_Data(res_date_0_df = res_date_0,
                                res_date_1_df = res_date_1)
        self.difference = self.EBValueDifferences(self)

    # _get_df_change etc. as before
Even simpler, provide corresponding properties directly on Evolution_Base:
class Evolution_Base:
    @property
    def abc_difference(self):
        return self._get_df_change('abc')

    @property
    def xyz_difference(self):
        return self._get_df_change('xyz')

    def __init__(self, res_date_0 : DataFrame , res_date_1 : DataFrame):
        @dataclass
        class Results_Data():
            res_date_0_df : DataFrame
            res_date_1_df : DataFrame

        self.res = Results_Data(res_date_0_df = res_date_0,
                                res_date_1_df = res_date_1)

    # _get_df_change etc. as before

# client code now calls my_evolution_base.abc_difference
# instead of my_evolution_base.difference.abc
If there are a lot of such properties, they could be attached using a much simpler dynamic approach (that would still be reusable for other classes that define a _get_df_change):
def add_df_change_property(name, cls):
    setattr(
        cls, f'{name}_difference',
        property(fget = lambda instance: instance._get_df_change(name))
    )
    return cls  # so it can also be used (via partial) as a class decorator
which can also be adapted for use as a decorator:
from functools import partial

def exposes_df_change(name):
    return partial(add_df_change_property, name)

@exposes_df_change('abc')
@exposes_df_change('def')
class Evolution_Base:
    # `self.difference` can be removed, no other changes needed
    ...
This is quite the rabbit hole. Impossible is a big call, but I will say this: they don't intend you to do this. The 'Pythonic' way of achieving your example use case is the __getattr__ method. You could also override the __dir__ method to insert your custom attributes for discoverability.
This is the code for that:
class Value_Differences():
    def __init__(self, parent : Evolution_Base, property_list = []):
        self._parent = parent
        self._property_list = property_list

    def __dir__(self):
        return sorted(set(
            dir(super(Value_Differences, self)) + \
            list(self.__dict__.keys()) + self._property_list))

    def __getattr__(self, __name: str):
        if __name in self._property_list:
            return self._parent._get_df_change(__name)
But that wasn't the question, and respect for a really, really interesting question. This is one of those things that you look at and say 'hmm, should be possible' and can get almost to a solution. I initially thought what you asked for was technically possible, just very hacky to achieve. But it turns out that it would be very, very weird hackery if it was possible.
Two small foundational things to start with:
Remind ourselves of the hierarchy of Python objects that the runtime is working with when defining and instantiating classes:
The metaclass (defaulting to type), which is used to build classes. I'm going to refer to this as the Metaclass Type Object (MTO).
The class definition, which is used to build objects. I'm going to refer to this as the Class Type Object (CTO).
And the class instance or object, which I'll refer to as the Class Instance Object (CIO).
MTOs are subclasses of type. CTOs are subclasses of object. CIOs are instances of CTOs, but instantiated by MTOs.
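A minimal sketch of the three layers (assumed names, just to make the terminology concrete):
class Meta(type):                  # an MTO: a subclass of type
    pass

class Widget(metaclass=Meta):      # a CTO: built by the MTO, a subclass of object
    pass

w = Widget()                       # a CIO: an instance of the CTO

print(type(w) is Widget)           # True  -- CIOs are instances of CTOs
print(type(Widget) is Meta)        # True  -- CTOs are instances of MTOs
print(issubclass(Widget, object))  # True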
Python runs code inside class definitions as if it was running a function:
class Class1:
    print("1")
    def __init__(self, v1):
        print("4")
    print("2")

print("3")
c1 = Class1("x")
print("5")
gives 1, 2, 3, 4, 5
Put these two things together with:
class Class1:
    def attr1_get(self):
        return 'attr1 value'
    attr1 = property(attr1_get)
we are defining a function attr1_get as part of the class definition. We are then running an inline piece of code that creates an object of type property. Note that this is just the name of the object's type - it isn't a property as you would describe it. Just an object with some attributes, being references to various functions. We then assign that object to an attribute in the class we are defining.
In the terms I used above, once that code is run we have a CTO instantiated as an object in memory that contains an attribute attr1 of type property (an object subclass, containing a bunch of attributes itself - one of which is a reference to the function attr1_get).
That can be used to instantiate an object, the CIO.
This is where the MTO comes in. You instantiate the property object while defining the CTO so that when the runtime applies the MTO to create the CIO from the CTO, an attribute on the CIO will be formed with a custom getter function for that attribute rather than the 'standard' getter function the runtime would use. The property object means something to the type object when it is building a new object.
So when we run:
c1 = Class1()
we don't get a CIO c1 with an attribute attr1 that is an object of type property. The metaclass of type type formed a set of references against the attribute's internal state to all the functions we stored in the property object. Note that this is happening inside the runtime, and you can't call this directly from your code - you just tell the type metaclass to do it by using the property wrapper object.
So if you directly assign a property() result to an attribute of a CIO, you have a Pythonic object assigned that references some functions, but the internal state for the runtime to use to reference the getter, setter, etc. is not set up. The getter of an attribute that contains a property object is the standard getter, and so returns the object instance, not the result of the functions it wraps.
This next bit of code demonstrates how this flows:
print("Let's begin")
class MetaClass1(type):
print("Starting to define MetaClass1")
def __new__(cls, name, bases, dct):
x = super().__new__(cls, name, bases, dct)
print("Metaclass1 __new__({})".format(str(cls)))
return x
print("__new__ of MetaClass1 is defined")
def __init__(cls, name, bases, dct):
print("Metaclass1 __init__({})".format(str(cls)))
print("__init__ of MetaClass1 is defined")
print("Metaclass is defined")
class Class1(object,metaclass=MetaClass1):
print("Starting to define Class1")
def __new__(cls, *args, **kwargs):
print("Class1 __new__({})".format(str(cls)))
return super(Class1, cls).__new__(cls, *args, **kwargs)
print("__new__ of Class1 is defined")
def __init__(self):
print("Class1 __init__({})".format(str(self)))
print("__init__ of Class1 is defined")
def g1(self):
return 'attr1 value'
print("g1 of Class1 is defined")
attr1 = property(g1)
print("Class1.attr1 = ", attr1)
print("attr1 of Class1 is defined")
def addProperty(self, name, getter):
setattr(self, name, property(getter))
print("self.", name, " = ", getattr(self, name))
print("addProperty of Class1 is defined")
print("Class is defined")
c1 = Class1()
print("Instance is created")
print(c1.attr1)
def g2(cls):
return 'attr2 value'
c1.addProperty('attr2', g2)
print(c1.attr2)
I have put all those print statements there to demonstrate the order in which things happen very clearly.
In the middle, you see:
g1 of Class1 is defined
Class1.attr1 = <property object at 0x105115c10>
attr1 of Class1 is defined
We have created an object of type property and assigned it to a class attribute.
Continuing:
addProperty of Class1 is defined
Metaclass1 __new__(<class '__main__.MetaClass1'>)
Metaclass1 __init__(<class '__main__.Class1'>)
Class is defined
The metaclass got instantiated, being passed first itself (__new__) and then the class it will work on (__init__). This happened right as we stepped out of the class definition. I have only included the metaclass to show what will happen with the type metaclass by default.
Then:
Class1 __new__(<class '__main__.Class1'>)
Class1 __init__(<__main__.Class1 object at 0x105124c10>)
Instance is created
attr1 value
self. attr2 = <property object at 0x105115cb0>
<property object at 0x105115cb0>
Class1 is instantiated, providing first its type to __new__ and then its instance to __init__.
We see that attr1 is instantiated properly, but attr2 is not. That is because setattr is being called once the class instance is already constructed and is just saying attr2 is an instance of the class property and not defining attr2 as the actual runtime construct of a property.
Which is made more clear if we run:
print(c1.attr2.fget(c1))
print(c1.attr1.fget(c1))
attr2 (a property object) isn't aware of the class or instance of the containing attribute's parent. The function it wraps still needs to be given the instance to work on.
attr1 doesn't know what to do with that, because as far as it is concerned it is a string object, and has no concept of how the runtime is mapping its getter.
The fundamental reason why what you tried doesn't work is that a property, a use case of a descriptor, by design must be stored as a class variable, not as an instance attribute.
Excerpt from the documentation of descriptor:
To use the descriptor, it must be stored as a class variable in
another class:
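A minimal sketch of that rule (assumed example, not part of the quoted documentation):
class C:
    pass

c = C()
c.p = property(lambda self: 42)   # stored on the instance: the descriptor protocol is ignored
print(c.p)                        # <property object at 0x...>

C.p = property(lambda self: 42)   # stored as a class variable: the getter is invoked
print(C().p)                      # 42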
To create a class with dynamically named properties that has access to a parent class, one elegant approach is to create the class within a method of the main class, and use setattr to create class attributes with dynamic names and property objects. A class created in the closure of a method automatically has access to the self object of the parent instance, avoiding having to manage a clunky _parent attribute like you do in your attempt:
class Evolution_Base:
    def __init__(self, property_list):
        self.property_list = property_list
        self._difference = None

    @property
    def difference(self):
        if not self._difference:
            class Value_Differences:
                pass
            for name in self.property_list:
                # use default value to store the value of name in each iteration
                def func(obj, prop_name=name):
                    return self._get_df_change(prop_name) # access self via closure
                setattr(Value_Differences, name, property(func))
            self._difference = Value_Differences()
        return self._difference

    def _get_df_change(self, df_name):
        return f'df change of {df_name}' # simplified return value for demo purposes
so that:
evolution = Evolution_Base(['abc', 'xyz'])
print(evolution.difference.abc)
print(evolution.difference.xyz)
would output:
df change of abc
df change of xyz
Demo: https://replit.com/#blhsing/ExtralargeNaturalCoordinate
Responding directly to your question, you can create a class:
class FooBar:
    def __init__(self, props):
        def make_prop(name):
            return property(lambda accessor_self: self._prop_impl(name))

        self.accessor = type(
            'Accessor',
            tuple(),
            {p: make_prop(p) for p in props}
        )()

    def _prop_impl(self, arg):
        return arg

o = FooBar(['foo', 'bar'])
assert o.accessor.foo == o._prop_impl('foo')
assert o.accessor.bar == o._prop_impl('bar')
Further, it would be beneficial to cache the created class, to make equivalent objects more similar and to eliminate potential issues with equality comparison.
That said, I am not sure this is desirable. There is little benefit in replacing method-call syntax (o.f('a')) with property access (o.a). I believe it can be detrimental on multiple accounts: dynamic properties are confusing and harder to document; and, while none of this is strictly guaranteed in the crazy world of dynamic Python, they communicate the wrong message: that the access is cheap, involves no computation, and that perhaps you can attempt to write to it.
I think that when you define the function func in the loop, it closes over the name variable itself rather than its value at the time the property is defined, so every property ends up seeing the final value of name. To fix this, you can use a lambda with a default argument to create a closure that captures the value of name at the time the property is defined.
class Value_Differences():
    def __init__(self, parent : Evolution_Base, property_list = []):
        self._parent = parent
        for name in property_list:
            setattr(self, name, property(fget = lambda self, name=name: self._parent._get_df_change(name)))
Does this help you ?
The simple question is really, how do I setattr for a property function?
In python we can set dynamic attributes like this:
class DynamicProperties():
    def __init__(self, property_list):
        self.property_list = property_list

    def add_properties(self):
        for name in self.property_list:
            setattr(self.__class__, name, property(fget=lambda self: 1))

dync = DynamicProperties(['a', 'b'])
dync.add_properties()
print(dync.a) # prints 1
print(dync.b) # prints 1
Correct me if I am wrong, but from reviewing your code, you want to create dynamic attributes and then set their value to a specific function call within the same class, where the data passed into the constructor (__init__) is stored as attributes. This is achievable, for example:
class DynamicProperties():
    def __init__(self, property_list, data1, data2):
        self.property_list = property_list
        self.data1 = data1
        self.data2 = data2

    def add_properties(self):
        for name in self.property_list:
            setattr(self.__class__, name, property(fget=lambda self: self.change(self.data1, self.data2)))

    def change(self, data1, data2):
        return data1 - data2

dync = DynamicProperties(['a', 'b'], 1, 2)
dync.add_properties()
print(dync.a == dync.change(1, 2)) # prints true
print(dync.b == dync.change(1, 2)) # prints true
You just have to add more complexity to the member: __getattr__ / __setattr__ give you the attribute name as a string, so it can be interpreted as needed. The biggest "problem" with doing this is that the return might not be consistent, and piping it back to a library that expects an object to have a specific behavior can cause soft errors.
This example is not the same as yours, but it uses the same concept: manipulate columns with members. To get a copy with changes, a setter is not needed; with a copy, modify and return, the new instance can be created with whatever is needed.
For example, the __getattr__ in this line will:
Check and interpret the string xyz_mull_0
Validate that the members and the operand exists
Make a copy of data_a
Modify the copy and return it
var = data_a.xyz_mull_0()
This looks more complex than it actually is; with the same instance members it is clear what it is doing. The _of modifier, however, needs a callback: __getattr__ can only take one parameter, so it has to save the attribute name and return a callback to be called with the other instance, which then calls back into __getattr__ and completes the rest of the function.
import re

class FlexibleFrame:
    operand_mod = {
        'sub': lambda a, b: a - b,
        'add': lambda a, b: a + b,
        'div': lambda a, b: a / b,
        'mod': lambda a, b: a % b,
        'mull': lambda a, b: a * b,
    }

    @staticmethod
    def add_operand(name, func):
        if name not in FlexibleFrame.operand_mod.keys():
            FlexibleFrame.operand_mod[name] = func

    # This makes this class subscriptable
    def __getitem__(self, item):
        return self.__dict__[item]

    # Uses:
    # -> object.value
    # -> object.member()
    # -> object.<name>_<operand>_<name|int>()
    # -> object.<name>_<operand>_<name|int>_<flow>()
    def __getattr__(self, attr):
        if re.match(r'^[a-zA-Z]+_[a-zA-Z]+_[a-zA-Z0-9]+(_of)?$', attr):
            seg = attr.split('_')
            var_a, operand, var_b = seg[0:3]

            # If there is a _of: the second operand is from the other
            # instance, the _of is removed and a callback is returned
            if len(seg) == 4:
                self.__attr_ref = '_'.join(seg[0:3])
                return self.__getattr_of

            # Checks if this was a _of attribute and resets it
            if self.__back_ref is not None:
                other = self.__back_ref
                self.__back_ref = None
                self.__attr_ref = None
            else:
                other = self

            if var_a not in self.__dict__:
                raise AttributeError(
                    f'No match of {var_a} in (primary) {__class__.__name__}'
                )
            if operand not in FlexibleFrame.operand_mod.keys():
                raise AttributeError(
                    f'No match of operand {operand}'
                )

            # The return is a copy of self, if not the instance
            # is getting modified making x = a.b() useless
            ret = FlexibleFrame(**self.__dict__)

            # Checks if the second operand is a int
            if re.match(r'^\d+$', var_b):
                ref_b_num = int(var_b)
                for i in range(len(self[var_a])):
                    ret[var_a][i] = FlexibleFrame.operand_mod[operand](
                        self[var_a][i], ref_b_num
                    )
            elif var_b in other.__dict__:
                for i in range(len(self[var_a])):
                    # out_index = operand[type](in_a_index, in_b_index)
                    ret[var_a][i] = FlexibleFrame.operand_mod[operand](
                        self[var_a][i], other[var_b][i]
                    )
            else:
                raise AttributeError(
                    f'No match of {var_b} in (secondary) {__class__.__name__}'
                )

            # This swaps the .member to a .member()
            # it also adds an extra () in __getattr_of
            return lambda: ret
            # return ret

        if attr in self.__dict__:
            return self[attr]

        raise AttributeError(
            f'No match of {attr} in {__class__.__name__}'
        )

    def __getattr_of(self, other):
        self.__back_ref = other
        return self.__getattr__(self.__attr_ref)()

    def __init__(self, **kwargs):
        self.__back_ref = None
        self.__attr_ref = None
        # TODO: Check if data columns match in size
        # if not, implement column_<name>_filler=<default>
        for i in kwargs:
            self.__dict__[i] = kwargs[i]

if __name__ == '__main__':
    data_a = FlexibleFrame(**{
        'abc': [i for i in range(10)],
        'nmv': [i for i in range(10)],
        'xyz': [i for i in range(10)],
    })
    data_b = FlexibleFrame(**{
        'fee': [i + 10 for i in range(10)],
        'foo': [i + 10 for i in range(10)],
    })

    FlexibleFrame.add_operand('set', lambda a, b: b)

    var = data_a.xyz_mull_0()
    var = var.abc_set_xyz()
    var = var.xyz_add_fee_of(data_b)
As an extra note, lambdas in Python bind the names they close over late, which can make them difficult to use when self changes.
It seems you're bending the language to do weird things. I'd take it as a smell that your code is probably getting convoluted but I'm not saying there would never be a use-case for it so here is a minimal example of how to do it:
class Obj:
    def _df_change(self, arg):
        print('change', arg)

class DynAttributes(Obj):
    def __getattr__(self, name):
        return self._df_change(name)

class Something:
    difference = DynAttributes()

a = Something()
b = Obj()
assert a.difference.hello == b._df_change('hello')
When calling setattr , use self.__class__ instead of self
Code sample:
from typing import List

class A:
    def __init__(self, names : List[str]):
        for name in names:
            setattr(self.__class__, name, property(fget=self.__create_getter(name)))

    def __create_getter(self, name: str):
        def inner(self):
            print(f"invoking {name}")
            return 10
        return inner
a = A(['x','y'])
print(a.x + 1)
print(a.y + 2)

How do I pass attributes of first class to another class inside a class [duplicate]

I have a situation like so...
class Outer(object):
    def some_method(self):
        # do something

    class Inner(object):
        def __init__(self):
            self.Outer.some_method() # <-- this is the line in question
How can I access the Outer class's method from the Inner class?
You're trying to access Outer's class instance from an inner class instance. So just use a factory method to build the Inner instance and pass the Outer instance to it.
class Outer(object):
    def createInner(self):
        return Outer.Inner(self)

    class Inner(object):
        def __init__(self, outer_instance):
            self.outer_instance = outer_instance
            self.outer_instance.somemethod()

        def inner_method(self):
            self.outer_instance.anothermethod()
The methods of a nested class cannot directly access the instance attributes of the outer class.
Note that it is not necessarily the case that an instance of the outer class exists even when you have created an instance of the inner class.
In fact, it is often recommended against using nested classes, since the nesting does not imply any particular relationship between the inner and outer classes.
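A minimal sketch of that limitation (assumed names):
class Outer:
    x = 1

    class Inner:
        def get_x(self):
            # return x        # NameError: the enclosing class body is not a scope here
            return Outer.x    # works, but only by naming the outer class explicitly

print(Outer.Inner().get_x())  # 1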
maybe I'm mad but this seems very easy indeed - the thing is to make your inner class inside a method of the outer class...
def do_sthg(self):
    ...

def mess_around(self):
    outer_class_self = self

    class Mooble():
        def do_sthg_different(self):
            ...
            outer_class_self.do_sthg()
Plus... "self" is only used by convention, so you could do this:
def do_sthg(self):
    ...

def mess_around(outer_class_self):
    class Mooble():
        def do_sthg_different(self):
            ...
            outer_class_self.do_sthg()
It might be objected that you can't then create this inner class from outside the outer class... but this ain't true:
class Bumblebee():
    def do_sthg(self):
        print "sthg"

    def give_me_an_inner_class(outer_class_self):
        class Mooble():
            def do_sthg_different(self):
                print "something diff\n"
                outer_class_self.do_sthg()
        return Mooble
then, somewhere miles away:
blob = Bumblebee().give_me_an_inner_class()()
blob.do_sthg_different()
even push the boat out a bit and extend this inner class (NB to get super() to work you have to change the class signature of Mooble to class Mooble(object)).
class InnerBumblebeeWithAddedBounce(Bumblebee().give_me_an_inner_class()):
    def bounce(self):
        print "bounce"

    def do_sthg_different(self):
        super(InnerBumblebeeWithAddedBounce, self).do_sthg_different()
        print "and more different"

ibwab = InnerBumblebeeWithAddedBounce()
ibwab.bounce()
ibwab.do_sthg_different()
later
mrh1997 raised an interesting point about the non-common inheritance of inner classes delivered using this technique. But it seems that the solution is pretty straightforward:
class Fatty():
    def do_sthg(self):
        pass

    class InnerFatty(object):
        pass

    def give_me_an_inner_fatty_class(self):
        class ExtendedInnerFatty(Fatty.InnerFatty):
            pass
        return ExtendedInnerFatty

fatty1 = Fatty()
fatty2 = Fatty()

innerFattyClass1 = fatty1.give_me_an_inner_fatty_class()
innerFattyClass2 = fatty2.give_me_an_inner_fatty_class()

print (issubclass(innerFattyClass1, Fatty.InnerFatty))
print (issubclass(innerFattyClass2, Fatty.InnerFatty))
I found this.
Tweaked to suite your question:
class Outer(object):
    def some_method(self):
        # do something

    class _Inner(object):
        def __init__(self, outer):
            outer.some_method()

    def Inner(self):
        return Outer._Inner(self)
I’m sure you can somehow write a decorator for this or something
related: What is the purpose of python's inner classes?
A few years late to the party... but to expand on @mike rodent's wonderful answer, I've provided my own example below that shows just how flexible his solution is, and why it should be (or should have been) the accepted answer.
Python 3.7
class Parent():
    def __init__(self, name):
        self.name = name
        self.children = []

    class Inner(object):
        pass

    def Child(self, name):
        parent = self

        class Child(Parent.Inner):
            def __init__(self, name):
                self.name = name
                self.parent = parent
                parent.children.append(self)

        return Child(name)

parent = Parent('Bar')
child1 = parent.Child('Foo')
child2 = parent.Child('World')

print(
    # Getting its first childs name
    child1.name,              # From itself
    parent.children[0].name,  # From its parent

    # Also works with the second child
    child2.name,
    parent.children[1].name,

    # Go nuts if you want
    child2.parent.children[0].name,
    child1.parent.children[1].name
)

print(
    # Getting the parents name
    parent.name,         # From itself
    child1.parent.name,  # From its children
    child2.parent.name,

    # Go nuts again if you want
    parent.children[0].parent.name,
    parent.children[1].parent.name,

    # Or insane
    child2.parent.children[0].parent.children[1].parent.name,
    child1.parent.children[1].parent.children[0].parent.name
)

# Second parent? No problem
parent2 = Parent('John')
child3 = parent2.Child('Doe')
child4 = parent2.Child('Appleseed')

print(
    child3.name, parent2.children[0].name,
    child4.name, parent2.children[1].name,
    parent2.name # ....
)
Output:
Foo Foo World World Foo World
Bar Bar Bar Bar Bar Bar Bar
Doe Doe Appleseed Appleseed John
Again, a wonderful answer, props to you mike!
You can easily access the outer class using a metaclass: after creation of the outer class, check its attribute dict for any classes (or apply any logic you need - mine is just a trivial example) and set the corresponding values:
import six
import inspect

# helper method from `peewee` project to add metaclass
_METACLASS_ = '_metaclass_helper_'
def with_metaclass(meta, base=object):
    return meta(_METACLASS_, (base,), {})

class OuterMeta(type):
    def __new__(mcs, name, parents, dct):
        cls = super(OuterMeta, mcs).__new__(mcs, name, parents, dct)
        for klass in dct.values():
            if inspect.isclass(klass):
                print("Setting outer of '%s' to '%s'" % (klass, cls))
                klass.outer = cls
        return cls

# @six.add_metaclass(OuterMeta) -- this is alternative to `with_metaclass`
class Outer(with_metaclass(OuterMeta)):
    def foo(self):
        return "I'm outer class!"

    class Inner(object):
        outer = None # <-- by default it's None

        def bar(self):
            return "I'm inner class"
print(Outer.Inner.outer)
>>> <class '__main__.Outer'>
assert isinstance(Outer.Inner.outer(), Outer)
print(Outer().foo())
>>> I'm outer class!
print(Outer.Inner.outer().foo())
>>> I'm outer class!
print(Outer.Inner().outer().foo())
>>> I'm outer class!
print(Outer.Inner().bar())
>>> I'm inner class!
Using this approach, you can easily bind and refer two classes between each other.
I've created some Python code to use an outer class from its inner class, based on a good idea from another answer for this question. I think it's short, simple and easy to understand.
class higher_level__unknown_irrelevant_name__class:
    def __init__(self, ...args...):
        ...other code...
        # Important lines to access sub-classes.
        subclasses = self._subclass_container()
        self.some_subclass = subclasses["some_subclass"]
        del subclasses # Free up variable for other use.

    def sub_function(self, ...args...):
        ...other code...

    def _subclass_container(self):
        _parent_class = self # Create access to parent class.

        class some_subclass:
            def __init__(self):
                self._parent_class = _parent_class # Easy access from self.
                # Optional line, clears variable space, but SHOULD NOT BE USED
                # IF THERE ARE MULTIPLE SUBCLASSES as would stop their parent access.
                # del _parent_class

        class subclass_2:
            def __init__(self):
                self._parent_class = _parent_class

        # Return reference(s) to the subclass(es).
        return {"some_subclass": some_subclass, "subclass_2": subclass_2}
The main code, "production ready" (without comments, etc.). Remember to replace all of each value in angle brackets (e.g. <x>) with the desired value.
class <higher_level_class>:
    def __init__(self):
        subclasses = self._subclass_container()
        self.<sub_class> = subclasses[<sub_class, type string>]
        del subclasses

    def _subclass_container(self):
        _parent_class = self

        class <sub_class>:
            def __init__(self):
                self._parent_class = _parent_class

        return {<sub_class, type string>: <sub_class>}
Explanation of how this method works (the basic steps):
Create a function named _subclass_container to act as a wrapper to access the variable self, a reference to the higher level class (from code running inside the function).
Create a variable named _parent_class which is a reference to the variable self of this function, that the sub-classes of _subclass_container can access (avoids name conflicts with other self variables in subclasses).
Return the sub-class/sub-classes as a dictionary/list so code calling the _subclass_container function can access the sub-classes inside.
In the __init__ function inside the higher level class (or wherever else needed), receive the returned sub-classes from the function _subclass_container into the variable subclasses.
Assign sub-classes stored in the subclasses variable to attributes of the higher level class.
A few tips to make scenarios easier:
Making the code to assign the sub classes to the higher level class easier to copy and be used in classes derived from the higher level class that have their __init__ function changed:
Insert before line 12 in the main code:
def _subclass_init(self):
Then insert into this function lines 5-6 (of the main code) and replace lines 4-7 with the following code:
self._subclass_init(self)
Making subclass assigning to the higher level class possible when there are many/unknown quantities of subclasses.
Replace line 6 with the following code:
for subclass_name in list(subclasses.keys()):
    setattr(self, subclass_name, subclasses[subclass_name])
Example scenario of where this solution would be useful and where the higher level class name should be impossible to get:
A class, named "a" (class a:) is created. It has subclasses that need to access it (the parent). One subclass is called "x1". In this subclass, the code a.run_func() is run.
Then another class, named "b" is created, derived from class "a" (class b(a):). After that, some code runs b.x1() (calling the sub function "x1" of b, a derived sub-class). This function runs a.run_func(), calling the function "run_func" of class "a", not the function "run_func" of its parent, "b" (as it should), because the function which was defined in class "a" is set to refer to the function of class "a", as that was its parent.
This would cause problems (e.g. if function a.run_func has been deleted) and the only solution without rewriting the code in class a.x1 would be to redefine the sub-class x1 with updated code for all classes derived from class "a" which would obviously be difficult and not worth it.
Do you mean to use inheritance, rather than nesting classes like this? What you're doing doesn't make a heap of sense in Python.
You can access the Outer's some_method by just referencing Outer.some_method within the inner class's methods, but it's not going to work as you expect it will. For example, if you try this:
class Outer(object):
    def some_method(self):
        # do something

    class Inner(object):
        def __init__(self):
            Outer.some_method()
...you'll get a TypeError when initialising an Inner object, because Outer.some_method expects to receive an Outer instance as its first argument. (In the example above, you're basically trying to call some_method as a class method of Outer.)
Another possibility:
class _Outer (object):
    # Define your static methods here, e.g.
    @staticmethod
    def subclassRef ():
        return Outer

class Outer (_Outer):
    class Inner (object):
        def outer (self):
            return _Outer

        def doSomething (self):
            outer = self.outer ()
            # Call your static methods.
            cls = outer.subclassRef ()
            return cls ()
What we can do is pass the self of the Outer class into the Inner class as an argument, and inside Outer's __init__ initialise the Inner class with that Outer self passed in:
class Outer:
    def __init__(self):
        self.somevalue = 91
        self.Inner = self.Inner(self)

    def SomeMethod(self):
        print('This is Something from Outer Class')

    class Inner:
        def __init__(self, Outer):
            self.SomeMethod = Outer.SomeMethod
            self.somevalue = Outer.somevalue

        def SomeAnotherMethod(self):
            print(self.somevalue)
            self.SomeMethod()
>>>f=Outer()
>>>f.Inner.SomeAnotherMethod()
91
This is Something from Outer Class
Now, after running this, it works.
Expanding on @tsnorri's cogent thinking, that the outer method may be a static method:
class Outer(object):
    @staticmethod
    def some_static_method(self):
        # do something

    class Inner(object):
        def __init__(self):
            self.some_static_method() # <-- this will work later

    Inner.some_static_method = some_static_method
Now the line in question should work by the time it is actually called.
The last line in the above code gives the Inner class a static method that's a clone of the Outer static method.
This takes advantage of two Python features, that functions are objects, and scope is textual.
Usually, the local scope references the local names of the (textually) current function.
...or current class in our case. So objects "local" to the definition of the Outer class (Inner and some_static_method) may be referred to directly within that definition.
You may create a class to decorate inner classes, in this case @inner.
Since this is a decorator: Outer.A = inner(Outer.A). Once your code accesses Outer.A, the inner.__get__ method is executed, which returns the original class (A) with a new attribute set on it: A.owner = Outer.
A classmethod in class A, in this case def add(cls, y=3), may use the new attribute owner, as in return cls.owner.x + y + 1.
The line setattr(owner, name, self.inner) breaks the descriptor, because owner.name => Outer.A => A is no longer an instance of the class inner.
Hope this helps.
class inner:
    def __init__(self, inner):
        self.inner = inner

    def __get__(self, instance, owner):
        print('__get__ method executed, only once... ')
        name = self.inner.__name__
        setattr(self.inner, 'owner', owner)
        setattr(owner, name, self.inner) # breaks descriptor
        return self.inner # returns Inner

class Outer:
    x = 1

    @inner
    class A:
        @classmethod
        def add(cls, y=3):
            return cls.owner.x + y + 1

print(Outer.A.add(0)) # First time executes inner.__get__ method
print(Outer.A.add(0)) # Second time not necessary.
>> __get__ method executed, only once...
>> 2
>> 2
It can be done by passing the outer class object into the inner class.
class Outer():
    def __init__(self, userinput):
        self.userinput = userinput

    def outer_function(self):
        self.a = self.userinput + 2

    class Inner():
        def inner_function(self):
            self.b = self.a + 10
After defining this, we need to run the outer function:
m = Outer(3)
m.outer_function()
print (m.a)
#this will output 5
Now it has the variable of the outer class.
Then we need to run the inner class function:
m.Inner.inner_function(m)
The object m of the outer class is passed into the function of the inner class (inside the brackets).
Now the inner class function is accessing self.a from the outer class.
print (m.b)
#this will output 15
It is too simple:
Input:
class A:
    def __init__(self):
        pass

    def func1(self):
        print('class A func1')

    class B:
        def __init__(self):
            a1 = A()
            a1.func1()

        def func1(self):
            print('class B func1')

b = A.B()
b.func1()
Output
class A func1
class B func1

understanding method use inside a class

I'm new to classes. This is a small piece of code I've written, but I'm still really shaky on this concept, and am wondering exactly how the method node_name comes into play here and whether it's even needed.
from rdflib import BNode

class HigherNode(object):
    def node_name(name):
        return name

    def __init__(self, **kwargs):
        self.node_type = kwargs.get('node_type', 'cog_con')
        self.position = kwargs.get('position', 0)
        self.node_id = self.node_name
        self.node = kwargs.get(self.node_name(), BNode())
        for key, value in kwargs.items():
            setattr(self, key, value)

    def __str__(self):
        return 'This is the node of {} in the graph'.format(self.node_id)
The behavior that I'm seeking is something equivalent to this:
elephant = BNode()
when used as:
some_node = HigherNode(node_id = 'elephant')
So, first off, methods have to be called by an instance of the class. So, your behavior would look something like this:
# create an instance
node = HigherNode()
# get the name
print node.node_name()
However, you never declared name inside the class. So, you'll have to do something like this:
def node_name(self):
    return self.name
(All instances pass a reference to themselves to their functions when called, so you'll always have to have at least one parameter in the function definition. You don't have to call it self.)
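A minimal sketch of that point (assumed example):
class Greeter:
    def hello(this):                 # the first parameter receives the instance,
        return type(this).__name__   # whatever you choose to call it

print(Greeter().hello())             # Greeter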
Really, it looks like what you want is actually a name setter/getter.
Try this:
Option 1: Declare/set the variable in __init__.
def __init__(self, **kwargs):
    self.node_name = kwargs.get('node_name', None)
Then you can use the variable like this:
# create an instance
node = HigherNode()
# get the name
print node.node_name
# set the name
node.node_name = "bluh"
Option 2: Since your class extends object, use getter/setter properties.
@property
def node_name(self):
    return self._node_name

@node_name.setter
def node_name(self, x):
    self._node_name = str(x)
These are called exactly the same as above in option 1:
# create an instance
node = HigherNode()
# get the name
print node.node_name
# set the name
node.node_name = "bluh"
I prefer this method, since it allows you much more control over how things are set, or even whether or not you can set or get them! (Just make a getter property without a corresponding setter property, for instance.)
However, this second method is more work to set up and may not be suitable for simple variables.
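For instance, a minimal sketch of a read-only property (a getter without a corresponding setter; names assumed):
class Node:
    def __init__(self, name):
        self._name = name

    @property
    def node_name(self):         # no setter is defined
        return self._name

n = Node('elephant')
print(n.node_name)               # elephant
# n.node_name = 'tiger'          # would raise AttributeError: can't set attribute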

Add attribute to python class

Consider the following code:
class Foo():
    pass
Foo.entries = dict()
a = Foo()
a.entries['1'] = 1
b = Foo()
b.entries['3'] = 3
print(a.entries)
This will print:
{'1': 1, '3': 3}
because entries is added as a class (static) attribute. Is there a way to monkey-patch the class definition in order to add new attributes (without using inheritance)?
I managed to find the following way but it looks convoluted to me:
def patch_me(target, field, value):
    def func(self):
        if not hasattr(self, '__' + field):
            setattr(self, '__' + field, value())
        return getattr(self, '__' + field)
    setattr(target, field, property(func))
patch_me(Foo, 'entries', dict)
Ordinarily, attributes are added either by the __init__() function or after instantiating:
foo = Foo()
foo.bar = 'something' # note case
If you want to do this automatically, inheritance is by far the simplest way to do so:
class Baz(Foo):
    def __init__(self):
        super().__init__() # super() needs arguments in 2.x
        self.bar = 'something'
Note that classes don't need to appear at the top level of a Python module. You can declare a class inside a function:
def make_baz(value):
    class Baz(Foo):
        def __init__(self):
            super().__init__() # super() needs arguments in 2.x
            self.bar = value()
    return Baz()
This example will create a new class every time make_baz() is called. That may or may not be what you want. It would probably be simpler to just do this:
def make_foo(value):
    result = Foo()
    result.bar = value()
    return result
If you're really set on monkey-patching the original class, the example code you provided is more or less the simplest way of doing it. You might consider using decorator syntax for property(), but that's a minor change. I should also note that it will not invoke double-underscore name mangling, which is probably a good thing because it means you cannot conflict with any names used by the class itself.
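For illustration, a minimal sketch of the same lazily-initialised attribute written with decorator syntax instead of patch_me (assumed, not the answer's original code):
class Foo:
    @property
    def entries(self):
        # create a per-instance dict on first access
        if not hasattr(self, '_entries'):
            self._entries = dict()
        return self._entries

a, b = Foo(), Foo()
a.entries['1'] = 1
print(a.entries, b.entries)   # {'1': 1} {}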

Is it safe to replace a self object by another object of the same type in a method?

I would like to replace an object instance by another instance inside a method like this:
class A:
    def method1(self):
        self = func(self)
The object is retrieved from a database.
It is unlikely that replacing the 'self' variable will accomplish whatever you're trying to do, that couldn't just be accomplished by storing the result of func(self) in a different variable. 'self' is effectively a local variable only defined for the duration of the method call, used to pass in the instance of the class which is being operated upon. Replacing self will not actually replace references to the original instance of the class held by other objects, nor will it create a lasting reference to the new instance which was assigned to it.
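A minimal sketch of why rebinding self has no outside effect (assumed example):
class A:
    def method1(self):
        self = A()           # rebinds only the local name inside this call
        self.marker = 1      # set on the new, throwaway instance

a = A()
a.method1()
print(hasattr(a, 'marker'))  # False -- the original instance is untouched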
As far as I understand, if you are trying to replace the current object with another object of the same type (assuming func won't change the object type) from a member function, I think this will achieve that:
class A:
    def method1(self):
        newObj = func(self)
        self.__dict__.update(newObj.__dict__)
It is not a direct answer to the question, but in the posts below there's a solution for what amirouche tried to do:
Python object conversion
Can I dynamically convert an instance of one class to another?
And here's working code sample (Python 3.2.5).
class Men:
    def __init__(self, name):
        self.name = name

    def who_are_you(self):
        print("I'm a men! My name is " + self.name)

    def cast_to(self, sex, name):
        self.__class__ = sex
        self.name = name

    def method_unique_to_men(self):
        print('I made The Matrix')

class Women:
    def __init__(self, name):
        self.name = name

    def who_are_you(self):
        print("I'm a women! My name is " + self.name)

    def cast_to(self, sex, name):
        self.__class__ = sex
        self.name = name

    def method_unique_to_women(self):
        print('I made Cloud Atlas')
men = Men('Larry')
men.who_are_you()
#>>> I'm a men! My name is Larry
men.method_unique_to_men()
#>>> I made The Matrix
men.cast_to(Women, 'Lana')
men.who_are_you()
#>>> I'm a women! My name is Lana
men.method_unique_to_women()
#>>> I made Cloud Atlas
Note the self.__class__ and not self.__class__.__name__. I.e. this technique not only replaces class name, but actually converts an instance of a class (at least both of them have same id()). Also, 1) I don't know whether it is "safe to replace a self object by another object of the same type in [an object own] method"; 2) it works with different types of objects, not only with ones that are of the same type; 3) it works not exactly like amirouche wanted: you can't init class like Class(args), only Class() (I'm not a pro and can't answer why it's like this).
Yes, all that will happen is that you won't be able to reference the current instance of your class A (unless you set another variable to self before you change it.) I wouldn't recommend it though, it makes for less readable code.
Note that you're only changing a variable, just like any other. Doing self = 123 is the same as doing abc = 123. self is only a reference to the current instance within the method. You can't change your instance by setting self.
What func(self) should do is to change the variables of your instance:
def func(obj):
    obj.var_a = 123
    obj.var_b = 'abc'
Then do this:
class A:
    def method1(self):
        func(self) # No need to assign self here
In many cases, a good way to achieve what you want is to call __init__ again. For example:
class MyList(list):
    def trim(self, n):
        self.__init__(self[:-n])
x = MyList([1,2,3,4])
x.trim(2)
assert type(x) == MyList
assert x == [1,2]
Note that this comes with a few assumptions, such as that everything you want to change about the object is set in __init__. Also beware that this could cause problems with inheriting classes that redefine __init__ in an incompatible manner.
Yes, there is nothing wrong with this. Haters gonna hate. (Looking at you, PyCharm, with your "in most cases imaginable, there's no point in such reassignment and it indicates an error".)
A situation where you could do this is:
def some_method(self, ...):
    ...
    if(some_condition):
        self = self.some_other_method()
    ...
    return ...
Sure, you could start the method body by reassigning self to some other variable, but if you wouldn't normally do that with other parameters, why do it with self?
One can use the self assignment in a method, to change the class of instance to a derived class.
Of course one could assign it to a new object, but then the use of the new object ripples through the rest of code in the method. Reassiging it to self, leaves the rest of the method untouched.
class aclass:
    def methodA(self):
        ...
        if condition:
            self = replace_by_derived(self)
            # self is now referencing to an instance of a derived class
            # with probably the same values for its data attributes

        # all code here remains untouched
        ...
        self.methodB() # calls the methodB of derivedclass if condition is True
        ...

    def methodB(self):
        # methodB of class aclass
        ...

class derivedclass(aclass):
    def methodB(self):
        # methodB of class derivedclass
        ...
But apart from such a special use case, I don't see any advantages to replace self.
You can make the instance a singleton element of the class
and mark the methods with @classmethod.
from enum import IntEnum
from collections import namedtuple

class kind(IntEnum):
    circle = 1
    square = 2

def attr(y): return [getattr(y, x) for x in 'k l b u r'.split()]

class Shape(namedtuple('Shape', 'k,l,b,u,r')):
    self = None

    @classmethod
    def __repr__(cls):
        return "<Shape({},{},{},{},{}) object at {}>".format(
            *(attr(cls.self) + [id(cls.self)]))

    @classmethod
    def transform(cls, func):
        cls.self = cls.self._replace(**func(cls.self))
Shape.self = Shape(k=1, l=2, b=3, u=4, r=5)
s = Shape.self
def nextkind(self):
    return {'k': self.k+1}
print(repr(s)) # <Shape(1,2,3,4,5) object at 139766656561792>
s.transform(nextkind)
print(repr(s)) # <Shape(2,2,3,4,5) object at 139766656561888>
