In my Python script I have defined a class similar to the following (admittedly bogus) one:
import copy

class Example:
    def __init__(self, a, b):
        self.a = a
        self.b = b
        self.__default__ = copy.deepcopy(self.__dict__)
        self.t = 0
        self.d = False

    def do(self):
        self.a += self.b - self.t
        self.t += 1
        if self.t == self.b:
            self.d = True
        return self.a

    def reset(self):
        self.__init__(**self.__default__)
Now, I would like to pass an instance of this class to my main function and repeatedly reset the instance to its default state. Despite having a look at several related questions, I couldn't get it going. The working example below gives the desired result, yet it explicitly re-instantiates the class inside the main function. The dysfunctional example is one of my many tries to make it work using a reset method.
# working example:
def main(x):
    agg = []
    for i in range(x):
        klass = Example(1, 3)
        while not klass.d:
            a = klass.do()
            agg.append(a)
    return agg
# dysfunctional example:
def main2(klass, x):
    agg = []
    for i in range(x):
        klass.reset()
        while not klass.d:
            a = klass.do()
            agg.append(a)
    return agg
Then main(5) gives
res = main(5)
print(res)
>>> [4, 6, 7, 4, 6, 7, 4, 6, 7, 4, 6, 7, 4, 6, 7]
whereas
ex = Example(1, 3) # default state
res = main2(ex, 5)
print(res)
throws the error TypeError: __init__() got an unexpected keyword argument '__default__'. (The second time reset is called, the deepcopy of self.__dict__ taken inside __init__ already contains the old __default__, t and d, so they get passed back to __init__ as keyword arguments.)
Since I would like to avoid re-instantiating the class in the main script for various reasons, I would be grateful if someone could help me out with the reset method.
How about something like this:
class Example:
    def __init__(self, *args, **kwargs):
        """Stores the default state, then inits the instance using that default state."""
        self.__default_args__ = args
        self.__default_kwargs__ = kwargs
        self.init(*args, **kwargs)

    def do(self):
        """Do whatever you want."""
        self.a += self.b - self.t
        self.t += 1
        if self.t == self.b:
            self.d = True
        return self.a

    def init(self, a, b):
        """Inits the instance to a given state."""
        self.a = a
        self.b = b
        self.t = 0
        self.d = False
        return self

    def reset(self):
        """Resets the instance to the default (stored) state."""
        return self.init(*self.__default_args__, **self.__default_kwargs__)
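A quick usage sketch (not part of the original answer), reusing the main2 function from the question unchanged; the output should match the working main(5) example:

ex = Example(1, 3)
res = main2(ex, 5)   # main2 from the question, unchanged
print(res)           # expected: [4, 6, 7, 4, 6, 7, 4, 6, 7, 4, 6, 7, 4, 6, 7]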
Here is an implementation using a context manager:
class Example:
    def __init__(self, a, b):
        self.a = a
        self.b = b
        self.t = 0
        self.d = False

    def do(self):
        self.a += self.b - self.t
        self.t += 1
        if self.t == self.b:
            self.d = True
        return self.a

    def __enter__(self):
        self._a = self.a
        self._b = self.b

    def __exit__(self, *args):
        self.t = 0
        self.d = False
        self.a = self._a
        self.b = self._b

def main2(klass, x):
    agg = []
    for i in range(x):
        with klass:
            while not klass.d:
                a = klass.do()
                agg.append(a)
    return agg
ex = Example(1, 3)
res = main2(ex, 5)
print(res)
A reusable way to do this would be to implement a Resettable class to inherit from.
Resettable
class Resettable:
    def __init__(self, *args, **kwargs):
        self.__args__ = args
        self.__kwargs__ = kwargs

    def reset(self):
        self.__init__(*self.__args__, **self.__kwargs__)
Usage
Using a property to define an attribute that entirely depends on other attributes will also smooth out the process of resetting. This idea of having a single source of truth is generally a helpful paradigm for state that needs to go back and forth in time.
class Example(Resettable):
    def __init__(self, a=0):
        self.a = a
        super().__init__(a)

    def do(self):
        self.a += 1
        return self.a

    @property
    def d(self):
        return self.a > 3  # or any other condition

def main(instance, x):
    agg = []
    for _ in range(x):
        instance.reset()
        while not instance.d:
            a = instance.do()
            agg.append(a)
    return agg

print(main(Example(), 3))  # [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4]
The underlying assumption of the Resettable class is that the arguments passed to the constructor contain all the information needed to reset; using properties makes that assumption easier to satisfy.
Related
I am trying to call a method of one class from within another class. It works fine if I don't pass the variables x, y, z by keyword (see the commented-out line) when creating the object. However, if I explicitly use the keyword names, it doesn't work. What is making this happen?
class ClassA():
    def __init__(self, a, b):
        self.a = a
        self.b = b

    def method_a(self):
        return f'A, method_a, {self.a}, {self.b}'

    def callback_method(self, *args):
        obj = ClassB(*args)
        return obj.method_b()

class ClassB():
    def __init__(self, x, y, z):
        self.x = x
        self.y = y
        self.z = z

    def method_b(self):
        return f'B, method_b, {self.x}, {self.y}, {self.z}'

A = ClassA(a=1, b=2)
print(A.method_a())
# A, method_a, 1, 2

B = ClassB(x=3, y=4, z=5)
print(B.method_b())
# B, method_b, 3, 4, 5

print(A.callback_method(10, 11, 12))
# B, method_b, 10, 11, 12

# print(A.callback_method(x=10, y=11, z=12)) # <------ doesn't work
You defined callback_method to only accept positional arguments with *args, but no keyword arguments.
Instead, you can make it accept both and pass them on to ClassB, so that it can be called with either positional or keyword arguments:
class ClassA():
    ...
    def callback_method(self, *args, **kwargs):
        obj = ClassB(*args, **kwargs)
        return obj.method_b()
Result:
print(A.callback_method(x=10, y=11, z=12)) # Output: 'B, method_b, 10, 11, 12'
I think I have a use case for the factory method pattern in Python and have two ideas for how to implement it (see below). Both work, but are they really the same? Option 2 looks much clearer to me, despite having a few more lines. Is my gut feeling right? Or is there a third, better option?
Option 1
def inner_factory_func(a):
    class Inner:
        def __init__(self, b):
            self.a = a
            self.b = b

        def call(self):
            return self.a + self.b

    return Inner

inner_factory = inner_factory_func('a')
inner = inner_factory('b')
res = inner.call()  # = 'ab'
Option 2
class Inner:
    def __init__(self, a, b):
        self.a = a
        self.b = b

    def call(self):
        return self.a + self.b

class InnerFactory:
    def __init__(self, a):
        self.a = a

    def create(self, b) -> Inner:
        return Inner(self.a, b)

inner_factory = InnerFactory('a')
inner = inner_factory.create('b')
res = inner.call()  # = 'ab'
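Since a third option is asked about: one lightweight alternative (not from the question, just a sketch) is to pre-bind the shared argument with functools.partial instead of writing a separate factory class, reusing the same Inner as in Option 2:

from functools import partial

class Inner:
    def __init__(self, a, b):
        self.a = a
        self.b = b

    def call(self):
        return self.a + self.b

# partial(Inner, 'a') behaves like the factory: it pre-binds a and
# produces Inner instances when called with the remaining argument
inner_factory = partial(Inner, 'a')
inner = inner_factory('b')
res = inner.call()  # = 'ab'

This keeps Option 2's clarity while avoiding the extra class, at the cost of a less explicit interface.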
Given I have a class:
from collections import defaultdict

class C:
    def __init__(self, flag=None):
        self.a = list()
        self.b = {}
        self.c = 0
        self.d = 1
        self.e = defaultdict(list)
        self.f = defaultdict(set)
        if flag is None:
            self.g = False
        else:
            self.g = flag
        self.something_else = (1, 1, 1)

    def foo(self):  # many other heavy methods
        self.a.append(self.d + self.c)
        return self.e
I need to instantiate it 1,000,000 times and then call foo(). What is the fastest way to do this? Could it be done even faster with CFFI?
e.g.
l = []
for i in range(10000000):
    o = C()
    l.append(o)
If you are not going to systematically call methods on every instance, you may want to defer initialization to the first call to another method.
from collections import defaultdict

class C:
    def __init__(self, flag=None):
        self.initDone = False
        self.g = False if flag is None else flag

    def completeInit(self):
        if self.initDone:
            return
        self.a = list()
        self.b = {}
        self.c = 0
        self.d = 1
        self.e = defaultdict(list)
        self.f = defaultdict(set)
        self.something_else = (1, 1, 1)
        self.initDone = True  # so subsequent calls return immediately

    def foo(self):  # many other heavy methods
        self.completeInit()
        self.a.append(self.d + self.c)
        return self.e
This makes allocating 10M instances roughly 5 times faster.
A = [C() for _ in range(10000000)] # 3.95 sec vs 20.4
Depending on usage patterns, this may postpone the cost of initialization to a more acceptable time or even as a background process.
Alternatively, you could postpone only the more costly parts of the initialization by using properties for the list, dictionary and set attributes:
from collections import defaultdict

class C:
    def __init__(self, flag=None):
        self.initDone = False
        self.g = False if flag is None else flag
        self.c = 0
        self.d = 1
        self.something_else = (1, 1, 1)

    def foo(self):  # many other heavy methods
        self.a.append(self.d + self.c)
        return self.e

    @property
    def a(self):
        try:
            return self._a
        except AttributeError:
            self._a = list()
            return self._a

    @property
    def b(self):
        try:
            return self._b
        except AttributeError:
            self._b = {}
            return self._b

    @property
    def e(self):
        try:
            return self._e
        except AttributeError:
            self._e = defaultdict(list)
            return self._e

    @property
    def f(self):
        try:
            return self._f
        except AttributeError:
            self._f = defaultdict(set)
            return self._f
In this case, it only gives a 4x speed improvement, though:
A = [C() for _ in range(10000000)] # 5.16 sec vs 20.4
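The timings quoted above come from the answer; something like the following sketch can be used to reproduce them (numbers will vary by machine, and the range is scaled down here from the 10M used above):

import timeit

# assumes the lazy-property version of C defined above is in scope
print(timeit.timeit("[C() for _ in range(1000000)]", globals=globals(), number=1))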
What about using Python's multiprocessing?
from multiprocessing import Pool, cpu_count

def instantiate_and_run(x):
    return C()

with Pool(cpu_count()) as pool:
    l = pool.map(instantiate_and_run, range(1000000))
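A caveat not mentioned in the answer: on platforms where spawn is the default start method (Windows, macOS), the Pool code must run under a main guard, and every C instance is pickled back from the worker processes, which for cheap objects usually costs more than constructing them in a single process. A sketch of the guarded form:

from multiprocessing import Pool, cpu_count

def instantiate_and_run(x):
    return C()

if __name__ == '__main__':
    with Pool(cpu_count()) as pool:
        # each instance is created in a worker and then pickled back to the parent
        l = pool.map(instantiate_and_run, range(1000000))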
This seems to be a duplicate.
In Python you can use __slots__ to speed up instance creation a little and to reduce memory usage; migrating the class to __slots__ is trivial.
Another solution is recordclass, which is designed to optimize the creation of new instances.
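A minimal sketch of the __slots__ idea applied to the C class from the question (attribute names taken from the original; the creation speed-up is typically modest, the memory saving more noticeable):

from collections import defaultdict

class C:
    # declaring __slots__ removes the per-instance __dict__, which saves memory
    # and can make instance creation and attribute access somewhat faster
    __slots__ = ('a', 'b', 'c', 'd', 'e', 'f', 'g', 'something_else')

    def __init__(self, flag=None):
        self.a = []
        self.b = {}
        self.c = 0
        self.d = 1
        self.e = defaultdict(list)
        self.f = defaultdict(set)
        self.g = False if flag is None else flag
        self.something_else = (1, 1, 1)

    def foo(self):
        self.a.append(self.d + self.c)
        return self.e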
I am looking for a way to initialize a variable in a class. This variable depends on other variables that I also initialize in the same class. Here is an example.
class my_class():
    def __init__(self):
        self.a = None
        self.b = None
        self.sum = self.a + self.b

    def change_a_and_b(self):
        self.a = input("a = ")
        self.b = input("b = ")

    def print_sum(self):
        print(self.sum)
This is a simplified example. In my program, self.sum is a complicated calculation which I want to use like a "shortcut" so I don't need to write it out many times. The problem is that I only set the variables after __init__ has run, so I don't want self.sum = self.a + self.b to be executed when __init__ runs. I think there is a do_thing parameter, but I don't know how to use it.
You can make sum a property:
class my_class():
    def __init__(self):
        self.a = None
        self.b = None

    @property
    def sum(self):
        return self.a + self.b

    def change_a_and_b(self):
        self.a = input("a = ")
        self.b = input("b = ")

    def print_sum(self):
        print(self.sum)
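A short usage sketch; note that input() returns strings, so with this toy example the "sum" is string concatenation unless the values are converted:

c = my_class()
c.change_a_and_b()   # e.g. enter 1 and 2; self.a and self.b become the strings '1' and '2'
c.print_sum()        # prints '12' (concatenation); wrap input() in int() for numeric sums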
I want to pass a function to a class when I initialize it. Here's a toy example I came up with and it works:
def addition(self):
    return self.a + self.b

def multiplication(self):
    return self.a * self.b

class Test:
    def __init__(self, a, b, fcn):
        self.a = a
        self.b = b
        self.fcn = fcn

t = Test(3, 3, addition)
print(t.fcn(t))

t = Test(3, 3, multiplication)
print(t.fcn(t))
Is it possible to simply call t.fcn() as you would any other class method?
Did you try it? The answer is yes:
def do_op(x, y, fn):
    return fn(x, y)

def add(a, b):
    return a + b

print(do_op(5, 4, add))
same with a class
class whatever:
    def __init__(self, fn):
        self.fn = fn

    def do_it(self, *args, **kwargs):
        return self.fn(*args, **kwargs)
        # if you wanted the fn to have self as the first argument
        # return self.fn(self, *args, **kwargs)  # just pass self as first argument

x = whatever(add)
print(x.do_it(5, 8))
Further along the lines of what you are asking for (if I'm reading it right):
def add(self):
    return self.a + self.b

class whatever:
    def __init__(self, fn, a, b):
        # store the function on the instance under its own name
        self.__dict__[fn.__name__] = fn
        self._fn_name = fn.__name__
        self.a, self.b = a, b

    def do_it(self):
        # functions stored on the instance are plain functions, not bound
        # methods, so self has to be passed explicitly
        return getattr(self, self._fn_name)(self)

x = whatever(add, 6, 7)
print(x.do_it())  # 13
Or perhaps you want something like this:
from functools import partial

def add(self):
    return self.a + self.b

class whatever:
    def __init__(self, fn, a, b):
        self.__dict__[fn.__name__] = partial(fn, self)
        self.a, self.b = a, b

x = whatever(add, 5, 6)
x.add()
This kind of introspection is somewhat risky in deployed code ...
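Not part of the original answers, but for the specific ask of calling t.fcn() like any other method, binding the function to the instance with types.MethodType is a common idiom (Python 3 sketch):

import types

def addition(self):
    return self.a + self.b

class Test:
    def __init__(self, a, b, fcn):
        self.a = a
        self.b = b
        # MethodType binds fcn to this instance, so it receives self automatically
        self.fcn = types.MethodType(fcn, self)

t = Test(3, 3, addition)
print(t.fcn())  # 6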