using python WeakSet to enable a callback functionality - python

I'm investigating whether I can implement a simple callback facility in Python. I thought I might be able to use weakref.WeakSet for this, but there is clearly something I'm missing or have misunderstood. As you can see in the code, I first tried a list of callback methods in 'ClassA' objects, but realized that this would keep alive any object whose method had been added to the list of callbacks. Instead I tried weakref.WeakSet, but that doesn't do the trick either (at least not in this way). Comments in the last four lines of code explain what I want to happen.
Can anyone help me with this?
from weakref import WeakSet

class ClassA:
    def __init__(self):
        # self.destroyCallback = []
        self.destroyCallback = WeakSet()

    def __del__(self):
        print('ClassA object %d is being destroyed' % id(self))
        for f in self.destroyCallback:
            f(self)

class ClassB:
    def destroyedObjectListener(self, obj):
        print('ClassB object %d is called because obj %d is being destroyed' % (id(self), id(obj)))

a1 = ClassA()
a2 = ClassA()
b = ClassB()

a1.destroyCallback.add(b.destroyedObjectListener)
# a1.destroyCallback.append(b.destroyedObjectListener)
print('destroyCallback len() of obj: %d is: %d' % (id(a1), len(a1.destroyCallback)))  # should be 1

a2.destroyCallback.add(b.destroyedObjectListener)
# a2.destroyCallback.append(b.destroyedObjectListener)
print('destroyCallback len() of obj: %d is: %d' % (id(a2), len(a2.destroyCallback)))  # should be 1

del a1  # should call b.destroyedObjectListener(self) in its __del__ method
del b   # should result in no strong refs to b, so a2's WeakSet should automatically remove the added item
print('destroyCallback len() of obj: %d is: %d' % (id(a2), len(a2.destroyCallback)))  # should be 0
del a2  # should call the __del__ method
UPDATE: solution based on the accepted answer can be found on GitHub: git@github.com:thgis/PythonEvent.git

Weak references to method objects won't do what you want here. Method objects are short-lived; they are created on the fly each time you access the name on the instance (see the descriptor HOWTO for how that works).
When you access a method name, a new method object is created for you, and when you then add that method object to the WeakSet, no other references to it exist any more, so garbage collection happily cleans it up again.
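A minimal sketch of that transience (the class C and method m here are made up for illustration, not part of the question's code):

import weakref

class C:
    def m(self):
        pass

c = C()
print(c.m is c.m)      # False: each attribute access builds a fresh bound-method object
r = weakref.ref(c.m)   # the only reference to that fresh method object is the weakref...
print(r())             # ...so it is already dead: prints None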
You'll have to store something less transient. Storing instance objects themselves would work, then call a predefined method on the registered callbacks:
def __del__(self):
    for f in self.destroyCallback:
        f.destroyedObjectListener(self)
and to register:
a1.destroyCallback.add(b)
You can also make b itself a callable by giving it a __call__ method:
class ClassB:
    def __call__(self, obj):
        print('ClassB object %d is called because obj %d '
              'is being destroyed' % (id(self), id(obj)))
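With that variant, registration and the original __del__ loop (f(self)) work unchanged, because the WeakSet now stores the ClassB instance itself; a quick sketch (on CPython, where del drops the last reference immediately):

a1 = ClassA()
b = ClassB()
a1.destroyCallback.add(b)
del a1  # __del__ runs and the loop calls f(self), i.e. b(a1), via __call__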
Another approach would be to store a reference to the underlying function object plus a reference to the instance:
import weakref

class ClassA:
    def __init__(self):
        self._callbacks = []

    def registerCallback(self, callback):
        try:
            # methods
            callback_ref = weakref.ref(callback.__func__), weakref.ref(callback.__self__)
        except AttributeError:
            callback_ref = weakref.ref(callback), None
        self._callbacks.append(callback_ref)

    def __del__(self):
        for callback_ref in self._callbacks:
            callback, arg = callback_ref[0](), callback_ref[1]
            if arg is not None:
                # method
                arg = arg()
                if arg is None:
                    # instance is gone
                    continue
                callback(arg, self)
                continue
            else:
                if callback is None:
                    # callback has been deleted already
                    continue
                callback(self)
Demo:
>>> class ClassB:
...     def listener(self, deleted):
...         print('ClassA {} was deleted, notified ClassB {}'.format(id(deleted), id(self)))
...
>>> def listener1(deleted):
...     print('ClassA {} was deleted, notified listener1'.format(id(deleted)))
...
>>> def listener2(deleted):
...     print('ClassA {} was deleted, notified listener2'.format(id(deleted)))
...
>>> # setup, one ClassA and 4 listeners (2 methods, 2 functions)
...
>>> a = ClassA()
>>> b1 = ClassB()
>>> b2 = ClassB()
>>> a.registerCallback(b1.listener)
>>> a.registerCallback(b2.listener)
>>> a.registerCallback(listener1)
>>> a.registerCallback(listener2)
>>>
>>> # deletion, we delete one instance of ClassB, and one function
...
>>> del b1
>>> del listener1
>>>
>>> # Deleting the ClassA instance will only notify the listeners still remaining
...
>>> del a
ClassA 4435440336 was deleted, notified ClassB 4435541648
ClassA 4435440336 was deleted, notified listener2

Try the following changes:
To update the WeakSet:
a1.destroyCallback.add(b)
so the WeakSet holds a reference to b.
Then in the __del__ method of ClassA, trigger the callback like this:
for f in self.destroyCallback:
    f.destroyedObjectListener(self)

Related

Avoiding memory leak with circular references between classes [duplicate]

Why set a bound method to python object create a circular reference?

I'm working in Python 2.7 and I found an issue that puzzles me.
Here is the simplest example:
>>> class A(object):
        def __del__(self):
            print("DEL")
        def a(self):
            pass
>>> a = A()
>>> del a
DEL
That is OK, as expected... Now I try to change the a() method of object a, and what happens is that after changing it I can't delete a any more:
>>> a = A()
>>> a.a = a.a
>>> del a
Just to do some checks, I've printed the a.a reference before and after the assignment:
>>> a = A()
>>> print a.a
<bound method A.a of <__main__.A object at 0xe86110>>
>>> a.a = a.a
>>> print a.a
<bound method A.a of <__main__.A object at 0xe86110>>
Finally I used the objgraph module to try to understand why the object is not released:
>>> b = A()
>>> import objgraph
>>> objgraph.show_backrefs([b], filename='pre-backref-graph.png')
>>> b.a = b.a
>>> objgraph.show_backrefs([b], filename='post-backref-graph.png')
As you can see in the post-backref-graph.png image, there is a __self__ reference in b that makes no sense to me, because the self reference of an instance method should be ignored (as it was before the assignment).
Can somebody explain why this behaviour occurs and how I can work around it?
When you write a.a, it effectively runs:
A.a.__get__(a, A)
because you are not accessing a pre-bound method but the class' method that is being bound at runtime.
When you do
a.a = a.a
you effectively "cache" the act of binding the method. As the bound method has a reference to the object (obviously, as it has to pass self to the function) this creates a circular reference.
So I'm modelling your problem like:
class A(object):
    def __del__(self):
        print("DEL")

    def a(self):
        pass

def log_all_calls(function):
    def inner(*args, **kwargs):
        print("Calling {}".format(function))
        try:
            return function(*args, **kwargs)
        finally:
            print("Called {}".format(function))
    return inner

a = A()
a.a = log_all_calls(a.a)
a.a()
You can use weak references to bind on demand inside log_all_calls like:
import weakref

class A(object):
    def __del__(self):
        print("DEL")

    def a(self):
        pass

def log_all_calls_weakmethod(method):
    cls = method.im_class
    func = method.im_func
    instance_ref = weakref.ref(method.im_self)
    del method

    def inner(*args, **kwargs):
        instance = instance_ref()
        if instance is None:
            raise ValueError("Cannot call weak decorator with dead instance")
        function = func.__get__(instance, cls)
        print("Calling {}".format(function))
        try:
            return function(*args, **kwargs)
        finally:
            print("Called {}".format(function))
    return inner

a = A()
a.a = log_all_calls_weakmethod(a.a)
a.a()
This is really ugly, so I would rather extract it out to make a weakmethod decorator:
import weakref

def weakmethod(method):
    cls = method.im_class
    func = method.im_func
    instance_ref = weakref.ref(method.im_self)
    del method

    def inner(*args, **kwargs):
        instance = instance_ref()
        if instance is None:
            raise ValueError("Cannot call weak method with dead instance")
        return func.__get__(instance, cls)(*args, **kwargs)
    return inner

class A(object):
    def __del__(self):
        print("DEL")

    def a(self):
        pass

def log_all_calls(function):
    def inner(*args, **kwargs):
        print("Calling {}".format(function))
        try:
            return function(*args, **kwargs)
        finally:
            print("Called {}".format(function))
    return inner

a = A()
a.a = log_all_calls(weakmethod(a.a))
a.a()
Done!
FWIW, not only does Python 3.4 not have these issues, it also has WeakMethod pre-built for you.
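For reference, a rough Python 3.4+ sketch of the same decorator built on the standard library's weakref.WeakMethod (not the answer's original code):

import weakref

def weakmethod(method):
    ref = weakref.WeakMethod(method)   # weak reference that can re-create the bound method
    def inner(*args, **kwargs):
        bound = ref()
        if bound is None:
            raise ValueError("Cannot call weak method with dead instance")
        return bound(*args, **kwargs)
    return inner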
Veedrac's answer about the bound method keeping a reference to the instance is only part of the answer. CPython's garbage collector knows how to detect and handle cyclic references - except when some object that's part of the cycle has a __del__ method, as mentioned here https://docs.python.org/2/library/gc.html#gc.garbage :
Objects that have __del__() methods and are part of a reference cycle
cause the entire reference cycle to be uncollectable, including
objects not necessarily in the cycle but reachable only from it.
Python doesn’t collect such cycles automatically because, in general,
it isn’t possible for Python to guess a safe order in which to run the
__del__() methods. (...) It’s generally better to avoid the issue by not creating cycles containing objects with __del__() methods, and
garbage can be examined in that case to verify that no such cycles are
being created.
IOW : remove your __del__ method and you should be fine.
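A minimal Python 2 illustration of the quoted behaviour (a sketch; on Python 3.4+ this changed and such cycles are collected):

import gc

class WithDel(object):
    def __del__(self):
        pass

w = WithDel()
w.ref = w          # reference cycle through an object that defines __del__
del w
gc.collect()
print gc.garbage   # on Python 2: the WithDel instance ends up here, uncollected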
EDIT: regarding your comment:
I use it on the object as function a.a = functor(a.a). When the test
is done I would like to replace the functor by the original method.
Then the solution is plain and simple:
a = A()
a.a = functor(a.a)
test(a)
del a.a
Until you explicitly bind it, a has no 'a' instance attribute, so the lookup falls through to the class and a new method instance is returned (cf. https://wiki.python.org/moin/FromFunctionToMethod for more on this). This method instance is then called, and (usually) discarded.
As to why Python does this: technically, every object with methods would contain circular references if it held its bound methods directly. However, garbage collection would take much longer if the collector had to do explicit checks on an object's methods to make sure freeing the object wouldn't cause a problem. As such, Python stores methods on the class, separately from an object's __dict__. So when you write a.a = a.a, you are shadowing the method with a bound copy of itself in the a field on the object, and that explicit reference to the method prevents the object from being freed properly.
The solution to your problem is to not bother keeping a "cache" of the original method; just delete the shadowing attribute when you're done with it. This will unshadow the method and make it available again.
>>> class A(object):
...     def __del__(self):
...         print("del")
...     def method(self):
...         print("method")
>>> a = A()
>>> vars(a)
{}
>>> "method" in dir(a)
True
>>> a.method = a.method
>>> vars(a)
{'method': <bound method A.method of <__main__.A object at 0x0000000001F07940>>}
>>> "method" in dir(a)
True
>>> a.method()
method
>>> del a.method
>>> vars(a)
{}
>>> "method" in dir(a)
True
>>> a.method()
method
>>> del a
del
Here vars shows what's in the __dict__ attribute of an object. Note how __dict__ doesn't contain a reference to itself even though a.__dict__ is valid. dir produces a list of all the attributes reachable from the given object: all the attributes and methods of the object plus all the methods and attributes of its class and its bases. This shows that the bound method of a is stored in a place separate from where a's attributes are stored.

How to keep track of class instances?

Toward the end of a program I'm looking to load a specific variable from all the instances of a class into a dictionary.
For example:
class Foo():
    def __init__(self):
        self.x = {}
foo1 = Foo()
foo2 = Foo()
...
Let's say the number of instances will vary and I want the x dict from each instance of Foo() loaded into a new dict. How would I do that?
The examples I've seen in SO assume one already has the list of instances.
One way to keep track of instances is with a class variable:
class A(object):
    instances = []
    def __init__(self, foo):
        self.foo = foo
        A.instances.append(self)
At the end of the program, you can create your dict like this:
foo_vars = {id(instance): instance.foo for instance in A.instances}
There is only one list:
>>> a = A(1)
>>> b = A(2)
>>> A.instances
[<__main__.A object at 0x1004d44d0>, <__main__.A object at 0x1004d4510>]
>>> id(A.instances)
4299683456
>>> id(a.instances)
4299683456
>>> id(b.instances)
4299683456
@JoelCornett's answer covers the basics perfectly. This is a slightly more complicated version, which might help with a few subtle issues.
If you want to be able to access all the "live" instances of a given class, subclass the following (or include equivalent code in your own base class):
from weakref import WeakSet

class base(object):
    def __new__(cls, *args, **kwargs):
        instance = object.__new__(cls, *args, **kwargs)
        if "instances" not in cls.__dict__:
            cls.instances = WeakSet()
        cls.instances.add(instance)
        return instance
This addresses two possible issues with the simpler implementation that @JoelCornett presented:
1. Each subclass of base will keep track of its own instances separately. You won't get subclass instances in a parent class's instance list, and one subclass will never stumble over instances of a sibling subclass. This might be undesirable, depending on your use case, but it's probably easier to merge the sets back together (see the sketch after this list) than it is to split them apart.
2. The instances set uses weak references to the class's instances, so if you del or reassign all the other references to an instance elsewhere in your code, the bookkeeping code will not prevent it from being garbage collected. Again, this might not be desirable for some use cases, but it is easy enough to use regular sets (or lists) instead of a weakset if you really want every instance to last forever.
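A rough sketch of that merging, assuming the base class above (walks the subclass tree and unions the per-class registries):

def all_instances(cls):
    # union of this class's registry and those of every subclass, recursively
    found = set(getattr(cls, "instances", ()))
    for sub in cls.__subclasses__():
        found |= all_instances(sub)
    return found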
Some handy-dandy test output (with the instances sets always being passed to list only because they don't print out nicely):
>>> b = base()
>>> list(base.instances)
[<__main__.base object at 0x00000000026067F0>]
>>> class foo(base):
... pass
...
>>> f = foo()
>>> list(foo.instances)
[<__main__.foo object at 0x0000000002606898>]
>>> list(base.instances)
[<__main__.base object at 0x00000000026067F0>]
>>> del f
>>> list(foo.instances)
[]
You would probably want to use weak references to your instances. Otherwise the class could likely end up keeping track of instances that were meant to have been deleted. A weakref.WeakSet will automatically remove any dead instances from its set.
One way to keep track of instances is with a class variable:
import weakref
class A(object):
instances = weakref.WeakSet()
def __init__(self, foo):
self.foo = foo
A.instances.add(self)
#classmethod
def get_instances(cls):
return list(A.instances) #Returns list of all current instances
At the end of the program, you can create your dict like this:
foo_vars = {id(instance): instance.foo for instance in A.instances}
There is only one list:
>>> a = A(1)
>>> b = A(2)
>>> A.get_instances()
[<inst.A object at 0x100587290>, <inst.A object at 0x100587250>]
>>> id(A.instances)
4299861712
>>> id(a.instances)
4299861712
>>> id(b.instances)
4299861712
>>> a = A(3) #original a will be dereferenced and replaced with new instance
>>> A.get_instances()
[<inst.A object at 0x100587290>, <inst.A object at 0x1005872d0>]
You can also solve this problem using a metaclass:
When a class is created (__init__ method of metaclass), add a new instance registry
When a new instance of this class is created (__call__ method of metaclass), add it to the instance registry.
The advantage of this approach is that each class has a registry - even if no instance exists. In contrast, when overriding __new__ (as in Blckknght's answer), the registry is added when the first instance is created.
import weakref

class MetaInstanceRegistry(type):
    """Metaclass providing an instance registry"""

    def __init__(cls, name, bases, attrs):
        # Create class
        super(MetaInstanceRegistry, cls).__init__(name, bases, attrs)
        # Initialize fresh instance storage
        cls._instances = weakref.WeakSet()

    def __call__(cls, *args, **kwargs):
        # Create instance (calls __init__ and __new__ methods)
        inst = super(MetaInstanceRegistry, cls).__call__(*args, **kwargs)
        # Store weak reference to instance. WeakSet will automatically remove
        # references to objects that have been garbage collected
        cls._instances.add(inst)
        return inst

    def _get_instances(cls, recursive=False):
        """Get all instances of this class in the registry. If recursive=True
        search subclasses recursively"""
        instances = list(cls._instances)
        if recursive:
            for Child in cls.__subclasses__():
                instances += Child._get_instances(recursive=recursive)
        # Remove duplicates from multiple inheritance.
        return list(set(instances))
Usage: Create a registry and subclass it.
class Registry(object):
    __metaclass__ = MetaInstanceRegistry

class Base(Registry):
    def __init__(self, x):
        self.x = x

class A(Base):
    pass

class B(Base):
    pass

class C(B):
    pass

a = A(x=1)
a2 = A(2)
b = B(x=3)
c = C(4)

for cls in [Base, A, B, C]:
    print cls.__name__
    print cls._get_instances()
    print cls._get_instances(recursive=True)
    print

del c
print C._get_instances()
If using abstract base classes from the abc module, just subclass abc.ABCMeta to avoid metaclass conflicts:
from abc import ABCMeta, abstractmethod

class ABCMetaInstanceRegistry(MetaInstanceRegistry, ABCMeta):
    pass

class ABCRegistry(object):
    __metaclass__ = ABCMetaInstanceRegistry

class ABCBase(ABCRegistry):
    __metaclass__ = ABCMeta

    @abstractmethod
    def f(self):
        pass

class E(ABCBase):
    def __init__(self, x):
        self.x = x

    def f(self):
        return self.x

e = E(x=5)
print E._get_instances()
Another option for quick low-level hacks and debugging is to filter the list of objects returned by gc.get_objects() and generate the dictionary on the fly that way. In CPython that function will return you a (generally huge) list of everything the garbage collector knows about, so it will definitely contain all of the instances of any particular user-defined class.
Note that this is digging a bit into the internals of the interpreter, so it may or may not work (or work well) with the likes of Jython, PyPy, IronPython, etc. I haven't checked. It's also likely to be really slow regardless. Use with caution/YMMV/etc.
However, I imagine that some people running into this question might eventually want to do this sort of thing as a one-off to figure out what's going on with the runtime state of some slice of code that's behaving strangely. This method has the benefit of not affecting the instances or their construction at all, which might be useful if the code in question is coming out of a third-party library or something.
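A rough sketch of that filtering approach, assuming the question's Foo class with its x attribute (CPython only, and subject to the caveats above):

import gc

# Build the id -> x mapping on the fly from everything the collector tracks.
foo_vars = {id(obj): obj.x for obj in gc.get_objects() if isinstance(obj, Foo)}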
Here's a similar approach to Blckknght's, which works with subclasses as well. Thought this might be of interest, if someone ends up here. One difference, if B is a subclass of A, and b is an instance of B, b will appear in both A.instances and B.instances. As stated by Blckknght, this depends on the use case.
from weakref import WeakSet

class RegisterInstancesMixin:
    instances = WeakSet()

    def __new__(cls, *args, **kargs):
        o = object.__new__(cls, *args, **kargs)
        cls._register_instance(o)
        return o

    @classmethod
    def print_instances(cls):
        for instance in cls.instances:
            print(instance)

    @classmethod
    def _register_instance(cls, instance):
        cls.instances.add(instance)
        for b in cls.__bases__:
            if issubclass(b, RegisterInstancesMixin):
                b._register_instance(instance)

    def __init_subclass__(cls):
        cls.instances = WeakSet()

class Animal(RegisterInstancesMixin):
    pass

class Mammal(Animal):
    pass

class Human(Mammal):
    pass

class Dog(Mammal):
    pass

alice = Human()
bob = Human()
cannelle = Dog()

Animal.print_instances()
Mammal.print_instances()
Human.print_instances()
Animal.print_instances() will print three objects, whereas Human.print_instances() will print two.
Using the answer from @Joel Cornett I've come up with the following, which seems to work, i.e. I'm able to total up object variables.
import os
os.system("clear")

class Foo():
    instances = []
    def __init__(self):
        Foo.instances.append(self)
        self.x = 5

class Bar():
    def __init__(self):
        pass
    def testy(self):
        self.foo1 = Foo()
        self.foo2 = Foo()
        self.foo3 = Foo()

foo = Foo()
print Foo.instances
bar = Bar()
bar.testy()
print Foo.instances

x_tot = 0
for inst in Foo.instances:
    x_tot += inst.x
    print x_tot
output:
[<__main__.Foo instance at 0x108e334d0>]
[<__main__.Foo instance at 0x108e334d0>, <__main__.Foo instance at 0x108e33560>, <__main__.Foo instance at 0x108e335a8>, <__main__.Foo instance at 0x108e335f0>]
5
10
15
20
In Python you can also record class instances while defining the class, using the "dataclass" decorator. Define a class attribute 'instances' (or any other name) as a list; in __init__, append the dict form of each created object to that list via the dunder attribute __dict__. The class attribute 'instances' will then hold every instance in dict form, which is what you want.
For example,
from dataclasses import dataclass

@dataclass
class player:
    instances = []

    def __init__(self, name, rank):
        self.name = name
        self.rank = rank
        self.instances.append(self.__dict__)
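Hypothetical usage of the snippet above: each constructed player leaves its attribute dict in the shared class-level list.

p1 = player('alice', 1)
p2 = player('bob', 2)
print(player.instances)   # roughly: [{'name': 'alice', 'rank': 1}, {'name': 'bob', 'rank': 2}]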

Built-in function to read __slots__

Let's say I have a class like this:
class Test(object):
    prop = property(lambda self: "property")
The descriptor takes priority whenever I try to access Test().prop. So that will return 'property'. If I want to access the object's instance storage, I can do:
x = Test()
x.__dict__["prop"] = 12
print(x.__dict__["prop"])
However if I change my class to:
class Test(object):
    __slots__ = ("prop",)
    prop = property(lambda self: "property")
How do I do the same, and access the internal storage of x, to write 12 and read it back, since x.__dict__ no longer exist?
I am fairly new with Python, but I understand the Python philosophy is to give complete control, so why is an implementation detail preventing me from doing that?
Isn't Python missing a built-in function that could read from an instance internal storage, something like:
instance_vars(x)["prop"] = 12
print(instance_vars(x)["prop"])
which would work like vars, except it also works with __slots__, and with built-in types that don't have a __dict__?
Short answer: you can't.
The problem is that slots are themselves implemented in terms of descriptors. Given:
class Test(object):
    __slots__ = ("prop",)

t = Test()
the phrase:
t.prop
is translated, approximately, to:
Test.prop.__get__(t, Test)
where Test.prop is a <type 'member_descriptor'> crafted by the run-time specifically to load prop values out of Test instances from their reserved space.
If you add another descriptor to the class body definition, it masks out the member_descriptor that would let you get to the slotted attribute; there's no way to ask for it, it's just not there anymore. It's effectively like saying:
class Test(object):
    @property
    def prop(self):
        return self.__dict__['prop']

    @property
    def prop(self):
        return "property"
You've defined it twice; there's no way to "get at" the first prop definition.
But:
Long answer: you can't in a general way. You can, though, in specific cases.
You can still abuse the Python type system to get at it using another class definition. You can change the type of a Python object, so long as it has the exact same class layout, which roughly means that it has all of the same slots:
>>> class Test1(object):
...     __slots__ = ["prop"]
...     prop = property(lambda self: "property")
...
>>> class Test2(object):
...     __slots__ = ["prop"]
...
>>> t = Test1()
>>> t.prop
'property'
>>> t.__class__ = Test2
>>> t.prop = 5
>>> t.prop
5
>>> t.__class__ = Test1
>>> t.prop
'property'
But there's no general way to introspect an instance to work out its class layout; you just have to know it from context. You could look at its __slots__ class attribute, but that won't tell you about the slots provided in the superclass (if any), nor will it give you any hint if that attribute has changed for some reason after the class was defined.
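For what it's worth, a rough sketch that at least gathers the declared slot names across the MRO (it still cannot detect changes made to __slots__ after class creation, as noted above):

def declared_slots(cls):
    # collect __slots__ entries from the class and all of its bases
    names = []
    for klass in cls.__mro__:
        slots = getattr(klass, '__slots__', ())
        names.extend([slots] if isinstance(slots, str) else slots)
    return names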
I don't quite understand what and why you want to do this, but does this help you?
>>> class Test(object):
        __slots__ = ("prop",)
        prop = property(lambda self: "property")
>>> a = Test()
>>> b = Test()
>>> a.prop
'property'
>>> tmp = Test.prop
>>> Test.prop = 23
>>> a.prop
23
>>> Test.prop = tmp; del tmp
>>> b.prop
'property'
Of course, you cannot overwrite the property on a per-instance basis; that's the whole point of slotted descriptors.
Note that subclasses of a class with __slots__ do have a __dict__ unless you also define __slots__ in the subclass, so you can do:
>>> class Test2(Test):pass
>>> t = Test2()
>>> t.prop
'property'
>>> t.__dict__['prop'] = 5
>>> t.__dict__['prop']
5
>>> Test2.prop
<property object at 0x00000000032C4278>
but still:
>>> t.prop
'property'
and that's not because of __slots__; it's the way descriptors work.
Your __dict__ is bypassed on attribute lookup; you are just abusing it as a data structure that happens to be there for storing state.
It is equivalent to doing this:
>>> class Test(object):
        __slots__ = ("prop", "state")
        prop = property(lambda self: "property")
        state = {"prop": prop}
>>> t.prop
'property'
>>> t.state["prop"] = 5
>>> t.state["prop"]
5
>>> t.prop
'property'
If you ever really want to do something like that, and you REALLY, REALLY need something like that, you can always override __getattribute__ and __setattribute__; it's just as stupid... This is just to prove it to you:
class Test(object):
    __slots__ = ("prop",)
    prop = property(lambda self: "property")
    __internal__ = {}

    def __getattribute__(self, k):
        if k == "__dict__":
            return self.__internal__
        else:
            try:
                return object.__getattribute__(self, k)
            except AttributeError, e:
                try:
                    return self.__internal__[k]
                except KeyError:
                    raise e

    def __setattribute__(self, k, v):
        self.__internal__[k] = v
        object.__setattribute__(self, k, v)

t = Test()
print t.prop
t.__dict__["prop"] = "test"
print "from dict", t.__dict__["prop"]
print "from getattr", t.prop

import traceback

# These won't work: raise AttributeError
try:
    t.prop2 = "something"
except AttributeError:
    print "see? I told you!"
    traceback.print_exc()

try:
    print t.prop2
except AttributeError:
    print "Haha! Again!"
    traceback.print_exc()
(Tried it on Python 2.7)
It's exactly what you expect I guess. Don't do this, it's useless.

Python object deleting itself

Why won't this work? I'm trying to make an instance of a class delete itself.
>>> class A():
        def kill(self):
            del self
>>> a = A()
>>> a.kill()
>>> a
<__main__.A instance at 0x01F23170>
'self' is only a reference to the object. 'del self' is deleting the 'self' reference from the local namespace of the kill function, instead of the actual object.
To see this for yourself, look at what happens when these two functions are executed:
>>> class A():
...     def kill_a(self):
...         print self
...         del self
...     def kill_b(self):
...         del self
...         print self
...
>>> a = A()
>>> b = A()
>>> a.kill_a()
<__main__.A instance at 0xb771250c>
>>> b.kill_b()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<stdin>", line 7, in kill_b
UnboundLocalError: local variable 'self' referenced before assignment
You don't need to use del to delete instances in the first place. Once the last reference to an object is gone, the object will be garbage collected. Maybe you should tell us more about the full problem.
I think I've finally got it!
NOTE: You should not use this in normal code, but it is possible.
This is only meant as a curiosity, see other answers for real-world solutions to this problem.
Take a look at this code:
# NOTE: This is Python 3 code; it should work with Python 2, but I haven't tested it.
import weakref

class InsaneClass(object):
    _alive = []

    def __new__(cls):
        self = super().__new__(cls)
        InsaneClass._alive.append(self)
        return weakref.proxy(self)

    def commit_suicide(self):
        self._alive.remove(self)

instance = InsaneClass()
instance.commit_suicide()
print(instance)
# Raises Error: ReferenceError: weakly-referenced object no longer exists
When the object is created in the __new__ method, the instance is replaced by a weak reference proxy and the only strong reference is kept in the _alive class attribute.
What is a weak reference?
A weak reference is a reference that does not count towards keeping the object alive: the garbage collector is free to collect an object that is only reachable through weak references. Consider this example:
>>> class Test(): pass
>>> a = Test()
>>> b = Test()
>>> c = a
>>> d = weakref.proxy(b)
>>> d
<weakproxy at 0x10671ae58 to Test at 0x10670f4e0>
# The weak reference points to the Test() object
>>> del a
>>> c
<__main__.Test object at 0x10670f390> # c still exists
>>> del b
>>> d
<weakproxy at 0x10671ab38 to NoneType at 0x1002050d0>
# d is now only a weak-reference to None. The Test() instance was garbage-collected
So the only strong reference to the instance is stored in the _alive class attribute. And when the commit_suicide() method removes the reference the instance is garbage-collected.
In this specific context, your example doesn't make a lot of sense.
When a Being picks up an Item, the item retains an individual existence. It doesn't disappear because it's been picked up. It still exists, but it's (a) in the same location as the Being, and (b) no longer eligible to be picked up. While it's had a state change, it still exists.
There is a two-way association between Being and Item. The Being has the Item in a collection. The Item is associated with a Being.
When an Item is picked up by a Being, two things have to happen.
The Being now adds the Item to some set of items. Your bag attribute, for example, could be such a set. [A list is a poor choice -- does order matter in the bag?]
The Item's location changes from where it used to be to the Being's location. There are probably two classes of Items: those with an independent sense of location (because they move around by themselves) and items that have to delegate location to the Being or Place where they're sitting.
Under no circumstances does any Python object ever need to get deleted. If an item is "destroyed", then it's not in a Being's bag. It's not in a location.
player.bag.remove(cat)
Is all that's required to let the cat out of the bag. Since the cat is not used anywhere else, it will both exist as "used" memory and not exist because nothing in your program can access it. It will quietly vanish from memory when some quantum event occurs and memory references are garbage collected.
On the other hand,
here.add( cat )
player.bag.remove(cat)
Will put the cat in the current location. The cat continues to exist, and will not be put out with the garbage.
Realistically you should not need to delete the object to do what you are trying to do. Instead you can change the state of the object.
An example of how this works without getting into the coding: your player fights a monster and kills it. The monster's state is "fighting", and in that state it uses all the methods needed for fighting. When the monster dies because its health drops to 0, its state changes to "dead" and your character stops attacking automatically. This methodology is very similar to using flags or even keywords.
Also, in Python, deleting instances is not required, since they will be garbage collected automatically once they are no longer used.
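A minimal sketch of that state-change idea (the Monster class and attribute names here are made up for illustration):

class Monster:
    def __init__(self, health):
        self.health = health
        self.state = "fighting"

    def take_damage(self, amount):
        self.health -= amount
        if self.health <= 0:
            self.state = "dead"   # flip the state instead of deleting the object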
I can't tell you how this is possible with classes, but functions can delete themselves.
def kill_self(exit_msg='killed'):
    global kill_self
    del kill_self
    return exit_msg
And see the output:
>>> kill_self
<function kill_self at 0x02A2C780>
>>> kill_self()
'killed'
>>> kill_self
Traceback (most recent call last):
File "<pyshell#28>", line 1, in <module>
kill_self
NameError: name 'kill_self' is not defined
I don't think that deleting an individual instance of a class without knowing the name of it is possible.
NOTE:
If you assign another name to the function, the other name will still reference the old one, but will cause errors once you attempt to run it:
>>> x = kill_self
>>> kill_self()
>>> kill_self
NameError: name 'kill_self' is not defined
>>> x
<function kill_self at 0x...>
>>> x()
NameError: global name 'kill_self' is not defined
I am trying the same thing. I have an RPG battle system in which my Death(self) function has to kill the Fighter object it belongs to, but it appears that's not possible. Maybe my Game class, in which I collect all participants in the combat, should delete units from the "fictional" map instead?
def Death(self):
    if self.stats["HP"] <= 0:
        print("%s wounds were too much... Dead!" % (self.player["Name"]))
        del self
    else:
        return True

def Damage(self, enemy):
    todamage = self.stats["ATK"] + randint(1, 6)
    todamage -= enemy.stats["DEF"]
    if todamage >= 0:
        enemy.stats["HP"] -= todamage
        print("%s took %d damage from your attack!" % (enemy.player["Name"], todamage))
        enemy.Death()
        return True
    else:
        print("Ineffective...")
        return True

def Attack(self, enemy):
    tohit = self.stats["DEX"] + randint(1, 6)
    if tohit > enemy.stats["EVA"]:
        print("You landed a successful attack on %s " % (enemy.player["Name"]))
        self.Damage(enemy)
        return True
    else:
        print("Miss!")
        return True

def Action(self, enemylist):
    for i in range(0, len(enemylist)):
        print("No.%d, %r" % (i, enemylist[i]))
    print("It`s your turn, %s. Take action!" % (self.player["Name"]))
    choice = input("\n(A)ttack\n(D)efend\n(S)kill\n(I)tem\n(H)elp\n>")
    if choice == 'a' or choice == 'A':
        who = int(input("Who? "))
        self.Attack(enemylist[who])
        return True
    else:
        return self.Action()
Indeed, Python does garbage collection through reference counting. As soon as the last reference to an object falls out of scope, it is deleted. In your example:
a = A()
a.kill()
I don't believe there's any way for variable 'a' to implicitly set itself to None.
This is something I have done in the past.
Create a list of objects, and you can then have objects delete themselves with the list.remove() method.
bullet_list = []

class Bullet:
    def kill_self(self):
        bullet_list.remove(self)

bullet_list += [Bullet()]
If you're using a single reference to the object, then the object can kill itself by resetting that outside reference to itself, as in:
class Zero:
    pOne = None

class One:
    pTwo = None

    def process(self):
        self.pTwo = Two()
        self.pTwo.dothing()
        self.pTwo.kill()
        # now this fails:
        self.pTwo.dothing()

class Two:
    def dothing(self):
        print "two says: doing something"

    def kill(self):
        Zero.pOne.pTwo = None

def main():
    Zero.pOne = One()  # just a global
    Zero.pOne.process()

if __name__ == "__main__":
    main()
You can of course do the logic control by checking for the object existence from outside the object (rather than object state), as for instance in:
if object_exists:
    use_existing_obj()
else:
    obj = Obj()
I'm curious as to why you would want to do such a thing. Chances are, you should just let garbage collection do its job. In python, garbage collection is pretty deterministic. So you don't really have to worry as much about just leaving objects laying around in memory like you would in other languages (not to say that refcounting doesn't have disadvantages).
Although one thing that you should consider is a wrapper around any objects or resources you may get rid of later.
class foo(object):
    def __init__(self):
        self.some_big_object = some_resource

    def killBigObject(self):
        # drop the instance's reference so the big object can be collected
        del self.some_big_object
In response to Null's addendum:
Unfortunately, I don't believe there's a way to do what you want to do the way you want to do it. Here's one way that you may wish to consider:
>>> class manager(object):
...     def __init__(self):
...         self.lookup = {}
...     def addItem(self, name, item):
...         self.lookup[name] = item
...         item.setLookup(self.lookup)
>>> class Item(object):
...     def __init__(self, name):
...         self.name = name
...     def setLookup(self, lookup):
...         self.lookup = lookup
...     def deleteSelf(self):
...         del self.lookup[self.name]
>>> man = manager()
>>> item = Item("foo")
>>> man.addItem("foo", item)
>>> man.lookup
{'foo': <__main__.Item object at 0x81b50>}
>>> item.deleteSelf()
>>> man.lookup
{}
It's a little bit messy, but that should give you the idea. Essentially, I don't think that tying an item's existence in the game to whether or not it's allocated in memory is a good idea. This is because the conditions for the item to be garbage collected are probably going to be different than what the conditions are for the item in the game. This way, you don't have to worry so much about that.
What you could do is store the name in the object and keep a dictionary of instances:
class A:
    def __init__(self, name):
        self.name = name

    def kill(self):
        del dict[self.name]

dict = {}
dict["a"] = A("a")
dict["a"].kill()
class A:
    def __init__(self, function):
        self.function = function

    def kill(self):
        self.function(self)

def delete(object):  # We are no longer in the A object
    del object

a = A(delete)
print(a)
a.kill()
print(a)
Would this code work?
A replacement implementation:
class A:
    def __init__(self):
        self.a = 123

    def kill(self):
        from itertools import chain
        for attr_name in chain(dir(self.__class__), dir(self)):
            if attr_name.startswith('__'):
                continue
            attr = getattr(self, attr_name)
            if callable(attr):
                setattr(self, attr_name, lambda *args, **kwargs: print('NoneType'))
            else:
                setattr(self, attr_name, None)
        a.__str__ = lambda: ''
        a.__repr__ = lambda: ''
a = A()
print(a.a)
a.kill()
print(a.a)
a.kill()
a = A()
print(a.a)
will output:
123
None
NoneType
123
EDIT: This does not work
If you create an object with an attribute corresponding to the name of the variable it is technically possible using exec.
class Example():
    def __init__(self, name) -> None:
        self.var_name = name

    def kill(self):
        exec(f'del {self.var_name}')

coolvar = Example('coolvar')
coolvar.kill()
Keep in mind that you should avoid using exec at all costs, always.
