I wish to display some variables and data members in hexadecimal format whenever they are printed.
My current approach is to define a class Hex:
class Hex:
"""Facilitate printing an integer in hexadecimal format."""
def __init__(self, data):
if not isinstance(data, int):
raise ValueError("Hex expects an integer.")
self._data = data
def __repr__(self):
return hex(self._data)
def __eq__(self, other):
return self._data == other
def __lt__(self, other):
return self._data < other
def __le__(self, other):
return self._data <= other
def __gt__(self, other):
return self._data > other
def __ge__(self, other):
return self._data >= other
def __add__(self, right):
return self._data + right
def __radd__(self, left):
return self._data + left
def __hash__(self):
return hash(self._data)
This works all right, as the following examples demonstrate:
address = Hex(0xdeadbeef)
record1 = {'address': address, 'counter': 42}
print(record1)
{'address': 0xdeadbeef, 'counter': 42}
The __hash__ is defined so that the object is hashable and can be used as a key in a dict:
record2 = {address: 'address'}
print(record2)
{0xdeadbeef: 'address'}
The equality and comparisons are defined so that handling of duplicates and sorting works:
list1 = [Hex(0xff), 15, 23, Hex(0xf), 255]
print(list1)
[0xff, 15, 23, 0xf, 255]
print(set(list1))
{15, 23, 0xff}
list1.sort()
print(list1)
[15, 0xf, 23, 0xff, 255]
The __add__ and __radd__ are defined so that pointer arithmetic is supported:
new_address = Hex(record1['address'] + 0x20400000)
print(new_address)
0xfeedbeef
address += 0x3f00
address += Hex(0xfe)
print(address)
0xdeadfeed
And now to the questions.
Is there a built-in or existing integer type that has its own __repr__ attached so that it prints in hex but otherwise works as an int? I could not find one, hence the class above.
The above pointer arithmetic works (subtraction and negation can be added similarly), and I was surprised that += works as well. Should I still add __iadd__?
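For reference, a possible __iadd__ (a sketch of my own, not part of the class above) that keeps the wrapper type after in-place addition could look like this:
def __iadd__(self, other):
    # without __iadd__, "h += n" rebinds h to whatever __add__ returns,
    # which above is a plain int; returning a Hex keeps the hex repr
    value = other._data if isinstance(other, Hex) else other
    return Hex(self._data + value)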
Should I add/override __new__? Something like the following would avoid creating duplicate instances for the same value:
def __new__(cls, *args, **kwargs):
if not hasattr(cls, 'instances'):
cls.instances = {}
    data = args[0]  # the first positional argument after cls
if data in cls.instances:
return cls.instances[data]
# Create if not found:
inst = super(Hex, cls).__new__(cls) #, *args, **kwargs)
cls.instances[data] = inst
return inst
Any other suggestions to fix the class Hex or make it better?
Instead of creating a new class from scratch that holds an int (_data), why don't you simply inherit from int and override the __repr__ method?
I wouldn't go as far as optimizing for duplicated values.
class Hex(int):
def __repr__(self):
return hex(self)
Edit -
Override the methods that return a new int by calling super() and wrapping the result in a Hex object. For example -
def __add__(self, val):
return Hex(super().__add__(val))
It looks a little verbose, but it works. Plus, you can write a monkey patcher that takes a list of all the operations you want to override -
ops = ['__add__', '__sub__', '__mul__']
def monkey_patch(operation: str):
"""
wraps Hex() around int's magic
method for provided operation.
"""
old_op = getattr(int, operation)
new_op = lambda self, val : Hex(old_op(self, val))
setattr(Hex, operation, new_op)
for op in ops:
monkey_patch(op)
This works -
>>> a = Hex(0xf)
>>> a += 1
>>> a
0x10
>>> a -= 1
>>> a
0xf
>>> a * 2
0x1e
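The ops list could presumably also include the reflected and other integer operations, so results stay Hex with the Hex operand on either side (my extension, not part of the original answer):
ops += ['__radd__', '__rsub__', '__rmul__', '__floordiv__', '__mod__', '__and__', '__or__', '__xor__']
for op in ops:
    monkey_patch(op)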
How about inheriting from int?
>>> class Hex(int):
... def __repr__(self):
... return hex(self)
...
>>> a = Hex(123)
>>> a
0x7b
>>> a = Hex(16)
>>> a
0x10
>>> Hex(a + 2)
0x12
I'm trying to create an Enum subclass whose values use their definition order as their natural sort order, like in the example below:
@functools.total_ordering
class SelectionType(enum.Enum):
character = 'character'
word = 'word'
sentence = 'sentence'
paragraph = 'paragraph'
def __le__(self, other):
if not isinstance(other, SelectionType):
return NotImplemented
        return self._positions[self] <= self._positions[other]
SelectionType._positions = {x: i for i, x in enumerate(SelectionType)}
Is there a more direct way to get the position of an enum value in its definition order or otherwise a better way to do this?
If this is a pattern you need often, or if the values are important and cannot be replaced by numbers, make a custom Enum you can inherit from:
import enum
class ByDefinitionOrderEnum(enum.Enum):
def __init__(self, *args):
try:
# attempt to initialize other parents in the hierarchy
super().__init__(*args)
except TypeError:
# ignore -- there are no other parents
pass
ordered = len(self.__class__.__members__) + 1
self._order = ordered
def __ge__(self, other):
if self.__class__ is other.__class__:
return self._order >= other._order
return NotImplemented
def __gt__(self, other):
if self.__class__ is other.__class__:
return self._order > other._order
return NotImplemented
def __le__(self, other):
if self.__class__ is other.__class__:
return self._order <= other._order
return NotImplemented
def __lt__(self, other):
if self.__class__ is other.__class__:
return self._order < other._order
return NotImplemented
This allows you to keep any other value instead, while still sorting according to definition order.
class SelectionType(ByDefinitionOrderEnum):
character = 'character'
word = 'word'
sentence = 'sentence'
paragraph = 'paragraph'
and in use:
>>> SelectionType.word < SelectionType.sentence
True
>>> SelectionType.word.value < SelectionType.sentence.value
False
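Sorting also follows definition order, since the comparison methods are defined (a quick check of my own, using the classes above):
>>> [member.name for member in sorted(SelectionType)]
['character', 'word', 'sentence', 'paragraph']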
You could encode the positions as values. Use .name to get the name.
class SelectionType(enum.Enum):
character = 1
word = 2
sentence = 3
paragraph = 4
# copy the OrderedEnum recipe from https://docs.python.org/3/library/enum.html#orderedenum
def __lt__(self, other):
if self.__class__ is other.__class__:
return self.value < other.value
return NotImplemented
>>> SelectionType.word.name
'word'
>>> SelectionType.word < SelectionType.sentence
True
On Python 3.6+ you could use enum.auto() to avoid hard-coding the positions.
class SelectionType(enum.Enum):
character = enum.auto()
word = enum.auto()
sentence = enum.auto()
paragraph = enum.auto()
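Assuming the __lt__ from the OrderedEnum recipe above is kept as well, comparison and sorting then work with the auto-generated values (my quick check):
>>> SelectionType.word < SelectionType.sentence
True
>>> [member.name for member in sorted(SelectionType)]
['character', 'word', 'sentence', 'paragraph']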
I'm trying to find a general way of generating objects which can be converted to strings and back again using the parse module. For example, for a class StringyObject whose instances have just two attributes a and b:
import parse
class StringyObject(object):
fmt = "{a} {b}"
def __init__(self, a, b):
self.a = a
self.b = b
def __str__(self):
return self.fmt.format(a=self.a, b=self.b)
    @classmethod
def parse(cls, string):
result = parse.parse(cls.fmt, string)
kwargs = result.named
return cls(**kwargs)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
else:
return NotImplemented
if __name__ == "__main__":
obj = StringyObject("foo", "bar")
reconstructed_obj = StringyObject.parse(str(obj))
assert reconstructed_obj == obj, "The reconstructed object should be equivalent to the original one."
The script consecutively calls the __str__ instance method and the parse class method, and verifies that the resulting objects obj and reconstructed_obj are equivalent (defined here as being instances of the same class and having the same dictionaries; cf. Elegant ways to support equivalence ("equality") in Python classes).
So far, so good, but I'd like to extend this method to attributes which are lists of variable length. For example, if b is a list, then I could do the following:
import parse
class StringyObject(object):
fmt = "{a} {b}"
separator = ", "
def __init__(self, a, b):
self.a = a
assert isinstance(b, list), "b should be a list."
self.b = b
def __str__(self):
b_string = self.separator.join(self.b)
return self.fmt.format(a=self.a, b=b_string)
    @classmethod
def parse(cls, string):
result = parse.parse(cls.fmt, string)
kwargs = result.named
kwargs['b'] = kwargs['b'].split(cls.separator)
return cls(**kwargs)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
else:
return NotImplemented
if __name__ == "__main__":
obj = StringyObject("foo", ["bar1", "bar2"])
reconstructed_obj = StringyObject.parse(str(obj))
assert reconstructed_obj == obj, "The reconstructed object should be equivalent to the original object."
This still works for this example, but is less elegant because I start to have to use join() and split(), which is what I wanted to avoid by using parse.parse. Furthermore, if I add another attribute c which comes after b in the string representation, the parsing goes haywire:
class StringyObject(object):
fmt = "{a} {b} {c}"
separator = ", "
def __init__(self, a, b, c):
self.a = a
assert isinstance(b, list), "b should be a list."
self.b = b
self.c = c
def __str__(self):
b_string = self.separator.join(self.b)
return self.fmt.format(a=self.a, b=b_string, c=self.c)
Then running the script
obj = StringyObject("foo", ["bar1", "bar2"], "hello")
result = parse.parse(StringyObject.fmt, str(obj))
produces the wrong Result object:
<Result () {'a': 'foo', 'c': 'bar2 hello', 'b': 'bar1,'}>
What I would actually like to do is implement a kind of 'sub-parser' for b which keeps on running as long as it can find a separator, and only then continues with parsing c. Is there an elegant way to do this?
My suggestion is to look into using ast.literal_eval. This function is a safe eval of Python literal structures (ints, floats, strings, lists, dicts, ...).
I wasn't able to get your examples to work using the parse library, but if you modify your format string slightly, it will work pretty easily with ast.literal_eval:
import ast
class StringyObject(object):
fmt = "{a!r}, {b!r}"
def __init__(self, a, b):
self.a = a
self.b = b
def __str__(self):
return self.fmt.format(a=self.a, b=self.b)
    @classmethod
def parse(cls, string):
return cls(*ast.literal_eval(string))
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
else:
return NotImplemented
if __name__ == "__main__":
objects = [("foo", "bar"),
("foo", ["bar1", "bar2"]),
(["foo1", ("foo2", ["foo3", {"foo4"}])], {"bar1" : "bar2", "bar3": ["bar4", "bar5"]})]
for a, b in objects:
obj = StringyObject(a, b)
reconstructed_obj = StringyObject.parse(str(obj))
assert reconstructed_obj == obj, "The reconstructed object should be equivalent to the original one."
The downside to this implementation is that it will only work for basic Python literals; e.g., StringyObject(frozenset(['foo']), 'bar') won't work.
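A quick illustration of that limitation (my addition): the repr of a frozenset is a call expression rather than a literal, so ast.literal_eval rejects it.
import ast
ast.literal_eval("frozenset({'foo'})")  # raises ValueError: not a plain literal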
I found that the desired parsing result could be achieved by adding some 'fixed' characters (not just spaces) in the format string. For example, below I've put a pipe (|) between the {b} and {c}:
import parse
class StringyObject(object):
fmt = "{a} {b} | {c}"
separator = ", "
def __init__(self, a, b, c):
self.a = a
assert isinstance(b, list), "b should be a list."
self.b = b
self.c = c
def __str__(self):
b_string = self.separator.join(self.b)
return self.fmt.format(a=self.a, b=b_string, c=self.c)
    @classmethod
def parse(cls, string):
result = parse.parse(cls.fmt, string)
kwargs = result.named
kwargs['b'] = kwargs['b'].split(cls.separator)
return cls(**kwargs)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
else:
return NotImplemented
if __name__ == "__main__":
obj = StringyObject("foo", ["bar1", "bar2"], "hello")
result = parse.parse(StringyObject.fmt, str(obj))
    print(result)
reconstructed_obj = StringyObject.parse(str(obj))
assert reconstructed_obj == obj, "The reconstructed object should be equivalent to the original object."
The printed Result is
<Result () {'a': 'foo', 'c': 'hello', 'b': 'bar1, bar2'}>
as desired. The reconstructed_obj is also equivalent to the original obj.
In Python, data types (like int and float) represent a value but also have some built-in attributes, functions, etc.:
In [1]: a = 1.2
In [2]: a
Out[2]: 1.2
In [3]: a.is_integer()
Out[3]: False
Is it possible to reproduce this behavior within Python, e.g. define a class:
class Scalar:
    def __init__(self, value):
self.value = value
# other code ....
s = Scalar(1.2)
where I could have s return 1.2 (instead of typing s.value), and do things like a = s -> a = 1.2? The closest I can get to this behavior is adding something like:
def __getitem__(self, key=None):
return self.value
and using a = s[()], but that doesn't look very good.
where I could have s return 1.2 (instead of typing s.value)
In the console? Then implement the __repr__ method.
a = s -> a = 1.2
To avoid having to use a = s.value, you can implement __call__ and call the object:
>>> class Scalar:
... def __init__(self, value):
... self.value = value
... def __repr__(self):
... return str(self.value)
... def __call__(self):
... return self.value
...
>>> s = Scalar(1.2)
>>> s
1.2
>>> a = s()
>>> a
1.2
Check the documentation about the data model on emulating numeric types.
For example:
class Scalar:
def __init__(self, value):
self.value = value
def __repr__(self):
return str(self.value)
def __call__(self):
return self.value
def __add__(self, other):
return Scalar(self.value + other.value)
def __lt__(self, other):
return self.value < other.value
    def __le__(self, other):
return self.value <= other.value
def __eq__(self, other):
return self.value == other.value
def __ne__(self, other):
return self.value != other.value
def __gt__(self, other):
return self.value > other.value
def __ge__(self, other):
return self.value >= other.value
Can be used like this:
>>> s1 = Scalar(1.2)
>>> s2 = Scalar(2.1)
>>> s1 + s2
3.3
>>> s1 < s2
True
>>> s1 > s2
False
>>> s1 != s2
True
>>> s1 <= s2
True
>>> s1 >= s2
False
There are also the __int__ and __float__ magic methods, which you can implement and use like this (this is more semantically correct):
>>> a = int(s)
>>> a = float(s)
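A minimal sketch of those two conversion methods on the same kind of Scalar class (my addition, not the answerer's code):
class Scalar:
    def __init__(self, value):
        self.value = value
    def __repr__(self):
        return str(self.value)
    def __int__(self):
        # int(s) returns the underlying value as an int
        return int(self.value)
    def __float__(self):
        # float(s) returns the underlying value as a float
        return float(self.value)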
As far as I know, that's not possible for your a = s example. You would have to change the behavior of =, the assignment operator. The assignment operator doesn't really do anything to the object on the right, it just copies a reference to it (in the case of an object, at least).
In general, it is possible to change the behavior of built in operators for your custom classes using operator overloading, but Python doesn't provide this sort of option for assignment (=) because of how different it is from operators like addition (+) and even equality (==).
Using Python 3.4. I want to use singledispatch to dispatch on different types in the __mul__ method. The code looks like this:
class Vector(object):
    ## some code not pasted
    @functools.singledispatch
def __mul__(self, other):
raise NotImplementedError("can't mul these type")
    @__mul__.register(int)
    @__mul__.register(object)  # because I can't use Vector, I have to use object
def _(self, other):
result = Vector(len(self)) # start with vector of zeros
for j in range(len(self)):
result[j] = self[j]*other
return result
    @__mul__.register(Vector)  # how can I use the self's type?
    @__mul__.register(object)  #
def _(self, other):
pass # need impl
As you can see from the code, I want to support Vector * Vector, but this raises a NameError:
Traceback (most recent call last):
File "p_algorithms\vector.py", line 6, in <module>
class Vector(object):
File "p_algorithms\vector.py", line 84, in Vector
    @__mul__.register(Vector)  # how can I use the self's type?
NameError: name 'Vector' is not defined
The question may be: how can I use the class name as a type in the class's own methods? I know C++ has forward class declarations. How does Python solve my problem? And it is strange to see result = Vector(len(self)), where Vector can be used in the method body.
After having a look at http://lukasz.langa.pl/8/single-dispatch-generic-functions/
I chose this way to implement it:
import unittest
from functools import singledispatch
class Vector(object):
"""Represent a vector in a multidimensional space."""
def __init__(self, d):
self._coords = [0 for i in range(0, d)]
self.__init__mul__()
def __init__mul__(self):
__mul__registry = self.__mul__.registry
self.__mul__ = singledispatch(__mul__registry[object])
self.__mul__.register(int, self.mul_int)
self.__mul__.register(Vector, self.mul_Vector)
def __setitem__(self, key, value):
self._coords[key] = value
def __getitem__(self, item):
return self._coords[item]
def __len__(self):
return len(self._coords)
def __str__(self):
return str(self._coords)
    @singledispatch
def __mul__(self, other):
print ("error type is ", type(other))
print (type(other))
raise NotImplementedError("can't mul these type")
def mul_int(self,other):
print ("other type is ", type(other))
result = Vector(len(self)) # start with vector of zeros
for j in range(len(self)):
result[j] = self[j]*other
return result
def mul_Vector(self, other):
print ("other type is ", type(other))
#result = Vector(len(self)) # start with vector of zeros
sum = 0
for i in range(0,len(self)):
sum += self._coords[i] * other._coords[i]
return sum
class TestCase(unittest.TestCase):
def test_singledispatch(self):
# the following demonstrates usage of a few methods
v = Vector(5) # construct five-dimensional <0, 0, 0, 0, 0>
for i in range(1,6):
v[i-1] = i
print(v.__mul__(3))
print(v.__mul__(v))
print(v*3)
if __name__ == "__main__":
unittest.main()
The result is strange:
other type is <class 'int'>
[3, 6, 9, 12, 15]
other type is <class '__main__.Vector'>
55
error type is <class 'int'>
Traceback (most recent call last):
File "p_algorithms\vector.py", line 164, in <module>
print(v*3)
File "C:\Python34\lib\functools.py", line 710, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "p_algorithms\vector.py", line 111, in __mul__
raise NotImplementedError("can't mul these type")
v.__mul__(3) works, but v*3 does not. This is strange; in my opinion, v*3 is just the same as v.__mul__(3).
Update: after @Martijn Pieters's comment, I still want to implement v*3 inside the class. So I tried this:
import unittest
from functools import singledispatch
class Vector(object):
    @staticmethod
def static_mul_int(self,other):
print ("other type is ", type(other))
result = Vector(len(self)) # start with vector of zeros
for j in range(len(self)):
result[j] = self[j]*other
return result
    @singledispatch
    @staticmethod
def __static_mul__(cls, other):
print ("error type is ", type(other))
print (type(other))
raise NotImplementedError("can't mul these type")
__mul__registry2 = __static_mul__.registry
__mul__ = singledispatch(__mul__registry2[object])
__mul__.register(int, static_mul_int)
def __init__(self, d):
self._coords = [0 for i in range(0, d)]
self.__init__mul__()
def __init__mul__(self):
__mul__registry = self.__mul__.registry
print ("__mul__registry",__mul__registry,__mul__registry[object])
self.__mul__ = singledispatch(__mul__registry[object])
self.__mul__.register(int, self.mul_int)
print ("at last __mul__registry",self.__mul__.registry)
# #singledispatch
# def __mul__(self, other):
# print ("error type is ", type(other))
# print (type(other))
# raise NotImplementedError("can't mul these type")
def mul_int(self,other):
print ("other type is ", type(other))
result = Vector(len(self)) # start with vector of zeros
for j in range(len(self)):
result[j] = self[j]*other
return result
def __setitem__(self, key, value):
self._coords[key] = value
def __getitem__(self, item):
return self._coords[item]
def __len__(self):
return len(self._coords)
def __str__(self):
return str(self._coords)
class TestCase(unittest.TestCase):
def test_singledispatch(self):
# the following demonstrates usage of a few methods
v = Vector(5) # construct five-dimensional <0, 0, 0, 0, 0>
for i in range(1,6):
v[i-1] = i
print(v.__mul__(3))
print("type(v).__mul__'s registry:",type(v).__mul__.registry)
type(v).__mul__(v, 3)
print(v*3)
if __name__ == "__main__":
unittest.main()
This time, type(v).__mul__(v, 3) raises an error:
Traceback (most recent call last):
File "test.py", line 73, in test_singledispatch
type(v).__mul__(v, 3)
File "/usr/lib/python3.4/functools.py", line 708, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
TypeError: 'staticmethod' object is not callable
To me, a static method should act like an instance method here.
You cannot use functools.singledispatch on methods at all, not as a decorator at least. Python 3.8 adds a new option, just for methods: functools.singledispatchmethod().
It doesn't matter that Vector isn't defined here yet; the first argument to any method is always going to be self, while you'd use single dispatch for the second argument here.
Because decorators apply to the function objects before the class object is created, you could just as well register your 'methods' as functions instead, outside of the class body, so you have access to the Vector name:
class Vector(object):
    @functools.singledispatch
def __mul__(self, other):
return NotImplemented
@Vector.__mul__.register(int)
@Vector.__mul__.register(Vector)
def _(self, other):
result = Vector(len(self)) # start with vector of zeros
for j in range(len(self)):
result[j] = self[j]*other
return result
For non-supported types, you need to return the NotImplemented singleton, not raise an exception. This way Python will try the inverse operation too.
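A minimal illustration of that fallback (my own example, separate from the Vector code): when the left operand's __mul__ returns NotImplemented, Python tries the right operand's reflected __rmul__ before raising TypeError.
class Left:
    def __mul__(self, other):
        return NotImplemented

class Right:
    def __rmul__(self, other):
        return "handled by Right.__rmul__"

print(Left() * Right())   # handled by Right.__rmul__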
However, since the dispatch is going to key on the wrong argument (self) here anyway, you'll have to come up with your own single dispatch mechanism.
If you really want to use #functools.singledispatch you'd have to delegate to a regular function, with the arguments inversed:
@functools.singledispatch
def _vector_mul(other, self):
return NotImplemented
class Vector(object):
def __mul__(self, other):
return _vector_mul(other, self)
@_vector_mul.register(int)
def _vector_int_mul(other, self):
result = Vector(len(self))
for j in range(len(self)):
result[j] = self[j] * other
return result
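A Vector-by-Vector case could presumably be registered the same way once the class exists; this sketch assumes the dot-product semantics and the _coords attribute from the question's class:
@_vector_mul.register(Vector)
def _vector_vector_mul(other, self):
    # hypothetical dot product of the two vectors' coordinates
    return sum(a * b for a, b in zip(self._coords, other._coords))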
As for your updates using __init__mul__: v * 3 is not translated to v.__mul__(3). It is instead translated to type(v).__mul__(v, 3), see Special method lookup in the Python datamodel reference. This always bypasses any methods set directly on the instance.
Here type(v) is Vector; Python looks up the function, it won't use a bound method here. Again, because functools.singledispatch dispatches on the first argument, always, you cannot use single dispatch directly on the methods of Vector, because that first argument is always going to be a Vector instance.
In other words, Python will not use the methods you set on self in __init__mul__; special methods are never looked up on the instance, see Special method lookup in the datamodel documentation.
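A small demonstration of that lookup rule (added here for clarity, not part of the original answer):
class Demo:
    def __mul__(self, other):
        return "class-level __mul__"

d = Demo()
d.__mul__ = lambda other: "instance-level __mul__"
print(d * 2)          # class-level __mul__: the operator ignores the instance attribute
print(d.__mul__(2))   # instance-level __mul__: explicit attribute access finds it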
The functools.singledispatchmethod() option that Python 3.8 adds uses a class as the decorator which implements the descriptor protocol, just like methods do. This lets it then handle dispatch before binding (so before self would be prepended to the argument list) and then bind the registered function that the singledispatch dispatcher returns. The source code for this implementation is fully compatible with older Python versions, so you could use that instead:
from functools import singledispatch, update_wrapper
# Python 3.8 singledispatchmethod, backported
class singledispatchmethod:
"""Single-dispatch generic method descriptor.
Supports wrapping existing descriptors and handles non-descriptor
callables as instance methods.
"""
def __init__(self, func):
if not callable(func) and not hasattr(func, "__get__"):
raise TypeError(f"{func!r} is not callable or a descriptor")
self.dispatcher = singledispatch(func)
self.func = func
def register(self, cls, method=None):
"""generic_method.register(cls, func) -> func
Registers a new implementation for the given *cls* on a *generic_method*.
"""
return self.dispatcher.register(cls, func=method)
def __get__(self, obj, cls):
def _method(*args, **kwargs):
method = self.dispatcher.dispatch(args[0].__class__)
return method.__get__(obj, cls)(*args, **kwargs)
_method.__isabstractmethod__ = self.__isabstractmethod__
_method.register = self.register
update_wrapper(_method, self.func)
return _method
    @property
def __isabstractmethod__(self):
return getattr(self.func, '__isabstractmethod__', False)
and apply that to your Vector() class. You still have to register your Vector implementation for the single dispatch after the class has been created, because only then can you register a dispatch for the class:
class Vector(object):
def __init__(self, d):
self._coords = [0] * d
def __setitem__(self, key, value):
self._coords[key] = value
def __getitem__(self, item):
return self._coords[item]
def __len__(self):
return len(self._coords)
def __repr__(self):
return f"Vector({self._coords!r})"
def __str__(self):
return str(self._coords)
    @singledispatchmethod
def __mul__(self, other):
return NotImplemented
    @__mul__.register
def _int_mul(self, other: int):
result = Vector(len(self))
for j in range(len(self)):
result[j] = self[j] * other
return result
@Vector.__mul__.register
def _vector_mul(self, other: Vector):
return sum(sc * oc for sc, oc in zip(self._coords, other._coords))
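With that in place, a quick check (my addition) shows both dispatch targets working through the * operator:
v = Vector(3)
w = Vector(3)
for i in range(3):
    v[i] = i + 1   # v -> [1, 2, 3]
    w[i] = 2       # w -> [2, 2, 2]
print(v * 3)       # [3, 6, 9]
print(v * w)       # 12 (dot product)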
You could of course also create a subclass first and dispatch based on that, since dispatch works for subclasses too:
class _Vector(object):
def __init__(self, d):
self._coords = [0] * d
class Vector(_Vector):
def __setitem__(self, key, value):
self._coords[key] = value
def __getitem__(self, item):
return self._coords[item]
def __len__(self):
return len(self._coords)
def __repr__(self):
return f"{type(self).__name__}({self._coords!r})"
def __str__(self):
return str(self._coords)
    @singledispatchmethod
def __mul__(self, other):
return NotImplemented
    @__mul__.register
def _int_mul(self, other: int):
result = Vector(len(self))
for j in range(len(self)):
result[j] = self[j] * other
return result
    @__mul__.register
def _vector_mul(self, other: _Vector):
return sum(sc * oc for sc, oc in zip(self._coords, other._coords))
This is a little ugly, as you need to defer binding the implementation of Vector/Vector multiplication until after Vector is actually defined. But the idea is that the single-dispatch function needs the first argument to be of arbitrary type, so Vector.__mul__ will call that function with self as the second argument.
import functools
class Vector:
def __mul__(self, other):
# Python has already dispatched Vector() * object() here, so
# swap the arguments so that our single-dispatch works. Note
# that in general if a*b != b*a, then the _mul_by_other
# implementations need to compensate.
return Vector._mul_by_other(other, self)
    @functools.singledispatch
def _mul_by_other(x, y):
raise NotImplementedError("Can't multiply vector by {}".format(type(x)))
    @_mul_by_other.register(int)
def _(x, y):
print("Multiply vector by int")
@Vector._mul_by_other.register(Vector)
def _(x, y):
print("Multiply vector by another vector")
x = Vector()
y = Vector()
x * 3
x * y
try:
x * "foo"
except NotImplementedError:
print("Caught attempt to multiply by string")
Let's say we have a class:
NOTE: this is a dummy class only.
class C(object):
def __init__(self):
self.a = -10
self.b = 5
self.c = 2
def modify(self, **kwargs):
for keyword in kwargs:
vars(self)[keyword] = kwargs[keyword]
return(self)
And we want to use this modify method to change values in our object:
myclass = C()
myclass = myclass.modify(a=10)
But when I want to change the value based on the original one, I have to write this:
myclass = C()
myclass = myclass.modify(a=myclass.a/10)
Or:
myclass = myclass.modify(a=abs(myclass.a))
My question is: is there a way to create a global variable in a module that I can import and use as a placeholder for the current value, so I can use this formula:
from globvars import current
myclass = C()
myclass = myclass.modify(
a=abs(current) % current ** 2,
b=current//2,
c=bool(current)
)
First I tried to create a class which stores the operation being applied and a value, and modify() would first look for its variable as a keyword and then execute the function. That actually only works for simple situations like current+10 or current**2.
But when I realised I wanted to use this current with, for example, an hsba(current) (color converter) function, where current points to an object stored in another object, I just gave up; I can't write this for every class I'm going to use.
Is there a solution for this? Maybe it's quite easy, I just can't see it :)
Thanks in advance for replies!
Here is a working solution. It is not complete and full of pretty bad design choices, but I hope it helps.
class Expr(object):
def __init__(self, op, left, right):
self.op = op
self.left = left
self.right = right
def __call__(self, current):
l = self._replace_current(self.left, current)
r = self._replace_current(self.right, current)
return self._do_operation(l, r)
def _replace_current(self, val, current):
if val == 'current':
return current
elif isinstance(val, Expr): # recurse
return val(current)
else:
return val
def _do_operation(self, l, r):
if self.op == '+':
return l + r
elif self.op == '*':
return l * r
elif self.op == '-':
return l - r
def __add__(self, other):
return self._left_op('+', other)
def __radd__(self, other):
return self._right_op('+', other)
def __mul__(self, other):
return self._left_op('*', other)
def __rmul__(self, other):
return self._right_op('*', other)
def __sub__(self, other):
return self._left_op('-', other)
def __rsub__(self, other):
return self._right_op('-', other)
def _left_op(self, op, other):
if isinstance(other, Current):
return Expr(op=op, left=self, right='current')
else:
return Expr(op=op, left=self, right=other)
def _right_op(self, op, other):
if isinstance(other, Current):
return Expr(op=op, left='current', right=self)
else:
return Expr(op=op, left=other, right=self)
class Current(Expr):
def __init__(self):
super(Current, self).__init__(None, None, None)
def __call__(self, current):
return current
def _left_op(self, op, other):
return Expr(op=op, left='current', right=other)
def _right_op(self, op, other):
return Expr(op=op, left=other, right='current')
current = Current()
class YourObj(object):
def __init__(self, a, b):
self.a = a
self.b = b
def __call__(self, **kw):
for key, val in kw.iteritems():
# You should probably make sure it is actually an attribute of YourObj
if isinstance(val, Expr):
current = self.a
new_val = val(current)
setattr(self, key, new_val)
else:
setattr(self, key, val)
And you can do something like:
obj = YourObj(a=4, b=5)
obj(a=current - 4 + current * current)
This is basically an expression interpreter embedded in python's math operations.
Whenever you use an operation on current (like +), it will return an Expr (because it overrides __add__ and __radd__) that will register which operation this is, and what are each of its operands. These expressions can be nested, so if you say current + 4 - current, it will return Expr(op='-', left=Expr(op='+', left='current', right=4), right='current').
An expression can then be evaluated by calling it like a function and passing it the value that should replace 'current'. When you evaluate an expression, it will:
replace all the occurrences of 'current' by the value passed
recursively evaluate the nested functions
return the end result of the whole expression
When you do obj(a=current + 4), the __call__ method of YourObj is called. It will evaluate the expression resulting of current + 4 and store it in a.
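For instance, the expression objects can also be built and evaluated directly, without going through YourObj (a quick check of my own):
expr = current + 4 - current   # Expr('-', Expr('+', 'current', 4), 'current')
print(expr(10))                # (10 + 4) - 10 == 4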
I hope this is clearer. Maybe I should rename some of the 'current' to make it less confusing.
Your modify method could take the name of the attribute to modify, and a function that takes the current value of the attribute and returns its new computed value. Then you can do something like:
def compute_new_value(current):
new_value = abs(current) % current ** 2
return new_value
myclass = C()
myclass.modify('a', compute_new_value)
For simple cases, lambda makes it less verbose:
myclass.modify('a', lambda cur: cur + 4)
And your class:
class C(object):
[...]
def modify(self, attr_name, func):
cur_value = getattr(self, attr_name)
new_value = func(cur_value)
setattr(self, attr_name, new_value)
Edit: I may have missed something. Since you're writing myclass = myclass.modify..., should the modify method return a copy of the object?
You have a poor design, in my opinion, but you could do this using eval(). Of course, that just makes your design smell even more. Still...
class C(object):
# ...
def modify(self, **kwargs):
for name, expr in kwargs.iteritems():
setattr(self, name, eval(expr, vars(self)))
obj = C()
obj.modify(a="a+2", b="b*42")
The downside is that you have to pass the expressions as strings. Also, with this simple implementation, you can only use values defined on the instance in the expression (e.g., you can't access class attributes, any attributes of parent classes, or globals). You could add the ability to use class attributes, globals, and even parent classes by building the vardict dictionary in the appropriate order, of course:
def modify(self, **kwargs):
vardict = {} # allow globals and self attributes to be used in expressions
vardict.update(globals())
vardict.update(vars(self))
for name, expr in kwargs.iteritems():
        value = eval(expr, vardict)
        setattr(self, name, value)
vardict[name] = value
If you want a current variable that holds the current value, you could use this (inside the loop, since it needs to change for each attribute processed):
v["current"] = getattr(self, name, None)
One of the biggest drawbacks here is that you can't easily access variables from the caller's scope, although you could dig them out of the stack frame I guess... ugh. Or make the caller interpolate those into the string... double ugh.
Morphyn's answer is the proper way to do it, in my opinion. A lambda is hardly complicated.
This was my old solution (sort of; this is a dummy version of it):
class __current__(object):
def do(self, e, v = None):
c = __current__()
c.exp = e
if v is not None:
c.val = v
return(c)
def __abs__(self):
return(self.do(abs))
def __rpow__(self, v):
return(self.do(pow, v))
current = __current__()
class C(object):
def __call__(self, **kwargs):
for keyword, value in kwargs.iteritems():
try:
expression = value.exp
try:
value = expression(vars(self)[keyword], value.val)
except AttributeError:
value = expression(vars(self)[keyword])
except AttributeError:
value = value
setattr(self, keyword, value)
And the usage:
MyObj = C()
MyObj(a = -2)
MyObj(a = abs(current))
MyObj(a = 2 ** current)