I'm writing a very simple decorator to give me some basic debug information about a function.
from functools import wraps
from time import perf_counter
class debug(object):
def __init__(self, Time=False, Parameters=False, Doc=False):
self.t = Time
self.p = Parameters
self.d = Doc
def __call__(self, func):
@wraps(func)
def run(*args, **kwargs):
params = ""
if self.p:
params = ", ".join(["{}".format(arg) for arg in args] + ["{}={}".format(k, v) for k, v in kwargs.items()])
print("\n\tDebug output for '{}({})'".format(func.__name__, params))
if self.d:
print('\tDocstring: "{}"'.format(func.__doc__))
if self.t:
t1 = perf_counter()
val = func(*args, **kwargs)
if self.t:
t2 = perf_counter()
print("\tTime Taken: {:.3e} seconds".format(t2 - t1))
print("\tReturn Type: '{}'\n".format(type(val).__name__))
return val
return run
This is all well and good for normal functions.
@debug(Parameters=True, Time=True, Doc=True)
def foo(i, j=5):
"""Raises i to 2j"""
for _ in range(j):
i **= 2
return i
i = foo(5, j=3)
# Output:
"""
Debug output for 'foo(5, j=3)'
Docstring: "Raises i to 2j"
Time Taken: 1.067e-05 seconds
Return Type: 'int'
"""
However, generators are a different story.
@debug(Parameters=True, Time=True, Doc=True)
def bar(i, j=2):
"""Infinite iterator of increment j"""
while True:
yield i
i += j
b = bar(5) # Output occurs here
next(b) # No output
Now, from what I have coded, that is completely expected, but I'm wondering how I can hook the .__next__() method or what the best way of going about this is.
You can simply change your __call__ method and return a generator if a generator function is given as input (add import inspect at the top of your file):
def __call__(self, f):
if inspect.isgeneratorfunction(f):
def run_gen(*args, **kwargs):
# do pre stuff...
for _ in f(*args, **kwargs):
yield _
# do post stuff...
return run_gen
else:
def run(*args, **kwargs):
# do pre stuff...
r = f(*args, **kwargs)
# do post stuff...
return r
return run
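For reference, here is one way the "pre stuff" / "post stuff" placeholders could be filled in for the generator branch — a minimal sketch (the name timed_generator is made up for illustration) that just times how long it takes to exhaust the generator:
from functools import wraps
from time import perf_counter

def timed_generator(f):
    # sketch only: wrap a generator function and report total consumption time
    @wraps(f)
    def run_gen(*args, **kwargs):
        t1 = perf_counter()  # "pre stuff"
        for item in f(*args, **kwargs):
            yield item
        # "post stuff" -- only reached if the caller exhausts the generator
        t2 = perf_counter()
        print("'{}' exhausted after {:.3e} seconds".format(f.__name__, t2 - t1))
    return run_gen
Note that with an infinite generator like bar above, the post step is never reached.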
You can't replace the generator's __next__ method, as it is read-only. But you can do something like this (see the debug_generator function):
from functools import wraps
import inspect
class debug(object):
def __init__(self, Time=False, Parameters=False, Doc=False):
self.t = Time
self.p = Parameters
self.d = Doc
def __call__(self, func):
def debug_generator(gen):
for i, x in enumerate(gen):
# here you add your debug statements
print("What you want: step %s" % i)
yield x
@wraps(func)
def run(*args, **kwargs):
params = ""
if self.p:
params = ", ".join(["{}".format(arg) for arg in args] + ["{}={}".format(k, v) for k, v in kwargs.items()])
print("\n\tDebug output for '{}({})'".format(func.__name__, params))
if self.d:
print('\tDocstring: "{}"'.format(func.__doc__))
val = func(*args, **kwargs)
print("\tReturn Type: '{}'\n".format(type(val).__name__))
if inspect.isgenerator(val):
return debug_generator(val)
return val
return run
Basically you just iterate over the generator you want to debug and yield its values again, adding debug statements in the loop.
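If you actually want to hook each .__next__() call instead of re-yielding inside a generator, another option is to have run return a small wrapper object around the generator. A rough sketch (the DebugIterator name is invented for illustration):
from time import perf_counter

class DebugIterator:
    # sketch only: report every value the wrapped generator produces
    def __init__(self, gen):
        self.gen = gen
        self.count = 0
    def __iter__(self):
        return self
    def __next__(self):
        t1 = perf_counter()
        value = next(self.gen)  # StopIteration propagates to the caller as usual
        t2 = perf_counter()
        self.count += 1
        print("\tyield #{}: {!r} ({:.3e} s)".format(self.count, value, t2 - t1))
        return value
Inside run you would then return DebugIterator(val) whenever inspect.isgenerator(val) is true, and it works for infinite generators too.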
Here's a simplified function for which I'm trying to add an lru_cache -
from functools import lru_cache, wraps
@lru_cache(maxsize=1000)
def validate_token(token):
if token % 3:
return None
return True
for x in range(1000):
validate_token(x)
print(validate_token.cache_info())
outputs -
CacheInfo(hits=0, misses=1000, maxsize=1000, currsize=1000)
As we can see, it also caches the args and returned values for the None returns. In the above example, I want the cache size to be 334, counting only the calls that return non-None values. In my case, my function, which takes a large number of args, might return a different value on a later call if the previous value was None, so I want to avoid caching the None values.
I want to avoid reinventing the wheel and implementing a lru_cache again from scratch. Is there any good way to do this?
Here are some of my attempts -
1. Trying to implement my own cache (which is not LRU here) -
from functools import wraps
# global cache object
MY_CACHE = {}
def get_func_hash(func):
# generates unique key for a function. TODO: fix what if function gets redefined?
return func.__module__ + '|' + func.__name__
def my_lru_cache(func):
name = get_func_hash(func)
if name not in MY_CACHE:
MY_CACHE[name] = {}
@wraps(func)
def function_wrapper(*args, **kwargs):
if tuple(args) in MY_CACHE[name]:
return MY_CACHE[name][tuple(args)]
value = func(*args, **kwargs)
if value is not None:
MY_CACHE[name][tuple(args)] = value
return value
return function_wrapper
@my_lru_cache
def validate_token(token):
if token % 3:
return None
return True
for x in range(1000):
validate_token(x)
print(get_func_hash(validate_token))
print(len(MY_CACHE[get_func_hash(validate_token)]))
outputs -
__main__|validate_token
334
2. I realised that the lru_cache doesn't do caching when an exception is raised within the wrapped function -
from functools import wraps, lru_cache
def my_lru_cache(func):
@wraps(func)
@lru_cache(maxsize=1000)
def function_wrapper(*args, **kwargs):
value = func(*args, **kwargs)
if value is None:
# TODO: change this to a custom exception
raise KeyError
return value
return function_wrapper
def handle_exception(func):
@wraps(func)
def function_wrapper(*args, **kwargs):
try:
value = func(*args, **kwargs)
return value
except KeyError:
return None
return function_wrapper
@handle_exception
@my_lru_cache
def validate_token(token):
if token % 3:
return None
return True
for x in range(1000):
validate_token(x)
print(validate_token.__wrapped__.cache_info())
outputs -
CacheInfo(hits=0, misses=334, maxsize=1000, currsize=334)
The above correctly caches only the 334 values, but it requires wrapping the function twice and accessing the cache info in an awkward way: func.__wrapped__.cache_info().
How do I better achieve the behaviour of not caching when None (or other specific) values are returned, using the built-in lru_cache decorator, in a Pythonic way?
You are missing the two lines marked here:
def handle_exception(func):
@wraps(func)
def function_wrapper(*args, **kwargs):
try:
value = func(*args, **kwargs)
return value
except KeyError:
return None
function_wrapper.cache_info = func.cache_info # Add this
function_wrapper.cache_clear = func.cache_clear # Add this
return function_wrapper
You can do both wrappers in one function:
def my_lru_cache(maxsize=128, typed=False):
class CustomException(Exception):
pass
def decorator(func):
@lru_cache(maxsize=maxsize, typed=typed)
def raise_exception_wrapper(*args, **kwargs):
value = func(*args, **kwargs)
if value is None:
raise CustomException
return value
@wraps(func)
def handle_exception_wrapper(*args, **kwargs):
try:
return raise_exception_wrapper(*args, **kwargs)
except CustomException:
return None
handle_exception_wrapper.cache_info = raise_exception_wrapper.cache_info
handle_exception_wrapper.cache_clear = raise_exception_wrapper.cache_clear
return handle_exception_wrapper
if callable(maxsize):
user_function, maxsize = maxsize, 128
return decorator(user_function)
return decorator
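For example, with the same toy validate_token from the question (a usage sketch, assuming my_lru_cache above is in scope along with from functools import lru_cache, wraps):
@my_lru_cache(maxsize=1000)  # plain @my_lru_cache works too, thanks to the callable(maxsize) check
def validate_token(token):
    if token % 3:
        return None
    return True

for x in range(1000):
    validate_token(x)

print(validate_token.cache_info())  # currsize should be 334: only non-None results are stored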
Use exceptions to prevent caching:
from functools import lru_cache
@lru_cache(maxsize=None)
def fname(x):
print('worked')
raise Exception('')
return 1
for _ in range(10):
try:
fname(1)
except Exception as e:
pass
In the example above, "worked" will be printed 10 times.
In order to use multiprocessing in interactive Python on Windows (Miniconda), I've found a very useful piece of code that works very well. The code, however, cannot pass the self argument from inside a class to the function being pooled. Here is my code, which works on Google Colab but never finishes in Windows IPython:
import multiprocessing
from multiprocessing import Pool
from poolable import make_applicable, make_mappable
def worker(d):
"""worker function"""
for i in range(10000):
j = i **(1/3) + d.bias
return j
class dummy():
def __init__(self):
self.bias = 1000
def calc(self):
pool = Pool(processes=12)
results = {}
for i in range(5):
results[i] = (pool.apply_async(*make_applicable(worker,self)))
pool.close()
pool.join()
print([results[i].get() for i in range(5)])
d=dummy()
d.calc()
The code works well on Windows if I pass other types of variables, for example:
results[i] = (pool.apply_async(*make_applicable(worker,self.bias)))
But when I pass self to the function, the process never finishes. I have no idea what to do.
poolable.py from here:
from types import FunctionType
import marshal
def _applicable(*args, **kwargs):
name = kwargs['__pw_name']
code = marshal.loads(kwargs['__pw_code'])
gbls = globals() #gbls = marshal.loads(kwargs['__pw_gbls'])
defs = marshal.loads(kwargs['__pw_defs'])
clsr = marshal.loads(kwargs['__pw_clsr'])
fdct = marshal.loads(kwargs['__pw_fdct'])
func = FunctionType(code, gbls, name, defs, clsr)
func.fdct = fdct
del kwargs['__pw_name']
del kwargs['__pw_code']
del kwargs['__pw_defs']
del kwargs['__pw_clsr']
del kwargs['__pw_fdct']
return func(*args, **kwargs)
def make_applicable(f, *args, **kwargs):
if not isinstance(f, FunctionType): raise ValueError('argument must be a function')
kwargs['__pw_name'] = f.__name__ # edited
kwargs['__pw_code'] = marshal.dumps(f.__code__) # edited
kwargs['__pw_defs'] = marshal.dumps(f.__defaults__) # edited
kwargs['__pw_clsr'] = marshal.dumps(f.__closure__) # edited
kwargs['__pw_fdct'] = marshal.dumps(f.__dict__) # edited
return _applicable, args, kwargs
def _mappable(x):
x,name,code,defs,clsr,fdct = x
code = marshal.loads(code)
gbls = globals() #gbls = marshal.loads(gbls)
defs = marshal.loads(defs)
clsr = marshal.loads(clsr)
fdct = marshal.loads(fdct)
func = FunctionType(code, gbls, name, defs, clsr)
func.fdct = fdct
return func(x)
def make_mappable(f, iterable):
if not isinstance(f, FunctionType): raise ValueError('argument must be a function')
name = f.__name__ # edited
code = marshal.dumps(f.__code__) # edited
defs = marshal.dumps(f.__defaults__) # edited
clsr = marshal.dumps(f.__closure__) # edited
fdct = marshal.dumps(f.__dict__) # edited
return _mappable, ((i,name,code,defs,clsr,fdct) for i in iterable)
Edit:
It seems that the problem exists not only for self but also for any other class instance passed to the make_applicable function. The following code also doesn't finish:
class dummy2():
def __init__(self):
self.bias = 1000
class dummy():
def __init__(self):
self.bias = 1000
def copy(self):
return copy.deepcopy(self)
def calc(self):
pool = Pool(processes=12)
results = {}
for i in range(5):
d = dummy2()
results[i] = pool.apply_async(*make_applicable(worker,d))
pool.close()
pool.join()
print([results[i].get() for i in range(5)])
Using IPython console:
Put your code in a module (mp.py), ensuring that the class instantiation and method call are executed inside an if __name__ == '__main__': conditional:
import multiprocessing
from multiprocessing import Pool
from poolable import make_applicable, make_mappable
def worker(d):
"""worker function"""
for i in range(10000):
j = i **(1/3) + d.bias
return j
class Dummy():
def __init__(self):
# self.bias = 1000
self.bias = 10
def calc(self):
pool = Pool(processes=12)
results = {}
for i in range(5):
results[i] = (pool.apply_async(*make_applicable(worker,self)))
pool.close()
pool.join()
return [results[i].get() for i in range(5)]
if __name__ == '__main__':
d=Dummy()
print(d.calc())
Then in the console run the module:
In [1]: runfile('P:/pyProjects3/mp.py', wdir='P:/pyProjects3')
[31.543628731482663, 31.543628731482663, 31.543628731482663, 31.543628731482663, 31.543628731482663]
I have Jupyter Notebook (Anaconda) but don't know how to use it; if I figure it out, I'll update this answer.
Try using this code to test:
import pickle
from multiprocessing import Pool
from poolable import make_applicable
def worker(d):
"""worker function"""
d = pickle.loads(d)
for i in range(10000):
j = i **(1/3) + d.bias
return j
class dummy():
def __init__(self):
self.bias = 1000
def calc(self):
pool = Pool(processes=12)
results = {}
for i in range(5):
x = pickle.dumps(self)
results[i] = (pool.apply_async(*make_applicable(worker,x)))
pool.close()
pool.join()
print([results[i].get() for i in range(5)])
I want to design a decorator that checks a function's annotation types and runs the function only if the argument types match. Can Python do such a thing? If it can, please help me!
def foo (a:int):
if foo.__annotations__.get('a') == type(a):
pass
def boo (b:str):
if boo.__annotations__.get('b') == type(b):
pass
Another thing: __annotations__ is a dict, and I want something like this:
from types import FunctionType
def check (f:FunctionType):
result = True
k = [k for k in f.__annotations__.keys()]
v = [v for v in f.__annotations__.values()]
for i in range(len(v)):
if v[i] != type(k[i]): # but we can't access the type of k[i] outside the function f
result = False
return result
If I understand the idea correctly, perhaps this code will help you:
from types import FunctionType
def check(f: FunctionType):
def wrapper(*args, **kwargs):
result = True
# check args
keys = tuple(f.__annotations__.keys())
for ar in enumerate(args):
if not isinstance(ar[1], f.__annotations__.get(keys[ar[0]])):
result = False
break
if result:
# check kwargs
for k, v in kwargs.items():
if not isinstance(v, f.__annotations__.get(k)):
result = False
break
if result:
f(*args, **kwargs)
return wrapper
Example usage:
@check
def foo(a: str, b: int = None):
print(f"a = {a}")
print(f"b = {b}")
# Example 1: a=234, b=32:
foo(234, b=32)
# result: function not executed
# Example 2: a="abc", b="zzz":
foo("abc", b="zzz")
# result: function not executed
# Example 3: a="qwe", b= not set:
foo("qwe")
# result: function executed, output:
# a = qwe
# b = None
# Example 4: a="abc", b=99:
foo("abc", 99)
# result: function executed, output:
# a = abc
# b = 99
The decorator checks the argument types, and if everything is in order, it executes the function, otherwise it does nothing.
Something like this:
import inspect
import functools
def check(func):
msg = "Expected type {etype} for {para} got {got}"
para = inspect.signature(func).parameters
keys = tuple(para.keys())
@functools.wraps(func)
def wrapper(*args,**kwargs):
def do_check(anno,value,para):
if not isinstance(value, anno):
raise TypeError(msg.format(etype=anno,
para=para,
got=type(value)))
for i,value in enumerate(args):
anno = para[keys[i]].annotation
do_check(anno, value, keys[i])
for arg_name,value in kwargs.items():
anno = para[arg_name].annotation
do_check(anno, value, arg_name)
ret = func(*args,**kwargs)
if "return" in func.__annotations__:
anno = func.__annotations__["return"]
do_check(anno, ret, "return")
return ret
return wrapper
@check
def test(a:int,b:str) -> str:
return 'aaa'
@check
def test2(a:int,b:str) -> str:
return 123
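A quick usage sketch, assuming the test and test2 definitions above:
print(test(1, "x"))  # passes the argument and return checks: prints 'aaa'

try:
    test("oops", "x")
except TypeError as e:
    print(e)  # Expected type <class 'int'> for a got <class 'str'>

try:
    test2(1, "x")
except TypeError as e:
    print(e)  # the return value 123 fails the -> str annotation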
import functools
def annotations_checker(func):
@functools.wraps(func)
def wrapper(*args,**kwargs):
keys=tuple(func.__annotations__.keys())
keys=tuple(func.__annotations__.keys())
for key_num in range(len(keys)):
if func.__annotations__[keys[key_num]]!=type(args[key_num]):
break
else:
value=func(*args,**kwargs)
return value
return
return wrapper
You can use the @annotations_checker decorator on any Python method or function to check type annotations, like:
@annotations_checker
def test(str_example:str,int_example:int,second_str_example:str):
print("if you can see this, the args and annonations type are same!")
test(1,"2",3) #the function will not work
test("1",1,"testtest") #function will work
I am trying to create a function decorator that logs the values of specified function arguments in an accessible Python object. I already have working code, but I am missing a piece to finish this up.
First, I have the object log where I will save stuff correctly set up:
class Borg:
_shared_state = {}
def __init__(self):
self.__dict__ = self._shared_state
class Log(Borg):
def __init__(self):
Borg.__init__(self)
if not hasattr(self, 'tape'):
self.tape = []
def add(self, this):
self.tape.append(this)
def __str__(self):
return '\n'.join([str(line) for line in self.tape])
Then I have a generic call object and the decorator implementation (with missing code):
import inspect
import functools
class Call:
def __init__(self, name, **saved_arguments):
self.name = name
self.saved_arguments = saved_arguments
def __str__(self):
return f'Call(name={self.name}, saved_arguments={self.saved_arguments})'
def record(func, save_args_names=None):
if save_args_names is None:
save_args_names = {}
name = func.__name__
args = inspect.getfullargspec(func).args
if save_args_names and not set(save_args_names).issubset(set(args)):
raise ValueError(f'Arguments not present in function: {set(save_args_names) - set(args)}')
log = Log()
@functools.wraps(func)
def wrapper(*func_args, **func_kwargs):
# **here** I am missing something to replace 0 with the correct values!
saved_arguments = {a: 0 for a in save_args_names}
log.add(Call(name, **saved_arguments))
return_value = func(*func_args, **func_kwargs)
return return_value
return wrapper
To test this, I have the following functions set up:
def inner(x, add=0):
return sum(x) + add
def outer(number, add=0):
x = range(number)
return inner(x, add)
and the basic use case (no saving of arguments) works:
inner = record(inner)
print(outer(1), outer(2), outer(3))
print(Log())
It outputs, correctly:
0 1 3
Call(name=inner, saved_arguments={})
Call(name=inner, saved_arguments={})
Call(name=inner, saved_arguments={})
What I am missing is a way to have this use case:
inner = record(inner, save_args_names=['x'])
print(outer(1), outer(2), outer(3))
print(Log())
to output:
0 1 3
Call(name=inner, saved_arguments={'x': range(0, 1)})
Call(name=inner, saved_arguments={'x': range(0, 2)})
Call(name=inner, saved_arguments={'x': range(0, 3)})
This, should also work for keyword arguments, e.g.:
inner = record(inner, save_args_names=['x', 'add'])
print(outer(1, 2), outer(2, 3), outer(3, 4))
print(Log())
should output:
2 4 7
Call(name=inner, saved_arguments={'x': range(0, 1), 'add': 2})
Call(name=inner, saved_arguments={'x': range(0, 2), 'add': 3})
Call(name=inner, saved_arguments={'x': range(0, 3), 'add': 4})
I feel like I am close and that the inspect library should help me close this, but a little help would be much appreciated!
The function you're looking for is Signature.bind. Define your wrapper function like so:
@functools.wraps(func)
def wrapper(*func_args, **func_kwargs):
signature = inspect.signature(func)
bound_args = signature.bind(*func_args, **func_kwargs)
saved_arguments = {a: bound_args.arguments[a] for a in save_args_names}
log.add(Call(name, **saved_arguments))
return_value = func(*func_args, **func_kwargs)
return return_value
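If you also want omitted defaults (such as add=0) to show up in saved_arguments, BoundArguments.apply_defaults() fills them in; a small variation on the wrapper above, under the same assumptions about the enclosing record function:
@functools.wraps(func)
def wrapper(*func_args, **func_kwargs):
    bound_args = inspect.signature(func).bind(*func_args, **func_kwargs)
    bound_args.apply_defaults()  # optional: also record defaults the caller did not pass
    saved_arguments = {a: bound_args.arguments[a] for a in save_args_names}
    log.add(Call(name, **saved_arguments))
    return func(*func_args, **func_kwargs)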
I want to apply some functions to a single value and return the collective result.
class Foo:
def __init__(self, i):
self.i = i
def get(self):
return self.fn1(self.fn2(self.i)) #200
def fn1(self, i):
return i + i # 100 + 100 = 200
def fn2(self, i):
return i * i # 10 * 10 = 100
#...
foo = Foo(10)
print(foo.get())
Is there a more elegant way or pattern?
Here is my attempt to improve this a little bit.
def fn1(i):
return i + i # 100 + 100 = 200
def fn2(i):
return i * i # 10 * 10 = 100
def get(i):
funcs = [fn2, fn1]
for f in funcs:
i = f(i)
return i
print(get(10))
In general, nesting functions as you do above is the most straightforward and readable way to compose functions in Python.
If you're composing many functions, it might be worth writing a compose function.
def compose(*funcs):
if len(funcs) == 1:
return funcs[0]
else:
def composition(*args, **kwargs):
return funcs[0](compose(*funcs[1:])(*args, **kwargs))
return composition
Or, if you prefer an iterative over a recursive solution:
def compose_pair(f1, f2):
def composition(*args, **kwargs):
return f1(f2(*args, **kwargs))
return composition
def compose_iterative(*funcs):
iterfuncs = iter(funcs)
comp = next(iterfuncs)
for f in iterfuncs:
comp = compose_pair(comp, f)
return comp
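Both versions compose right to left, just like the nested call in the question, so for example:
def fn1(i):
    return i + i

def fn2(i):
    return i * i

get = compose(fn1, fn2)  # equivalent to fn1(fn2(i))
print(get(10))  # 10*10 = 100, then 100+100 = 200

print(compose_iterative(fn1, fn2)(10))  # also 200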
Personally, two of my favorite Python functions are map and reduce.
from functools import reduce

def get(i):
return reduce(lambda acc, f: f(acc), [i,fn2,fn1] )
def fn1(i):
return i + i # 100 + 100 = 200
def fn2(i):
return i * i # 10 * 10 = 100
print( get(10) ) # 200
You could use a decorator-style solution:
class Base:
def __init__(self, decorated):
self.decorates = decorated
def foo(self, arg):
if self.decorates:
arg = self.decorates.foo( arg )
return self._do_foo( arg )
def _do_foo(self, arg):
return arg
Your implementations will inherit from Base and implement _do_foo().
You set it up like this:
a = Subclass(None)
b = AnotherSubclass(a)
c = YetAnotherSubclass(b)
All of the subclasses inherit from Base. When you call c.foo(arg), you'll get the result passed through all three _do_foo() methods.
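A short sketch of what the subclasses could look like (AddSelf and Square are invented names mirroring fn1/fn2 from the question):
class Square(Base):
    def _do_foo(self, arg):
        return arg * arg  # plays the role of fn2

class AddSelf(Base):
    def _do_foo(self, arg):
        return arg + arg  # plays the role of fn1

squarer = Square(None)  # innermost, runs first
adder = AddSelf(squarer)  # wraps it
print(adder.foo(10))  # 10*10 = 100, then 100+100 = 200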