I have an API method that accepts a callback. The callback expects one argument.
I would like this method to pass a second argument to callbacks that accept it. However, I must maintain compatibility with callbacks that accept only the original argument. (In fact, I expect that most users will not care about the additional argument, so it would be annoying to force them to explicitly ignore it.)
I know that this can be done using inspect. I'm wondering if there is an "idiomatic" or commonly used solution that's not quite so heavyweight.
I think you can use `__code__` to look at how many arguments the callback needs.
# Dispatch on the callback's declared positional-parameter count:
# co_argcount is read from the function's compiled code object.
# NOTE(review): this fails for callables without __code__ (builtins,
# functools.partial, callable instances) and miscounts *args signatures —
# hence the suggestion elsewhere to use inspect.signature instead.
if callback.__code__.co_argcount == 2:
    callback(arg1, arg2)
else:
    callback(arg1)
This code isn't tested but it should work.
A simpler solution would be to use a try block to try calling the callback with a second argument first, before falling back to calling with just one argument in the except block:
try:
    # EAFP: optimistically call with both arguments first.
    callback(first, second)
except TypeError as e:
    # Intent: only fall back when the TypeError came from the call itself
    # (wrong arity), not from inside the callback's body.
    # NOTE(review): 'func_name' is a placeholder for the callback's actual
    # __name__; this traceback-frame heuristic is fragile — confirm before use.
    if e.__traceback__.tb_frame.f_code.co_name != 'func_name':
        raise
    callback(first)
Using a function wrapper:
from inspect import signature, Parameter
def ignore_extra_arguments(function):
    """Wrap *function* so that surplus positional and keyword arguments
    are silently dropped before the call.

    The wrapper inspects the signature once, up front: positional args
    beyond the accepted count are truncated (unless *function* takes
    *args), and unknown keyword args are filtered out (unless it takes
    **kwargs).
    """
    pos_limit = 0
    takes_any_pos = False
    allowed_kw = set()
    takes_any_kw = False

    for param in signature(function).parameters.values():
        kind = param.kind
        if kind == Parameter.POSITIONAL_ONLY:
            pos_limit += 1
        elif kind == Parameter.POSITIONAL_OR_KEYWORD:
            pos_limit += 1
            allowed_kw.add(param.name)
        elif kind == Parameter.VAR_POSITIONAL:
            takes_any_pos = True
        elif kind == Parameter.KEYWORD_ONLY:
            allowed_kw.add(param.name)
        elif kind == Parameter.VAR_KEYWORD:
            takes_any_kw = True

    def trim_args(args):
        # *args in the signature means every positional arg is welcome.
        return args if takes_any_pos else args[:pos_limit]

    def trim_kwargs(kwargs):
        # **kwargs in the signature means every keyword arg is welcome.
        if takes_any_kw:
            return kwargs
        return {name: value for name, value in kwargs.items()
                if name in allowed_kw}

    def wrapped(*args, **kwargs):
        return function(*trim_args(args), **trim_kwargs(kwargs))

    return wrapped
It works, but it's a bit brute-force.
A simpler version, assuming that function has no keyword or variadic parameters:
from inspect import signature
def ignore_simple(function):
    """Simplified wrapper: truncate extra positional args.

    Assumes *function* has only plain positional parameters (no keyword-only
    and no variadic parameters).
    """
    limit = len(signature(function).parameters)

    def trimmed(*args):
        return function(*args[:limit])

    return trimmed
Related
I've been tinkering with decorators lately and (as an academic exercise) tried to implement a decorator that allows for partial application and/or currying of the decorated function. Furthermore this decorator should be optionally parameterizable and take a kwarg asap which determines if the decorated function should return as soon as all mandatory args/kwargs are acquired (default: asap=True) or if the decorated function should keep caching args/kwargs until the function is called without arguments (asap=False).
Here is the decorator I came up with:
def partialcurry(_f=None, *, asap: bool=True):
    """Decorator; optionally parameterizable. Allows partial application
    and/or currying of the decorated function F.

    Decorated F fires as soon as all mandatory args and kwargs are supplied,
    or, if ASAP=False, collects args and kwargs and fires only when F is
    called without args/kwargs.

    Fix over the original: instead of calling F and swallowing *any*
    TypeError (which also hid TypeErrors raised inside F's body), the
    collected arguments are first checked against F's signature with
    Signature.bind; only an incomplete binding defers the call.
    """
    from functools import wraps
    from inspect import signature

    def _decor(f, *args, **kwargs):
        _all_args, _all_kwargs = list(args), kwargs
        sig = signature(f)  # computed once per decorated function

        @wraps(f)
        def _wrapper(*more_args, **more_kwargs):
            nonlocal _all_args, _all_kwargs  # needed for resetting, not mutating
            _all_args.extend(more_args)
            _all_kwargs.update(more_kwargs)
            if asap:
                try:
                    # Do the collected args satisfy f's signature?
                    sig.bind(*_all_args, **_all_kwargs)
                except TypeError:
                    # Not yet complete: keep collecting.
                    return _wrapper
                # Complete: call f; exceptions from f itself now propagate.
                result = f(*_all_args, **_all_kwargs)
                # Reset closured args/kwargs caches.
                _all_args, _all_kwargs = [], {}
                return result
            # asap=False: keep caching until called with no arguments.
            if more_args or more_kwargs:
                return _wrapper
            result = f(*_all_args, **_all_kwargs)
            # Again, reset closured args/kwargs caches.
            _all_args, _all_kwargs = [], {}
            return result

        return _wrapper

    if _f is None:
        # Parameterized usage: @partialcurry(asap=...)
        return _decor
    # Bare usage: @partialcurry
    return _decor(_f)
### examples

#partialcurry
# NOTE(review): '#partialcurry' above was presumably the decorator
# '@partialcurry' before formatting stripped the '@'; without it these
# example calls would raise TypeError.
def fun(x, y, z=3):
    return x, y, z

print(fun(1))  # preloaded function object
print(fun(1, 2))  # all mandatory args supplied; (1,2,3); reset
print(fun(1)(2))  # all mandatory args supplied; (1,2,3); reset
print()

#partialcurry(asap=False)
# NOTE(review): stands for the decorator '@partialcurry(asap=False)'.
def fun2(x, y, z=3):
    return x, y, z

print(fun2(1)(2, 3))  # all mandatory args supplied; preloaded function object
print(fun2())  # fire + reset
print(fun2(1)(2))  # all mandatory args supplied; preloaded function object
print(fun2(4)())  # load one more and fire + reset
I am sure that this can be generally improved (implementing this as a class would be a good idea for example) and any suggestions are much appreciated, my main question however is how to determine if all mandatory args/kwargs are supplied, because I feel like to check for a TypeError is too generic and could catch all kinds of TypeErrors. One idea would be to define a helper function that calculates the number of mandatory arguments, maybe something like this:
def _required_args_cnt(f):
""" Auxiliary function: Calculate the number of /required/ args of a function F. """
all_args_cnt = f.__code__.co_argcount + f.__code__.co_kwonlyargcount
def_args_cnt = len(f.__defaults__) if f.__defaults__ else 0
return all_args_cnt - def_args_cnt
Obviously unsatisfactory..
Any suggestions are much appreciated!
I have the following code, in which I simply have a decorator for caching a function's results, and as a concrete implementation, I used the Fibonacci function.
After playing around with the code, I wanted to print the cache variable, that's initiated in the cache wrapper.
(It's not because I suspect the cache might be faulty, I simply want to know how to access it without going into debug mode and put a breakpoint inside the decorator)
I tried to explore the fib_w_cache function in debug mode, which is supposed to actually be the wrapped fib_w_cache, but with no success.
import timeit
def cache(f, cache = dict()):
    # NOTE(review): the mutable default dict here is *intentional* — it is a
    # single cache shared by every function decorated with @cache, and it can
    # be reached from outside via cache.__defaults__[0] (as discussed below).
    def args_to_str(*args, **kwargs):
        # Build a string cache key from the full call signature.
        return str(args) + str(kwargs)
    def wrapper(*args, **kwargs):
        args_str = args_to_str(*args, **kwargs)
        if args_str in cache:
            #print("cache used for: %s" % args_str)
            return cache[args_str]
        else:
            # Miss: compute, memoize, return.
            val = f(*args, **kwargs)
            cache[args_str] = val
            return val
    return wrapper
#cache
def fib_w_cache(n):
    """Recursive Fibonacci; meant to be wrapped by the @cache decorator."""
    if n == 0:
        return 0
    if n == 1:
        return 1
    return fib_w_cache(n - 2) + fib_w_cache(n - 1)
def fib_wo_cache(n):
    """Plain exponential-time recursive Fibonacci (no memoization)."""
    if n == 0:
        return 0
    if n == 1:
        return 1
    return fib_wo_cache(n - 1) + fib_wo_cache(n - 2)
# Time one run of each implementation over n = 0..29 for comparison.
print(timeit.timeit('[fib_wo_cache(i) for i in range(0,30)]', globals=globals(), number=1))
print(timeit.timeit('[fib_w_cache(i) for i in range(0,30)]', globals=globals(), number=1))
I admit this is not an "elegant" solution in a sense, but keep in mind that python functions are also objects. So with some slight modification to your code, I managed to inject the cache as an attribute of a decorated function:
import timeit
def cache(f):
    """Memoizing decorator that stores results on the wrapper itself.

    The cache dict is injected as the function attribute wrapper._cache,
    so each decorated function gets its own, externally reachable cache.
    """
    def make_key(*args, **kwargs):
        # Stringified call signature serves as the cache key.
        return str(args) + str(kwargs)

    def wrapper(*args, **kwargs):
        key = make_key(*args, **kwargs)
        if key not in wrapper._cache:
            #print("cache used for: %s" % key)  -- hit-tracing hook
            wrapper._cache[key] = f(*args, **kwargs)
        return wrapper._cache[key]

    wrapper._cache = {}
    return wrapper
#cache
def fib_w_cache(n):
    """Recursive Fibonacci (decorated with @cache in the original answer)."""
    if n == 0:
        return 0
    elif n == 1:
        return 1
    return fib_w_cache(n - 2) + fib_w_cache(n - 1)
#cache
def fib_w_cache_1(n):
    """Second memoized Fibonacci, present to show that each decorated
    function gets its own _cache instance.

    Fix over the original: the recursive calls went to fib_w_cache (a
    copy-paste slip), so fib_w_cache_1's own cache was only populated for
    top-level calls — recurse through fib_w_cache_1 itself instead.
    """
    if n == 0:
        return 0
    elif n == 1:
        return 1
    else:
        return fib_w_cache_1(n-2) + fib_w_cache_1(n-1)
def fib_wo_cache(n):
    """Uncached recursive Fibonacci used as the timing baseline."""
    if n in (0, 1):
        return 0 if n == 0 else 1
    return fib_wo_cache(n - 1) + fib_wo_cache(n - 2)
# Time one run of each implementation over n = 0..29.
print(timeit.timeit('[fib_wo_cache(i) for i in range(0,30)]', globals=globals(), number=1))
print(timeit.timeit('[fib_w_cache(i) for i in range(0,30)]', globals=globals(), number=1))
# Inspect the injected caches directly via the wrapper attribute.
print(fib_w_cache._cache)
print(fib_w_cache_1._cache) # to prove that caches are different instances for different functions
cache is of course a perfectly normal local variable in scope within the cache function, and a perfectly normal nonlocal cellvar in scope within the wrapper function, so if you want to access the value from there, you just do it—as you already are.
But what if you wanted to access it from somewhere else? Then there are two options.
First, cache happens to be defined at the global level, meaning any code anywhere (that hasn't hidden it with a local variable named cache) can access the function object.
And if you're trying to access the values of a function's default parameters from outside the function, they're available in the attributes of the function object. The inspect module docs explain the inspection-oriented attributes of each builtin type:
__defaults__ is a sequence of the values for all positional-or-keyword parameters, in order.
__kwdefaults__ is a mapping from keywords to values for all keyword-only parameters.
So:
>>> def f(a, b=0, c=1, *, d=2, e=3): pass
>>> f.__defaults__
(0, 1)
>>> f.__kwdefaults__
{'e': 3, 'd': 2}
So, for a simple case where you know there's exactly one default value and know which argument it belongs to, all you need is:
>>> cache.__defaults__[0]
{}
If you need to do something more complicated or dynamic, like get the default value for c in the f function above, you need to dig into other information—the only way to know that c's default value will be the second one in __defaults__ is to look at the attributes of the function's code object, like f.__code__.co_varnames, and figure it out from there. But usually, it's better to just use the inspect module's helpers. For example:
>>> inspect.signature(f).parameters['c'].default
1
>>> inspect.signature(cache).parameters['cache'].default
{}
Alternatively, if you're trying to access the cache from inside fib_w_cache, while there's no variable in lexical scope in that function body you can look at, you do know that the function body is only called by the decorator wrapper, and it is available there.
So, you can get your stack frame
frame = inspect.currentframe()
… follow it back to your caller:
back = frame.f_back
… and grab it from that frame's locals:
back.f_locals['cache']
It's worth noting that f_locals works like the locals function: it's actually a copy of the internal locals storage, so modifying it may have no effect, and that copy flattens nonlocal cell variables to regular local variables. If you wanted to access the actual cell variable, you'd have to grub around in things like back.f_code.co_freevars to get the index and then dig it out of the function object's __closure__. But usually, you don't care about that.
Just for a sake of completeness, python has caching decorator built-in in functools.lru_cache with some inspecting mechanisms:
from functools import lru_cache
#lru_cache(maxsize=None)
def fib_w_cache(n):
    """Recursive Fibonacci; functools.lru_cache supplies the memoization."""
    base = {0: 0, 1: 1}
    if n in base:
        return base[n]
    return fib_w_cache(n - 2) + fib_w_cache(n - 1)
# Exercise the cached function once, then show lru_cache's hit/miss stats.
print('fib_w_cache(10) = ', fib_w_cache(10))
print(fib_w_cache.cache_info())
Prints:
fib_w_cache(10) = 55
CacheInfo(hits=8, misses=11, maxsize=None, currsize=11)
I managed to find a solution (in some sense by #Patrick Haugh's advice).
I simply accessed cache.__defaults__[0] which holds the cache's dict.
The insights about the shared cache and how to avoid it were also quite useful.
Just as a note, the cache dictionary can only be accessed through the cache function object. It cannot be accessed through the decorated functions (at least as far as I understand). It logically aligns well with the fact that the cache is shared in my implementation, where on the other hand, in the alternative implementation that was proposed, it is local per decorated function.
You can make a class into a wrapper.
def args_to_str(*args, **kwargs):
    """Serialize a call signature into a single string cache key."""
    return "{0}{1}".format(args, kwargs)
class Cache(object):
    """Callable wrapper that memoizes results of *func*.

    Each instance carries its own cache dict, exposed as the .cache
    attribute, so decorated functions never share entries.
    """

    def __init__(self, func):
        self.func = func
        self.cache = {}

    def __call__(self, *args, **kwargs):
        # Stringified call signature as the key (inlined args_to_str).
        key = str(args) + str(kwargs)
        if key not in self.cache:
            self.cache[key] = self.func(*args, **kwargs)
        return self.cache[key]
Each function has its own cache. you can access it by calling function.cache. This also allows for any methods you wish to attach to your function.
If you wanted all decorated functions to share the same cache, you could use a class variable instead of an instance variable:
class SharedCache(object):
    # Class attribute: one dict shared by *all* SharedCache instances, so
    # every decorated function reads and writes the same cache.
    cache = {}
    def __init__(self, func):
        self.func = func
    #rest of the code is the same

#SharedCache
# NOTE(review): '#SharedCache' above stands for the decorator '@SharedCache'
# (the '@' was stripped by formatting); 'things' is placeholder pseudocode.
def function_1(stuff):
    things
I have a function of the form:
def my_func(my_list):
    # Walk the list; when a later element's value matches 'something',
    # recurse on the list minus its final element. This tail call is what
    # eventually overflows the recursion limit.
    # NOTE(review): 'another_func' and 'something' are placeholders, and the
    # comment-only branch bodies make this snippet pseudocode, not runnable.
    for i, thing in enumerate(my_list):
        my_val = another_func(thing)
        if i == 0:
            # do some stuff
        else:
            if my_val == something:
                # Tail-recursive step: retry on the shortened list.
                return my_func(my_list[:-1])
            # do some other stuff
The recursive part is getting called enough that I am getting a RecursionError, so I am trying to replace it with a while loop as explained here, but I can't work out how to reconcile this with the control flow statements in the function. Any help would be gratefully received!
There may be a good exact answer, but the most general (or maybe quick-and-dirty) way to switch from recursion to iteration is to manage the stack yourself. Just do manually what programming language does implicitly and have your own unlimited stack.
In this particular case there is tail recursion. You see, my_func recursive call result is not used by the caller in any way, it is immediately returned. What happens in the end is that the deepest recursive call's result bubbles up and is being returned as it is. This is what makes #outoftime's solution possible. We are only interested in into-recursion pass, as the return-from-recursion pass is trivial. So the into-recursion pass is replaced with iterations.
def my_func(my_list):
    """Iterative replacement for the tail-recursive original.

    Fix over the posted version: its 'run' flag was never set to False, so
    when the for loop finished without triggering the truncation branch the
    while loop rescanned the same list forever. An explicit flag per pass
    now terminates the loop; 'pass' also makes the comment-only branch
    syntactically valid.
    """
    while True:
        truncated = False
        for i, thing in enumerate(my_list):
            my_val = another_func(thing)
            if i == 0:
                # do some stuff
                pass
            else:
                if my_val == something:
                    # Emulates the recursive call my_func(my_list[:-1]).
                    my_list = my_list[:-1]
                    truncated = True
                    break
                # do some other stuff
        if not truncated:
            # Full pass with no truncation: the recursion would have ended.
            return
This is an iterative method.
Decorator
class TailCall(object):
    """Represents a deferred tail call: the target callable plus the
    args/kwargs it should eventually be invoked with."""
    def __init__(self, __function__):
        self.__function__ = __function__
        self.args = None
        self.kwargs = None
        # Set to True once __call__ has captured the arguments.
        self.has_params = False
    def __call__(self, *args, **kwargs):
        # Capture the call arguments instead of executing; the TailCaller
        # trampoline performs the real call later via __handle__.
        self.args = args
        self.kwargs = kwargs
        self.has_params = True
        return self
    def __handle__(self):
        # Execute exactly one step of the recorded call.
        if not self.has_params:
            raise TypeError
        # If the target is itself trampolined, call the undecorated function
        # directly to avoid starting a nested trampoline loop.
        if type(self.__function__) is TailCaller:
            return self.__function__.call(*self.args, **self.kwargs)
        return self.__function__(*self.args, **self.kwargs)
class TailCaller(object):
    """Trampoline wrapper: repeatedly resolves TailCall results so the
    wrapped function can 'recurse' without growing the Python stack."""

    def __init__(self, call):
        self.call = call

    def __call__(self, *args, **kwargs):
        outcome = self.call(*args, **kwargs)
        # Keep unwinding while the function keeps returning deferred calls.
        # (Exact type check, mirroring the original, not isinstance.)
        while type(outcome) is TailCall:
            outcome = outcome.__handle__()
        return outcome
#TailCaller
# NOTE(review): '#TailCaller' above was presumably the decorator
# '@TailCaller' before formatting stripped the '@'; without it, factorial
# returns a TailCall object instead of a number.
def factorial(n, prev=1):
    # Accumulator-style factorial: 'prev' carries the running product so the
    # recursive step is a pure tail call that the trampoline can flatten.
    if n < 2:
        return prev
    return TailCall(factorial)(n-1, n * prev)
To use this decorator simply wrap your function with #TailCaller decorator and return TailCall instance initialized with required params.
I'd like to say thank you for inspiration to #o2genum and to Kyle Miller who wrote an excellent article about this problem.
However good it is to remove this limitation, you should probably be aware of why this feature is not officially supported.
I'm writing a small game in python using the pygame module.
I have two lists of functions :
afunc (runs at the start of loop every frame)
bfunc (runs at the end of loop every frame)
def removefunction(function=None, arg=None, position=None):
    '''
    Remove functions which run every frame. Either a function, an argument,
    or both can be given, and every entry matching what was supplied is
    removed from self.afuncs (position 'a'), self.bfuncs (position 'b'),
    or both lists (position None). Raises LookupError if neither a
    function nor an argument is given.

    Fixes over the original:
    - entries are stored as (function, arg) tuples by addfunction, but the
      original searched for [function, arg] lists, which never match;
    - list.remove(list.index(...)) removed the element *equal to the index
      integer*, not the element at that index;
    - 'position is "a"' compared string identity instead of equality;
    - lists were mutated while being iterated, skipping entries;
    - bare 'except: pass' hid every one of the above errors, and the
      position=None branches never reached afuncs.
    '''
    if function is None and arg is None:
        # Nothing to match on.
        raise LookupError

    # Select the target list(s).
    if position == 'a':
        targets = [self.afuncs]
    elif position == 'b':
        targets = [self.bfuncs]
    else:
        targets = [self.afuncs, self.bfuncs]

    def _matches(entry):
        # An entry matches when every supplied criterion is the identical
        # object ('is', mirroring the original's identity checks).
        entry_function, entry_arg = entry
        if function is not None and entry_function is not function:
            return False
        if arg is not None and entry_arg is not arg:
            return False
        return True

    for funcs in targets:
        # Rebuild in place instead of removing while iterating.
        funcs[:] = [entry for entry in funcs if not _matches(entry)]
That's the code to remove a function from either of the lists, but it doesn't seem to be working. There are no errors, so I can't point out the exact problem.
this is the function that adds new functions to the lists :
def addfunction(function, arg=None, position='b'):
    '''
    Add a function which runs every frame. Position 'a' runs the function
    after everything is updated; position 'b' (the default) runs it before
    anything is updated. These functions run independently of which screen
    is being displayed unless explicitly stated in the code.
    '''
    target = self.bfuncs if position == 'b' else self.afuncs
    target.append((function, arg))
This is a self generated example where this doesn't work.
addfunction(print,'hi') ##adding function
removefunction(print) ##removing function
This may or may not be your problem, but you add items to the list using
self.afuncs.append((function, arg))
and find the index to remove them using
self.afuncs.index([function, arg])
However, (function, arg) != [function, arg], so the call to index will raise an exception, which is masked by your except: pass line.
On a side note, don't use except: pass. Ever. It's just a bug waiting to happen.
from django import template
# Filter/tag registry this module's template filters attach to.
register = template.Library()

# Maps a widget class name (lowercased) to the CSS class string to apply.
class_converter = {
    "textinput":"textinput textInput",
    "fileinput":"fileinput fileUpload"
}
#register.filter#<--------
def is_checkbox(field):
    """Return True when the bound field's widget is a CheckboxInput."""
    widget_type = field.field.widget.__class__.__name__
    return widget_type.lower() == "checkboxinput"
#register.filter#<--------
def with_class(field):
    # Translate the widget's class name via class_converter
    # (e.g. "textinput" -> "textinput textInput"); unknown names pass through.
    class_name = field.field.widget.__class__.__name__.lower()
    class_name = class_converter.get(class_name, class_name)
    # Append to any existing class attribute rather than clobbering it.
    if "class" in field.field.widget.attrs:
        field.field.widget.attrs['class'] += " %s" % class_name
    else:
        field.field.widget.attrs['class'] = class_name
    # NOTE(review): unicode() is Python 2 — on Python 3 this would be str().
    return unicode(field)
and register.filter function is:
def filter(self, name=None, filter_func=None):
    """Register a template filter, supporting every decorator call style:

    @register.filter() / @register.filter / @register.filter('somename') /
    @register.filter(name='somename') / register.filter('somename', func).

    Fixes over the original: identity comparisons use 'is None' instead of
    '== None', and the InvalidTemplateLibrary message is actually
    %-formatted (the original passed the tuple as a second constructor
    argument, leaving the '%r' placeholders unfilled).
    """
    if name is None and filter_func is None:
        # @register.filter()
        return self.filter_function
    elif filter_func is None:
        if callable(name):
            # @register.filter
            return self.filter_function(name)
        else:
            # @register.filter('somename') or @register.filter(name='somename')
            def dec(func):
                return self.filter(name, func)
            return dec
    elif name is not None and filter_func is not None:
        # register.filter('somename', somefunc)
        self.filters[name] = filter_func
        return filter_func
    else:
        raise InvalidTemplateLibrary(
            "Unsupported arguments to Library.filter: (%r, %r)"
            % (name, filter_func))
so
#register.filter
def a():
pass
is Equal
register.filter(name=None,filter_func=a)
yes??
Not exactly. The decorator syntax:
#register.filter
def a():
pass
is syntactic sugar for:
def a():
pass
a = register.filter(a)
So register.filter in this case will be called with the first positional argument, 'name' being your function. The django register.filter function handles that usage however and returns the right thing even if the filter is sent as the first argument (see the if callable(name) branch)
It's more common to make decorators that can take multiple arguments do so with the function to be decorated being the first positional argument (or alternately being function factories/closures), but I have a feeling the reason django did it this way was for backwards-compatibility. Actually I vaguely remember it not being a decorator in the past, and then becoming a decorator in a later django version.
No. Simple decorators take the function they decorate as a parameter, and return a new function.
a = register.filter(a)