Re-use incorrect printing - python

I have the following simple code (which represents much larger code):
def dec(data):
    def wrap_func(*args, **kwargs):
        if not wrap_func.has_run: print(
            '\n$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*___This is Start of FUNC___ \'{}\' $*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*\n'.format(
                data.__name__))
        print(data(*args, **kwargs))
        if not wrap_func.has_run: print(
            '\n$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*___This is End of FUNC___ \'{}\' $*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*\n'.format(
                data.__name__))
        wrap_func.has_run = True
        # return result
    wrap_func.has_run = False
    return wrap_func

@dec
def sum(a=1, b=1, times=1):
    return (a + b) * times

@dec
def multi(a=2, b=3):
    return sum(a, b=0, times=b)

# sum(1, 3)
multi(2, 3)
If only the sum(1, 3) line is active, I get (as intended):
$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*___This is Start of FUNC___ 'sum' $*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*
4
$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*___This is End of FUNC___ 'sum' $*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*
If only the multi(2, 3) line is active, I get these annoying 'leftovers' from the inner sum function:
$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*___This is Start of FUNC___ 'multi' $*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*
$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*___This is Start of FUNC___ 'sum' $*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*
6
$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*___This is End of FUNC___ 'sum' $*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*
None
$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*___This is End of FUNC___ 'multi' $*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*
So the issue is: when a decorated function calls another function that carries the same decorator, the decorator also prints the unwanted banners of the inner function.
What I wish to see is:
$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*___This is Start of FUNC___ 'multi' $*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*
6
$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*___This is End of FUNC___ 'multi' $*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*$*

You have multiple decorated functions, and each is given its own independent wrap_func function object.
If you want to produce only one set of banner lines around multiple decorated functions calling each other, you need to keep a shared stack count; attach this information to the decorator itself, not to the wrapper that the decorator returns:
def dec(f):
    def wrapper(*args, **kwargs):
        stack_count = getattr(dec, '_stack_count', 0)
        if stack_count == 0:
            print(f'-- start of decorated functions -- {f.__name__}')
        dec._stack_count = stack_count + 1
        try:
            result = f(*args, **kwargs)
            if result: print(result)
        finally:
            dec._stack_count = stack_count
            if stack_count == 0:
                print(f'-- end of decorated functions -- {f.__name__}')
        # return result
    return wrapper
So the first call to a wrapper sets dec._stack_count to 1, after which any nested calls to other wrappers only increment that number further, and nothing more is printed. On return, the counter is decremented again (the old, un-incremented value for that stack level is restored), and only when that value is 0 again do we print.
Note that I used try...finally to ensure the stack counter is decremented even if the decorated function raised an exception.
Demo:
>>> @dec
... def sum(a=1, b=1, times=1):
...     return (a + b) * times
...
>>> @dec
... def multi(a=2, b=3):
...     return sum(a, b=0, times=b)
...
>>> multi(2, 3)
-- start of decorated functions -- multi
6
-- end of decorated functions -- multi
>>> sum(2, 3)
-- start of decorated functions -- sum
5
-- end of decorated functions -- sum
Keeping track of a stack like that is really a context manager type of problem, so I'd further wrap this in such a context manager:
from contextlib import contextmanager

@contextmanager
def print_outer(before, after):
    """Context manager that prints the before and after text only for the outermost call.

    This is a reentrant context manager, and is not thread-safe.
    """
    outer = getattr(print_outer, '_is_outermost', True)
    if outer:
        print_outer._is_outermost = False
        print(before)
    try:
        yield
    finally:
        if outer:
            print_outer._is_outermost = True
            print(after)
Then use this context manager in the decorator:
def dec(f):
    def wrapper(*args, **kwargs):
        banners = (
            f'-- start of decorated functions -- {f.__name__}',
            f'-- end of decorated functions -- {f.__name__}'
        )
        with print_outer(*banners):
            result = f(*args, **kwargs)
            if result: print(result)
        # return result
    return wrapper
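For completeness, a quick check with the same @dec-decorated sum and multi as in the demo above shows the context-manager version produces the same single pair of banners:

>>> multi(2, 3)
-- start of decorated functions -- multi
6
-- end of decorated functions -- multi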

You can use a global lock flag which indicates whether one of the wrappers in the stack has already printed. The lock is acquired by the outermost wrapper and prevents the inner ones from printing.
Code example:
lock = False

def dec(f):
    def wrapper(*args, **kwargs):
        global lock
        if not lock:
            lock = True
            print('Start', f.__name__)
            result = f(*args, **kwargs)
            print('End', f.__name__)
            lock = False
        else:
            result = f(*args, **kwargs)
        return result
    return wrapper
Alternatively:
lock = False

def dec(f):
    def wrapper(*args, **kwargs):
        global lock
        locked = lock
        if not locked:
            lock = True
            print('Start', f.__name__)
        result = f(*args, **kwargs)
        if not locked:
            lock = False
            print('End', f.__name__)
        return result
    return wrapper
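With either variant, a nested call prints only one pair of Start/End lines. A minimal check, assuming the sum and multi definitions from the question are decorated with this dec (note this wrapper returns the result instead of printing it):

@dec
def sum(a=1, b=1, times=1):
    return (a + b) * times

@dec
def multi(a=2, b=3):
    return sum(a, b=0, times=b)

print(multi(2, 3))
# Start multi
# End multi
# 6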

Related

nested functions calling with multiple args in python

This was the program from our test and I couldn't understand what was going on. This problem is called the nested function problem.
def foo(a):
    def bar(b):
        def foobar(c):
            return a + b + c
        return foobar
    return bar

a, b, c = map(int, input().split())
res = foo(a)(b)(c)
print(res)
I have tried to debug this program but couldn't get any idea of why it works.
Why is foo(a)(b)(c) not giving an error?
Why does it work, and what is this called?
This is the concept of closures: inner functions are able to access variables of the enclosing scope.
If we do not access any variables from the enclosing scope, they are just ordinary functions with a different scope:
def get_add(x):
    def add(y):
        return x + y
    return add

add_function = get_add(10)
print(add_function(5))  # result is 15
Everything in Python is an object, functions included, so you can pass them as arguments and return them. For example:
def inc(var):
    return var + 1

def my_func():
    return inc

my_inc = my_func()
print(my_inc)     # <function inc at ...>
print(my_inc(1))  # 2
Moreover, this is closely related to the concept of decorators:
def log_this(func):
    def wrapper(*args, **kwargs):
        print('start', str(args))
        res = func(*args, **kwargs)
        return res
    return wrapper

@log_this
def inc(var):
    return var + 1

print(inc(10))
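For reference, running this example prints:

start (10,)
11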

how to partially drop functools.lru_cache by one argument?

I have a get(bid, mid, pid) function. It is decorated with lru_cache. I want to drop all cache entries with bid == 105, for example.
I was thinking of closures that return decorated functions. Then I get a separate cache for each bid value, plus a non-cached function holding a dict of these closures that acts like a router. But maybe there is a more Pythonic way to do this?
Update: I came up with something like this, and it seems to work:
import functools

getters = {}

def facade(bid, mid, pid):
    global getters  # not very good, better to use a class
    if bid not in getters:
        def create_getter(bid):
            @functools.lru_cache(maxsize=None)
            def get(mid, pid):
                print('cache miss')
                return bid + mid + pid
            return get
        getters[bid] = create_getter(bid)
    return getters[bid](mid, pid)

val = facade(bid, mid, pid)  # ability to read like before
if need_to_drop:
    getters[bid].cache_clear()  # ability to flush entries with specified bid
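As the inline comment hints ('better to use a class'), the global getters dict can be folded into a small class. Here is a minimal sketch of that idea; the BidCache name and its methods are illustrative, not part of any library:

import functools

class BidCache:
    """Keeps a separate LRU cache per bid, so one bid can be flushed alone."""
    def __init__(self):
        self._getters = {}

    def _make_getter(self, bid):
        @functools.lru_cache(maxsize=None)
        def get(mid, pid):
            return bid + mid + pid  # stand-in for the real computation
        return get

    def get(self, bid, mid, pid):
        if bid not in self._getters:
            self._getters[bid] = self._make_getter(bid)
        return self._getters[bid](mid, pid)

    def drop(self, bid):
        # clear only the cache entries belonging to this bid
        if bid in self._getters:
            self._getters[bid].cache_clear()

cache = BidCache()
val = cache.get(105, 1, 2)  # cached per (mid, pid) within bid 105
cache.drop(105)             # flush only the bid == 105 entries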
Maybe wrap functools.lru_cache and filter parameters?
from functools import lru_cache

def filtered_lru(filter_func: callable, maxsize: int):
    def wrapper(f):
        cached = lru_cache(maxsize=maxsize)(f)
        def wrapped(*args, **kwargs):
            if filter_func(*args, **kwargs):
                print('Using cache')
                return cached(*args, **kwargs)
            else:
                print('Not using cache')
                return f(*args, **kwargs)
        return wrapped
    return wrapper

def _get_filter(*args, **kwargs):
    return args[0] != 0

@filtered_lru(_get_filter, maxsize=100)
def get(num):
    print('Calculating...')
    return 2 * num

if __name__ == '__main__':
    print(get(1))
    print(get(1))
    print(get(1))
    print(get(0))
    print(get(0))
output:
Using cache
Calculating...
2
Using cache
2
Using cache
2
Not using cache
Calculating...
0
Not using cache
Calculating...
0

Python decorator to time recursive functions

I have a simple decorator to track the runtime of a function call:
import time

def timed(f):
    def caller(*args):
        start = time.time()
        res = f(*args)
        end = time.time()
        return res, end - start
    return caller
This can be used as follows, and returns a tuple of the function result and the execution time.
@timed
def test(n):
    for _ in range(n):
        pass
    return 0

print(test(900))  # prints (0, 2.69e-05)
Simple enough. But now I want to apply this to recursive functions. Applying the above wrapper to a recursive function results in nested tuples with the times of each recursive call, as is expected.
@timed
def rec(n):
    if n:
        return rec(n - 1)
    else:
        return 0

print(rec(3))  # prints ((((0, 1.90e-06), 8.10e-06), 1.28e-05), 1.90e-05)
What's an elegant way to write the decorator so that it handles recursion properly? Obviously, you could wrap the call in a timed function:
@timed
def wrapper():
    return rec(3)
This will give a tuple of the result and the time, but I want all of it to be handled by the decorator so that the caller does not need to worry about defining a new function for every call. Ideas?
The problem here isn't really the decorator. The problem is that rec needs rec to be a function that behaves one way, but you want rec to be a function that behaves differently. There's no clean way to reconcile that with a single rec function.
The cleanest option is to stop requiring rec to be two things at once. Instead of using decorator notation, assign timed(rec) to a different name:
def rec(n):
    ...

timed_rec = timed(rec)
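Now the recursive calls inside rec go to the plain, undecorated rec, and only the outer call is timed (timing value illustrative):

print(timed_rec(3))  # (0, 1.9e-05) — a single (result, time) pair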
If you don't want two names, then rec needs to be written to understand the actual value that the decorated rec will return. For example,
@timed
def rec(n):
    if n:
        val, runtime = rec(n - 1)
        return val
    else:
        return 0
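Each level now unpacks the (value, time) pair coming back from the nested call, so the top-level call again returns a single pair:

val, runtime = rec(3)  # val == 0, runtime covers the whole recursion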
I prefer the other answers so far (particularly user2357112's answer), but you can also make a class-based decorator that detects whether the function has been activated, and if so, bypasses the timing:
import time

class fancy_timed(object):
    def __init__(self, f):
        self.f = f
        self.active = False

    def __call__(self, *args):
        if self.active:
            return self.f(*args)
        start = time.time()
        self.active = True
        res = self.f(*args)
        end = time.time()
        self.active = False
        return res, end - start

@fancy_timed
def rec(n):
    if n:
        time.sleep(0.01)
        return rec(n - 1)
    else:
        return 0

print(rec(3))
(class written with (object) so that this is compatible with py2k and py3k).
Note that to really work properly, the outermost call should use try and finally. Here's the fancied up fancy version of __call__:
def __call__(self, *args):
    if self.active:
        return self.f(*args)
    try:
        start = time.time()
        self.active = True
        res = self.f(*args)
        end = time.time()
        return res, end - start
    finally:
        self.active = False
You could structure your timer in a different way by *ahem* abusing the contextmanager and function attribute a little...
from contextlib import contextmanager
import time

@contextmanager
def timed(func):
    timed.start = time.time()
    try:
        yield func
    finally:
        timed.duration = time.time() - timed.start

def test(n):
    for _ in range(n):
        pass
    return n

def rec(n):
    if n:
        time.sleep(0.05)  # extra delay to notice the difference
        return rec(n - 1)
    else:
        return n

with timed(rec) as r:
    print(r(10))
    print(r(20))
print(timed.duration)

with timed(test) as t:
    print(t(555555))
    print(t(666666))
print(timed.duration)
Results:
# recursive
0
0
1.5130000114440918
# non-recursive
555555
666666
0.053999900817871094
If this is deemed a bad hack I'll gladly accept your criticism.
Although it is not an overall solution to the problem of integrating recursion with decorators, for the problem of timing alone I have verified that the last element of the nested tuple is the overall run time, as it is the time recorded by the upper-most recursive call. Thus, if you had
@timed
def rec():
    ...

to get the overall runtime given the original function definitions, you could simply do

rec()[1]
Getting the result of the call, on the other hand, would then require recursing through the nested tuple:

def get(tup):
    if isinstance(tup, tuple):
        return get(tup[0])
    else:
        return tup
This might be too complicated to simply get the result of your function.
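For example, with the original @timed rec from the question (timing values taken from the sample output above):

nested = rec(3)     # ((((0, 1.90e-06), 8.10e-06), 1.28e-05), 1.90e-05)
print(get(nested))  # 0, the actual function result
print(nested[1])    # 1.90e-05, the overall runtime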
I encountered the same issue when trying to profile a simple quicksort implementation.
The main issue is that decorators are executed on each function call, and we need something that can keep state so we can sum all the calls at the end. Decorators are not the right tool for the job.
However, one idea is to exploit the fact that functions are objects and can have attributes. This is explored below with a simple decorator. Something that must be understood is that, by using the decorator syntax sugar (@), the function will always be accumulating its timings.
from typing import Any, Callable
from time import perf_counter

class timeit:
    def __init__(self, func: Callable) -> None:
        self.func = func
        self.timed = []

    def __call__(self, *args: Any, **kwds: Any) -> Any:
        start = perf_counter()
        res = self.func(*args, **kwds)
        end = perf_counter()
        self.timed.append(end - start)
        return res

# usage
@timeit
def rec(n):
    ...

if __name__ == "__main__":
    result = rec(4)  # rec result
    print(f"Took {sum(rec.timed):.2f} seconds")
    # Out: Took 3.39 seconds

    result = rec(4)  # rec result
    # timings between calls are accumulated
    # Out: Took 6.78 seconds
Which brings us to a solution inspired by @r.ook: below is a simple context manager that stores each run's timing and prints their sum at the end (__exit__). Notice that, because each timing requires its own with statement, this will not accumulate timings across different runs.
from typing import Any, Callable
from time import perf_counter

class timeit:
    def __init__(self, func: Callable) -> None:
        self.func = func
        self.timed = []

    def __call__(self, *args: Any, **kwds: Any) -> Any:
        start = perf_counter()
        res = self.func(*args, **kwds)
        end = perf_counter()
        self.timed.append(end - start)
        return res

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # TODO: report `exc_*` if an exception gets raised
        print(f"Took {sum(self.timed):.2f} seconds")
        return

# usage
def rec(n):
    ...

if __name__ == "__main__":
    with timeit(rec) as f:
        result = f(4)  # rec result
    # Out: Took 3.39 seconds

Understanding this Python decorator code

I have a problem understanding this code, which I got from the book "Learning Python", in the section on decorators.
Why does this code print the result value once instead of twice? We return result twice, once in max_result and again in measure. Here is the code:
from time import sleep, time
from functools import wraps

def measure(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        t = time()
        result = func(*args, **kwargs)
        print(func.__name__, 'took:', time() - t)
        return result
    return wrapper

def max_result(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        if result > 100:
            print('Result is too big ({0}). Max allowed is 100.'
                  .format(result))
        return result
    return wrapper

@measure
@max_result
def cube(n):
    return n ** 3

print(cube(2))
print(cube(5))
Here is the output; why don't we get two 8s or two 125s?
>>> print(cube(2))
cube took: 8.106231689453125e-06
8
>>> print(cube(5))
Result is too big (125). Max allowed is 100.
cube took: 5.91278076171875e-05
125
>>>
The decorators are chained. The original cube() function was wrapped by the max_result decorator, and the result of that decoration was decorated by measure.
So the return value of cube() is taken by wrapper() in max_result(), and the result of that function is taken by wrapper() in measure() before being returned to the caller.
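In other words, the stacked decorators are equivalent to applying them from the inside out:

cube = measure(max_result(cube))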
Unraveling all the decorators would give you:
def measure_wrapper(*args, **kwargs):
    t = time()
    result = max_result_wrapper(*args, **kwargs)
    print(func.__name__, 'took:', time() - t)
    return result

def max_result_wrapper(*args, **kwargs):
    result = original_cube(*args, **kwargs)
    if result > 100:
        print('Result is too big ({0}). Max allowed is 100.'
              .format(result))
    return result

def original_cube(n):
    return n ** 3

cube = measure_wrapper
So calling cube(2) produces:
- measure_wrapper(2) records t = time() and calls
  - max_result_wrapper(2), which directly calls
    - original_cube(2), which returns 2 ** 3 == 8
  - max_result_wrapper tests 8 > 100, which is false, so it returns 8
- measure_wrapper prints the time the max_result_wrapper() call took and returns 8

Counting python method calls within another method

I'm actually trying to do this in Java, but I'm in the process of teaching myself Python and it made me wonder if there is an easy/clever way to do this with wrappers or something.
I want to know how many times a specific method was called inside another method. For example:
def foo(z):
    # do something
    return result

def bar(x, y):
    # complicated algorithm/logic involving foo
    return foobar
So for each call to bar with various parameters, I'd like to know how many times foo was called, perhaps with output like this:
>>> print bar('xyz',3)
foo was called 15 times
[results here]
>>> print bar('stuv',6)
foo was called 23 times
[other results here]
Edit: I realize I could just slap a counter inside bar and dump it when I return, but it would be cool if there were some magic you could do with wrappers to accomplish the same thing. It would also mean I could reuse the same wrappers somewhere else without having to modify any code inside the method.
Sounds like almost the textbook example for decorators!
def counted(fn):
    def wrapper(*args, **kwargs):
        wrapper.called += 1
        return fn(*args, **kwargs)
    wrapper.called = 0
    wrapper.__name__ = fn.__name__
    return wrapper

@counted
def foo():
    return

>>> foo()
>>> foo.called
1
You could even use another decorator to automate the recording of how many times a function is called inside another function:
def counting(other):
    def decorator(fn):
        def wrapper(*args, **kwargs):
            other.called = 0
            try:
                return fn(*args, **kwargs)
            finally:
                print '%s was called %i times' % (other.__name__, other.called)
        wrapper.__name__ = fn.__name__
        return wrapper
    return decorator

@counting(foo)
def bar():
    foo()
    foo()

>>> bar()
foo was called 2 times
If foo or bar can end up calling themselves, though, you'd need a more complicated solution involving stacks to cope with the recursion. Then you're heading towards a full-on profiler...
Possibly this wrapped decorator stuff, which tends to be used for magic, isn't the ideal place to be looking if you're still ‘teaching yourself Python’!
This defines a decorator to do it:
def count_calls(fn):
    def _counting(*args, **kwargs):
        _counting.calls += 1
        return fn(*args, **kwargs)
    _counting.calls = 0
    return _counting

@count_calls
def foo(x):
    return x

def bar(y):
    foo(y)
    foo(y)

bar(1)
print foo.calls
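Note that _counting.calls keeps accumulating across separate calls to bar. To report a per-call count like the question asks, you could reset the counter around each call, e.g.:

foo.calls = 0
bar(1)
print foo.calls  # 2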
After your response - here's a way with a decorator factory...
import inspect

def make_decorators():
    # Mutable shared storage...
    caller_L = []
    callee_L = []
    called_count = [0]

    def caller_decorator(caller):
        caller_L.append(caller)
        def counting_caller(*args, **kwargs):
            # Returning result here separate from the count report in case
            # the result needs to be used...
            result = caller(*args, **kwargs)
            print callee_L[0].__name__, \
                'was called', called_count[0], 'times'
            called_count[0] = 0
            return result
        return counting_caller

    def callee_decorator(callee):
        callee_L.append(callee)
        def counting_callee(*args, **kwargs):
            # Next two lines are an alternative to
            # sys._getframe(1).f_code.co_name mentioned by Ned...
            current_frame = inspect.currentframe()
            caller_name = inspect.getouterframes(current_frame)[1][3]
            if caller_name == caller_L[0].__name__:
                called_count[0] += 1
            return callee(*args, **kwargs)
        return counting_callee

    return caller_decorator, callee_decorator

caller_decorator, callee_decorator = make_decorators()

@callee_decorator
def foo(z):
    # do something
    return ' foo result'

@caller_decorator
def bar(x, y):
    # complicated algorithm/logic simulation...
    for i in xrange(x + y):
        foo(i)
    foobar = 'some result other than the call count that you might use'
    return foobar

bar(1, 1)
bar(1, 2)
bar(2, 2)
And here's the output (tested with Python 2.5.2):
foo was called 2 times
foo was called 3 times
foo was called 4 times
