I have a lot of necessary time.sleep() calls in my script. I want to clean up the code, ideally by appending the wait/pause to the previous line instead of putting it on a new line. Example:
call(['networksetup', '-setv4off', 'direct IP'])
time.sleep(5)
Is there any way of achieving this? (Python 3.4)
Thanks in advance.
If you mean combining these two lines into one, try:
call(['networksetup', '-setv4off', 'direct IP']); time.sleep(5)
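Be aware that PEP 8 generally discourages compound statements like this. If the goal is one line per call site, a tiny helper function is a more idiomatic alternative; this is just a sketch (the name call_then_sleep is illustrative, and it assumes call is subprocess.call):
import time
from subprocess import call

def call_then_sleep(cmd, duration=5):
    # run the command, then pause, keeping each call site on one line
    call(cmd)
    time.sleep(duration)

call_then_sleep(['networksetup', '-setv4off', 'direct IP'])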
If you unconditionally want to be able to sleep after a particular function (or group of functions), the easiest way of doing this that I'm aware of is to use a decorator.
from functools import wraps
import time

def sleep_decorator(f, duration=5.0):
    @wraps(f)
    def wrapper(*args, **kwargs):
        result = f(*args, **kwargs)
        time.sleep(duration)
        return result
    return wrapper

@sleep_decorator
def call(*args, **kwargs):
    return None

call(['networksetup', '-setv4off', 'direct IP'])
The issue with that solution is that the duration of the sleep can't be altered, and you can't enable and disable the sleep call on a case-by-case basis.
To enhance the solution you can add arguments to your decorator.
from functools import wraps
import time

def sleep_decorator(duration=5.0, active=True):
    def decorator(f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            result = f(*args, **kwargs)
            sleep_duration = kwargs.get('duration', duration)
            sleep_active = kwargs.get('active', active)
            if sleep_active:
                time.sleep(sleep_duration)
            return result
        return wrapper
    return decorator
Which means you could now write your code as follows:
@sleep_decorator(active=False, duration=0.1)
def call(*args, **kwargs):
    return None

call(['networksetup', '-setv4off', 'direct IP'])
call(['networksetup', '-setv4off', 'direct IP'], active=True)
call(['networksetup', '-setv4off', 'direct IP'], active=True, duration=1.0)
One other alternative that can let you inject code is to rewrite the Abstract Syntax Tree of your code at runtime. This would allow you to inject arbitrary function calls. Libraries like numba and pony use tricks like this to transform Python code into something domain specific.
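For completeness, here is a hedged sketch of that AST idea (the source string and the 0.1-second duration are purely illustrative; ast.Constant needs Python 3.8+). It parses a source string, appends a time.sleep call after every expression statement, then compiles and runs the result:
import ast
import time

class InsertSleep(ast.NodeTransformer):
    def visit_Expr(self, node):
        # build a `time.sleep(0.1)` statement and place it right after `node`
        sleep_call = ast.Expr(value=ast.Call(
            func=ast.Attribute(value=ast.Name(id='time', ctx=ast.Load()),
                               attr='sleep', ctx=ast.Load()),
            args=[ast.Constant(value=0.1)], keywords=[]))
        return [node, sleep_call]

source = "print('hello')"
tree = ast.fix_missing_locations(InsertSleep().visit(ast.parse(source)))
exec(compile(tree, '<rewritten>', 'exec'))  # prints, then sleeps 0.1s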
Related
How could one write a debounce decorator in Python which debounces not only on the function called but also on the function arguments (or combination of function arguments) used?
Debouncing means suppressing calls to a function within a given timeframe. Say you call a function 100 times within 1 second but you only want to allow the function to run once every 10 seconds: a debounce-decorated function would run once, 10 seconds after the last call, provided no new calls were made in the meantime. Here I'm asking how one could debounce a function call with specific function arguments.
An example could be to debounce an expensive update of a person object like:
@debounce(seconds=10)
def update_person(person_id):
    # time consuming, expensive op
    print('>>Updated person {}'.format(person_id))
Then debouncing on the function - including function arguments:
update_person(person_id=144)
update_person(person_id=144)
update_person(person_id=144)
>>Updated person 144
update_person(person_id=144)
update_person(person_id=355)
>>Updated person 144
>>Updated person 355
So calling the function update_person with the same person_id would be suppressed (debounced) until the 10-second debounce interval has passed without a new call to the function with that same person_id.
There are a few debounce decorators around, but none of them includes the function arguments; for example: https://gist.github.com/walkermatt/2871026
I've done a similar throttle decorator by function and arguments:
import time

def throttle(s, keep=60):
    def decorate(f):
        caller = {}

        def wrapped(*args, **kwargs):
            nonlocal caller
            # key the call on all of the positional arguments
            called_args = '{}'.format(args)
            t_ = time.time()
            if caller.get(called_args, None) is None or t_ - caller.get(called_args, 0) >= s:
                result = f(*args, **kwargs)
                # keep only calls made within the last `keep` seconds
                caller = {key: val for key, val in caller.items() if t_ - val <= keep}
                caller[called_args] = t_
                return result
            # suppressed call: still prune old entries and refresh the timestamp
            caller = {key: val for key, val in caller.items() if t_ - val <= keep}
            caller[called_args] = t_
        return wrapped
    return decorate
The main takeaway is that it keys the calls on the function arguments via caller[called_args].
See also the difference between throttle and debounce (throttle guarantees at most one execution per interval, while debounce runs only after the calls have stopped): http://demo.nimius.net/debounce_throttle/
Update:
After some tinkering with the above throttle decorator and the threading.Timer example in the gist, I actually think this should work:
from threading import Timer
from inspect import signature

def debounce(wait):
    def decorator(fn):
        sig = signature(fn)
        caller = {}

        def debounced(*args, **kwargs):
            nonlocal caller
            try:
                # build a key from the function name and its bound arguments
                bound_args = sig.bind(*args, **kwargs)
                bound_args.apply_defaults()
                called_args = fn.__name__ + str(dict(bound_args.arguments))
            except TypeError:
                called_args = ''

            def call_it(key):
                try:
                    # always remove the timer entry when the call fires
                    caller.pop(key)
                except KeyError:
                    pass
                fn(*args, **kwargs)

            try:
                # cancel any timer still pending for the same arguments
                caller[called_args].cancel()
            except KeyError:
                pass
            caller[called_args] = Timer(wait, call_it, [called_args])
            caller[called_args].start()
        return debounced
    return decorator
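For illustration, a minimal usage sketch of the decorator above, reusing the question's update_person example (the 2-second wait is arbitrary):
import time

@debounce(2.0)
def update_person(person_id):
    print('>>Updated person {}'.format(person_id))

update_person(person_id=144)
update_person(person_id=144)  # cancels and restarts the timer for person_id=144
update_person(person_id=355)  # independent timer: different arguments
time.sleep(2.5)               # give both timers time to fire
# expected output: one line for person 144 and one for person 355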
I've had the same need to build a debounce annotation for a personal project. After stumbling upon the same gist / discussion you have, I ended up with the following solution:
import threading

def debounce(wait_time):
    """
    Decorator that will debounce a function so that it is called after wait_time seconds.
    If it is called multiple times, it will wait for the last call to be debounced and run only that one.
    """
    def decorator(function):
        def debounced(*args, **kwargs):
            def call_function():
                debounced._timer = None
                return function(*args, **kwargs)

            # if we already have a call to the function currently waiting to be executed, reset the timer
            if debounced._timer is not None:
                debounced._timer.cancel()

            # after wait_time, call the function provided to the decorator with its arguments
            debounced._timer = threading.Timer(wait_time, call_function)
            debounced._timer.start()

        debounced._timer = None
        return debounced
    return decorator
I've created an open-source project providing functions such as debounce, throttle, filter, etc. as decorators. Contributions to improve these decorators, or to add other useful ones, are more than welcome: decorator-operations repository
I have a wrapper to time the execution of certain functions in a list. Most of these functions take one and the same parameter: era. I run the functions as displayed below. However, some functions require an extra parameter, e.g. the function dummy_function(). I've been looking for a way to add this parameter in a Pythonic way. I found some solutions, but they are very ugly and not quite scalable. Any help or suggestions would be tremendously appreciated!
def dummy_function(self, period, letter='A'):
    """ Debugging purposes only """
    print(f'This function prints the letter {letter}.')
    from time import sleep
    sleep(3)

def timed_execution(callbacks, era):
    for callback in callbacks:
        start_time = time.time()
        callback(era)
        end_time = time.time()
        print(f'{callback.__name__} took {end_time-start_time:.3f}s')

def calculate_insights(era):
    timed_execution([
        dummy_function,
        another_function,
        yet_another_function,
    ], era)
calculate_insights(era)
Perhaps the best way is to actually pass the arguments along with their respective functions (see the partial sketch below), or just to use a wrapper to calculate the time of a function.
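For the first option, here's a hedged sketch using functools.partial: the extra parameters are bound up front, so every entry in the list is still called with just era. The function bodies below are hypothetical stand-ins for the question's functions (dropping the stray self parameter), and note that partial objects have no __name__, hence the getattr fallback:
import time
from functools import partial

def dummy_function(era, letter='A'):
    print(f'era {era}: prints the letter {letter}.')

def another_function(era):
    print(f'era {era}: needs no extra parameters.')

def timed_execution(callbacks, era):
    for callback in callbacks:
        start_time = time.time()
        callback(era)
        end_time = time.time()
        # partial objects have no __name__, so fall back to repr()
        name = getattr(callback, '__name__', repr(callback))
        print(f'{name} took {end_time - start_time:.3f}s')

def calculate_insights(era):
    timed_execution([
        partial(dummy_function, letter='B'),  # extra argument bound up front
        another_function,
    ], era)

calculate_insights('1990s')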
Code taken from another question
from functools import wraps
from time import time

def timing(f):
    @wraps(f)
    def wrap(*args, **kw):
        ts = time()
        result = f(*args, **kw)
        te = time()
        print('func:%r args:[%r, %r] took: %2.4f sec' % (f.__name__, args, kw, te - ts))
        return result
    return wrap
Then you can do something along the lines of
@timing
def dummy_function(self, period, letter='A'):
    """ Debugging purposes only """
    print(f'This function prints the letter {letter}.')
    from time import sleep
    sleep(3)

def calculate_insights():
    dummy_function(era)
Or you could just use a dict with all the parameters passed into each callback, but that doesn't sound too Pythonic to me.
There's a situation where I want to check how many times an internal class method has been called. I have a sensitive cloud task that must be executed a number of times that depends on the circumstances. I would like to strengthen my application with a unit test that asserts the number of times a specific function has been called.
To do so, in a much simpler scenario, I would like to write a test for the following script:
class HMT:
    def __init__(self):
        self.buildedList = []

    def handle(self, string_to_explode: str):
        for exploded_part in string_to_explode.split(","):
            self.doCoolThings(exploded_part)

    def doCoolThings(self, fetched_raw_string: str):
        self.buildedList.append("Cool thing done: " + fetched_raw_string)
Depending on the string that I deliver to the handle function, doCoolThings will be called N times (in this simple case, N depends solely on the number of commas inside the string).
I can make a test work by just counting the number of elements inside buildedList:
import unittest
from HMT import HMT

class test_HMT(unittest.TestCase):
    def setUp(self):
        self.hmt = HMT()

    def test_hmt_3(self):
        string_to_test = "alpha,beta,gamma"
        self.hmt.handle(string_to_test)
        self.assertEqual(3, len(self.hmt.buildedList))

    def test_hmt_2(self):
        string_to_test = "delta,epsilon"
        self.hmt.handle(string_to_test)
        self.assertEqual(2, len(self.hmt.buildedList))
But in the real scenario there is no public list whose element count always matches the number of times doCoolThings was called.
So, how do I check how many times doCoolThings was called without needing to count the list's elements?
I know that I could just put a counter in the class that is increased each time doCoolThings is called and expose it externally to be checked afterwards. But I would rather not mess up the code with lines that are not directly related to my business rule.
After @jarmod's comment, I came up with this version of the code:
def mydecorator(func):
    def wrapped(*args, **kwargs):
        wrapped.calls += 1
        return func(*args, **kwargs)
    wrapped.calls = 0
    return wrapped

class HMT:
    def __init__(self):
        self.buildedList = []

    def handle(self, string_to_explode: str):
        for exploded_part in string_to_explode.split(","):
            self.doCoolThings(exploded_part)

    @mydecorator
    def doCoolThings(self, fetched_raw_string: str, *args, **kwargs):
        self.buildedList.append("Cool thing done: " + fetched_raw_string)
And the test:
import unittest
from HMT import HMT

class test_HMT(unittest.TestCase):
    def test_hmt_3_dec(self):
        hmt = HMT()
        string_to_test = "epsilon,ota,eta"
        hmt.handle(string_to_test)
        self.assertEqual(3, hmt.doCoolThings.calls)

    def test_hmt_3(self):
        hmt = HMT()
        string_to_test = "alpha,beta,gamma"
        hmt.handle(string_to_test)
        self.assertEqual(3, len(hmt.buildedList))
But it still isn't working properly. When I run the tests, I receive:
.F
======================================================================
FAIL: test_hmt_3_dec (myTest.test_HMT)
----------------------------------------------------------------------
Traceback (most recent call last):
File "D:\Users\danil\tmp\myDec\myTest.py", line 10, in test_hmt_3_dec
self.assertEqual(3, hmt.doCoolThings.calls)
AssertionError: 3 != 6
----------------------------------------------------------------------
Ran 2 tests in 0.001s
FAILED (failures=1)
More testing shows that both tests exercise doCoolThings, but the counter does not reset when a new instance is created.
Anyway, the initial idea was to dynamically attach an observer to an internal class method and externally fetch each time that method is triggered (still not something that seems to be solved using a decorator).
Thanks a lot for the answers (and, as a bonus, if someone knows how to reset the counter in the decorator, I will appreciate that as well).
import unittest
from HMT import HMT

count = 0

def counter(f):
    def wrap(*args, **kwargs):
        global count
        count += 1
        return f(*args, **kwargs)
    return wrap

class test_HMT(unittest.TestCase):
    def setUp(self):
        global count
        count = 0  # reset the counter between tests
        self.hmt = HMT()
        # Add decorator.
        self.hmt_no_decorator = self.hmt.doCoolThings
        self.hmt.doCoolThings = counter(self.hmt.doCoolThings)

    def test_doCoolThings_count(self):
        repeat = 3
        # doCoolThings expects a string argument
        [self.hmt.doCoolThings('x') for _ in range(repeat)]
        self.assertEqual(count, repeat)

    def tearDown(self):
        # Remove decorator.
        self.hmt.doCoolThings = self.hmt_no_decorator
...
doCoolThings is not modified in the business code; you simply get the counting behaviour for testing.
You can get rid of the global var by replacing the count variable with an object (or by doing anything else, really), but does it matter in testing?
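As an alternative sketch, the standard library's unittest.mock can do this same spying without a hand-rolled wrapper; wraps= keeps the real behaviour while recording calls (this assumes the HMT class from the question):
import unittest
from unittest import mock
from HMT import HMT

class test_HMT_spy(unittest.TestCase):
    def test_doCoolThings_count(self):
        hmt = HMT()
        # replace the bound method with a mock that delegates to the real one
        with mock.patch.object(hmt, 'doCoolThings', wraps=hmt.doCoolThings) as spy:
            hmt.handle("alpha,beta,gamma")
            self.assertEqual(3, spy.call_count)
And regarding the question's bonus request: with the decorator version, calls lives on the class-level function object and is shared by all instances, so resetting it between tests is just HMT.doCoolThings.calls = 0 in setUp.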
I want to measure execution time of a function on the cheap, something like this:
import time

def my_timeit(func, *args, **kwargs):
    t0 = time.time()
    result = func(*args, **kwargs)
    delta = time.time() - t0
    return delta, result

def foo():
    time.sleep(1.23)
    return 'potato'

delta, result = my_timeit(foo)
But I want to use timeit, profile, or another built-in to handle whatever common pitfalls arise from platform differences, and it would probably also be better to get the actual execution time rather than the wall time.
I tried using timeit.Timer(foo).timeit(number=1) but the interface seems to obscure the return value.
This is my current attempt. But I would welcome any suggestions, because this feels too hacky and could probably do with improvement.
import time
from timeit import Timer

def my_timeit(func, *args, **kwargs):
    output_container = []

    def wrapper():
        output_container.append(func(*args, **kwargs))

    timer = Timer(wrapper)
    delta = timer.timeit(1)
    return delta, output_container.pop()

def foo():
    time.sleep(1.111)
    return 'potato'

delta, result = my_timeit(foo)
edit: adapted to work as a decorator below:
import functools
from timeit import Timer

def timeit_decorator(the_func):
    @functools.wraps(the_func)
    def my_timeit(*args, **kwargs):
        output_container = []

        def wrapper():
            output_container.append(the_func(*args, **kwargs))

        timer = Timer(wrapper)
        delta = timer.timeit(1)
        my_timeit.last_execution_time = delta
        return output_container.pop()
    return my_timeit
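A quick usage sketch of the decorator version (the sleep duration is arbitrary):
import time

@timeit_decorator
def foo():
    time.sleep(1.111)
    return 'potato'

result = foo()
print(result, foo.last_execution_time)  # 'potato' and roughly 1.111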
How about
time python yourprogram.py < input.txt
This is the output for a Python script I ran:
[20:13:29] praveen:jan$ time python mtrick.py < input_mtrick.txt
3 3 9
1 2 3 4
real 0m0.067s
user 0m0.016s
sys 0m0.012s
Here real is the elapsed wall-clock time, while user and sys are the CPU time spent in user space and in the kernel respectively, which helps when you want actual execution time rather than wall time.
In Celery, you can retry any task in case of exception. You can do it like so:
@task(max_retries=5)
def div(a, b):
    try:
        return a / b
    except ZeroDivisionError as exc:
        raise div.retry(exc=exc)
In this case, if you try to divide by zero, the task will be retried five times. But you have to check for errors in your code explicitly; the task will not be retried if you skip the try-except block.
I want my functions to look like:
@celery.task(autoretry_on=ZeroDivisionError, max_retries=5)
def div(a, b):
    return a / b
Celery (since version 4.0) has exactly what you were looking for:
@app.task(autoretry_for=(SomeException,))
def my_task():
    ...
See: http://docs.celeryproject.org/en/latest/userguide/tasks.html#automatic-retry-for-known-exceptions
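Per the same documentation page, autoretry_for can be combined with related options such as retry_kwargs (and, from Celery 4.2, retry_backoff), for example:
@app.task(autoretry_for=(SomeException,),
          retry_kwargs={'max_retries': 5},
          retry_backoff=True)
def my_task():
    ...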
I searched this issue for a while, but found only this feature request.
I decided to write my own decorator for doing auto-retries:
def task_autoretry(*args_task, **kwargs_task):
    def real_decorator(func):
        @task(*args_task, **kwargs_task)
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                func(*args, **kwargs)
            except kwargs_task.get('autoretry_on', Exception) as exc:
                wrapper.retry(exc=exc)
        return wrapper
    return real_decorator
With this decorator I can rewrite my previous task:
@task_autoretry(autoretry_on=ZeroDivisionError, max_retries=5)
def div(a, b):
    return a / b
I've modified your answer to work with the existing Celery API (currently 3.1.17)
class MyCelery(Celery):
    def task(self, *args_task, **opts_task):
        def real_decorator(func):
            sup = super(MyCelery, self).task

            @sup(*args_task, **opts_task)
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                try:
                    func(*args, **kwargs)
                except opts_task.get('autoretry_on', Exception) as exc:
                    logger.info('Yo! We did it!')
                    wrapper.retry(exc=exc, args=args, kwargs=kwargs)
            return wrapper
        return real_decorator
Then, in your tasks
app = MyCelery()
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)

@app.task(autoretry_on=Exception)
def mytask():
    raise Exception('Retrying!')
This allows you to add the autoretry_on functionality to your tasks without having to use a separate decorator to define tasks.
Here is an improved version of the existing answers.
This fully implements the Celery 4.2 behaviour (as documented here) but for Celery 3.1.25.
It also doesn't break the different task decorator forms (with/without parentheses) and returns/raises properly.
import functools
import random

from celery.app.base import Celery as BaseCelery


def get_exponential_backoff_interval(factor, retries, maximum, full_jitter=False):
    """
    Calculate the exponential backoff wait time.
    (taken from Celery 4 `celery/utils/time.py`)
    """
    # Will be zero if factor equals 0
    countdown = factor * (2 ** retries)
    # Full jitter according to
    # https://www.awsarchitectureblog.com/2015/03/backoff.html
    if full_jitter:
        countdown = random.randrange(countdown + 1)
    # Adjust according to maximum wait time and account for negative values.
    return max(0, min(maximum, countdown))
class Celery(BaseCelery):

    def task(self, *args, **opts):
        """
        Overridden to add a back-port of Celery 4's `autoretry_for` task args.
        """
        super_method = super(Celery, self).task

        def inner_create_task_cls(*args_task, **opts_task):
            # http://docs.celeryproject.org/en/latest/userguide/tasks.html#Task.autoretry_for
            autoretry_for = tuple(opts_task.get('autoretry_for', ()))  # Tuple[Type[Exception], ...]
            retry_backoff = int(opts_task.get('retry_backoff', False))  # multiplier, default if True: 1
            retry_backoff_max = int(opts_task.get('retry_backoff_max', 600))  # seconds
            retry_jitter = opts_task.get('retry_jitter', True)  # bool
            retry_kwargs = opts_task.get('retry_kwargs', {})

            def real_decorator(func):
                @super_method(*args_task, **opts_task)
                @functools.wraps(func)
                def wrapper(*func_args, **func_kwargs):
                    try:
                        return func(*func_args, **func_kwargs)
                    except autoretry_for as exc:
                        if retry_backoff:
                            retry_kwargs['countdown'] = get_exponential_backoff_interval(
                                factor=retry_backoff,
                                retries=wrapper.request.retries,
                                maximum=retry_backoff_max,
                                full_jitter=retry_jitter,
                            )
                        raise wrapper.retry(exc=exc, **retry_kwargs)
                return wrapper
            return real_decorator

        # handle both `@task` and `@task(...)` decorator forms
        if len(args) == 1:
            if callable(args[0]):
                return inner_create_task_cls(**opts)(*args)
            raise TypeError('argument 1 to @task() must be a callable')
        if args:
            raise TypeError(
                '@task() takes exactly 1 argument ({0} given)'.format(
                    sum([len(args), len(opts)])))
        return inner_create_task_cls(**opts)
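For illustration, a minimal usage sketch of the backport above (the exception type and option values are arbitrary):
app = Celery()

@app.task(autoretry_for=(ConnectionError,),
          retry_backoff=2,
          retry_backoff_max=60,
          retry_kwargs={'max_retries': 5})
def fetch_remote(url):
    ...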
I have also written some unit tests for this, as I'm using it in my project.
They can be found in this gist, but note they are not easily runnable; treat them more as documentation of how the above feature works (and validation that it works properly).