from random import randint
import time

state = 0  # closed
open_time = 0
failure_count = 0
count = 0
status = {0: "closed", 2: "open", 1: "half closed"}

def circuitbreaker(func):
    global count
    global open_time, state
    print("circuit status " + status.get(state))
    if state == 0:  # closed state
        try:
            func()
        except Exception as ex:
            print(ex)
            count += 1
            if count > 2:
                print("opening circuit breaker")
                state = 2
                open_time = int(time.time())
    elif state == 2:  # open state
        if time.time() - open_time > 5:
            state = 1
        else:
            print("circuit opened")
    else:
        try:
            func()
            count = 0
            open_time = 0
            print("closing circuit breaker")
            state = 0
        except Exception as ex:
            state = 2
            open_time = int(time.time())
            print("opening circuit breaker")

@circuitbreaker
def generic_func():
    a = randint(0, 9)
    print("hello")
    print("random number = " + str(a))
    if a > 4:
        return a
    else:
        raise Exception('Yalla!')

if __name__ == "__main__":
    # while(True):
    #     generic_func()
    time.sleep(1)
I have this code, and I have a couple of questions:
1) Why is the generic function being called even though I commented it out in main?
2) When I uncomment the commented part in main, I get the following error. How do I properly call this generic function?
My motive is to implement a circuit breaker that opens when there is some kind of error or exception in the called function. I could directly use circuitbreaker(calling_function), but I wanted to use decorators.
circuit status closed
hello
random number = 1
Yalla!
Traceback (most recent call last):
  File "/Users/abhishekkumar/PycharmProjects/RateLimiter/circuitbreaker.py", line 53, in <module>
    generic_func()
TypeError: 'NoneType' object is not callable

Process finished with exit code 1
The issue was that the decorator should return a function object: the relevant logic should live inside an inner function, and the decorator should return that inner function. Otherwise the decorator implicitly returns None.
The answer to question #1: it's because of the circuitbreaker decorator, whose logic is executed during module import, and that logic calls the decorated function. Check out the following lines:
...
try:
    func()  # <-- here
except Exception as ex:
    print(ex)
...
The way to implement decorators is to return a wrapper function, which contains the business logic:
from functools import wraps

def decorator(f):
    @wraps(f)
    def wrapper(*args, **kwargs):
        ... your logic here ...
    return wrapper
The answer to question #2 derives from the previous one.
Answers to almost all questions you never asked about decorators
The function that decorates something is supposed to return the function that does the work itself, not do all the work directly. You are not returning a "function pointer" but an implicit None from your decorator (implicit because you return nothing). This None is then called...
How to fix:
def circuitbreaker(func):
    def the_works():
        global count
        global open_time, state
        print("circuit status " + status.get(state))
        # .. all your other code ...
    return the_works

for _ in range(5):
    generic_func()
Output of fix:
circuit status closed
hello
random number = 3
Yalla!
circuit status closed
hello
random number = 3
Yalla!
circuit status closed
hello
random number = 0
Yalla!
opening circuit breaker
circuit status open
circuit opened
circuit status open
circuit opened
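For completeness, here is roughly how the whole program looks once the state machine from the question is moved inside the returned wrapper. This is just the original code restructured, a sketch rather than a drop-in library; the while loop in main is the one the asker had commented out:
from random import randint
import time

state = 0  # 0 = closed, 2 = open, 1 = half closed
open_time = 0
count = 0
status = {0: "closed", 2: "open", 1: "half closed"}

def circuitbreaker(func):
    def wrapper():
        global count, open_time, state
        print("circuit status " + status.get(state))
        if state == 0:  # closed: call through, count failures
            try:
                func()
            except Exception as ex:
                print(ex)
                count += 1
                if count > 2:
                    print("opening circuit breaker")
                    state = 2
                    open_time = int(time.time())
        elif state == 2:  # open: reject calls until the timeout passes
            if time.time() - open_time > 5:
                state = 1
            else:
                print("circuit opened")
        else:  # half closed: one trial call decides open vs. closed
            try:
                func()
                count = 0
                open_time = 0
                print("closing circuit breaker")
                state = 0
            except Exception:
                state = 2
                open_time = int(time.time())
                print("opening circuit breaker")
    return wrapper

@circuitbreaker
def generic_func():
    a = randint(0, 9)
    print("hello")
    print("random number = " + str(a))
    if a > 4:
        return a
    raise Exception('Yalla!')

if __name__ == "__main__":
    while True:
        generic_func()
        time.sleep(1)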
Decorators run right after the decorated function is defined, which is usually at import time. In your circuitbreaker decorator, you already call generic_func at that point.
Here is an example from Fluent Python:
registry = []

def register(func):
    print('running register(%s)' % func)
    registry.append(func)
    return func

@register
def f1():
    print('running f1()')

@register
def f2():
    print('running f2()')

def f3():
    print('running f3()')

def main():
    print('registry ->', registry)
    f1()
    f2()
    f3()

if __name__ == '__main__':
    main()
The output is
running register(<function f1 at 0x1055ae378>)
running register(<function f2 at 0x1055ae400>)
registry -> [<function f1 at 0x1055ae378>, <function f2 at 0x1055ae400>]
running f1()
running f2()
Is this what you want: How do I catch an exception in a decorator but allow the caller to catch it as well?
Related
I am currently working on an import script that imports listings from a database that regularly shuts down every 15 mins for a re-snap.
I have created a with block as below to handle the retry mechanism when creating connections:
class DBRetryController(object):
    conn_obj = None
    connection = None
    cursor = None
    retry_count_down = None
    sleep_time = None

    def __init__(self, conn_obj, retry_count_down=5, sleep_time=10):
        self.conn_obj = conn_obj
        self.retry_count_down = retry_count_down
        self.sleep_time = sleep_time

    def __enter__(self):
        ex = None
        while self.retry_count_down > 0:
            try:
                if hasattr(self.conn_obj, '__call__'):
                    self.connection = self.conn_obj()
                else:
                    self.connection = self.conn_obj
                self.cursor = self.connection.cursor()
                self.retry_count_down = False
            except OperationalError as ex:
                log.warning('Caught db error, possibly due to sql server gone away, retrying in a few moment')
                self.retry_count_down -= 1
                time.sleep(self.sleep_time)
        if ex:
            raise ex
        return self.connection, self.cursor

    def __exit__(self, type, value, traceback):
        try:
            self.cursor.close()
            self.connection.close()
        except:
            pass
        if value:
            raise value
And use as below:
with DBRetryController(self.connection) as (_, cursor):
    cursor.execute(self.LISTING_QUERY)
But the problem is that the server can shut down during execution of the query. Is it possible to modify the DBRetryController so that the nested block of code re-enters?
If I understand your question correctly, I think you can use a scheme like this:
notCompleted = 1

class TestClass():
    def run(self):
        global notCompleted
        notCompleted = 1
        # do_something here
        notCompleted = 0

test = TestClass()
test.run()
while notCompleted:
    test.run()
Let's assume I want to be sure that, even if an error occurs during execution of the run() method, my program will retry until run() completes. notCompleted is 1 by default; I assign 1 to it at the beginning of run() and 0 at the end. If there is a problem anywhere inside run(), the while loop will call the method again.
You need to add a try/except too.
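A minimal sketch of that scheme with the try/except folded in; do_something() is a placeholder for whatever run() actually does:
notCompleted = 1

class TestClass:
    def run(self):
        global notCompleted
        notCompleted = 1
        try:
            do_something()    # placeholder for the real work
            notCompleted = 0  # only reached if no exception was raised
        except Exception:
            pass  # leave notCompleted at 1 so the caller retries

test = TestClass()
test.run()
while notCompleted:
    test.run()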
I have the following type of code, but it is slow because report() is called very often.
import time
import random

def report(values):
    open('report.html', 'w').write(str(values))

values = []
for i in range(10000):
    # some computation
    r = random.random() / 100.
    values.append(r)
    time.sleep(r)

    # report on the current status, but this should not slow things down
    report(values)
In this illustrative code example, I would like the report to be up-to-date (at most 10s old), so I would like to throttle that function.
I could fork inside report, write the current timestamp, wait for that period, and then check via a shared-memory timestamp whether report has been called in the meantime: if yes, terminate; if not, write the report.
Is there a more elegant way to do it in Python?
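For comparison, the most direct approach, before reaching for forks or decorators, is to keep a module-level timestamp and skip the write when the last report is recent enough. A sketch reusing the names from the question (the 10-second interval is the one asked for):
import time

last_report = 0.0

def report(values, interval=10.0):
    """Rewrite the report only if `interval` seconds passed since the last write."""
    global last_report
    now = time.time()
    if now - last_report < interval:
        return  # too soon; keep the existing report
    last_report = now
    with open('report.html', 'w') as f:
        f.write(str(values))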
Here's a decorator that takes an argument for how long to protect the inner function, raising an exception if called too soon.
import time
from functools import partial, wraps

class TooSoon(Exception):
    """Can't be called so soon"""
    pass

class CoolDownDecorator(object):
    def __init__(self, func, interval):
        self.func = func
        self.interval = interval
        self.last_run = 0

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self.func
        return partial(self, obj)

    def __call__(self, *args, **kwargs):
        now = time.time()
        if now - self.last_run < self.interval:
            raise TooSoon("Call after {0} seconds".format(self.last_run + self.interval - now))
        else:
            self.last_run = now
            return self.func(*args, **kwargs)

def CoolDown(interval):
    def applyDecorator(func):
        decorator = CoolDownDecorator(func=func, interval=interval)
        return wraps(func)(decorator)
    return applyDecorator
Then:
>>> @CoolDown(10)
... def demo():
...     print "demo called"
...
>>>
>>> for i in range(12):
...     try:
...         demo()
...     except TooSoon, exc:
...         print exc
...     time.sleep(1)
...
demo called
Call after 8.99891519547 seconds
Call after 7.99776816368 seconds
Call after 6.99661898613 seconds
Call after 5.99548196793 seconds
Call after 4.9943420887 seconds
Call after 3.99319410324 seconds
Call after 2.99203896523 seconds
Call after 1.99091005325 seconds
Call after 0.990563154221 seconds
demo called
Call after 8.99888515472 seconds
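One note on the design: the __get__ method makes CoolDownDecorator a descriptor, so the decorator also works on instance methods. A quick sketch, where Reporter is a made-up class for illustration:
class Reporter(object):
    @CoolDown(10)
    def report(self):
        print("reporting")

r = Reporter()
r.report()  # works: __get__ binds the instance via partial, so self arrives intact

# Caveat: last_run is stored on the decorator object itself, so the
# cooldown is shared by all Reporter instances, not tracked per instance.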
Here is an example of throttling a function using closures in Python 3.
import time

def get_current_time_milli():
    return int(round(time.time() * 1000))

def mycallbackfunction():
    time.sleep(0.1)  # mocking some work
    print("in callback function...")

'''
Throttle a function call using closures.
Don't call the callback function until the last invocation is more than 100ms ago.
Only works with Python 3.
Caveat: in Python 2 we cannot rebind a nonlocal variable inside the closure.
'''
def debouncer(callback, throttle_time_limit=100):
    last_millis = get_current_time_milli()

    def throttle():
        nonlocal last_millis
        curr_millis = get_current_time_milli()
        if (curr_millis - last_millis) > throttle_time_limit:
            last_millis = get_current_time_milli()
            callback()

    return throttle

myclosure_function = debouncer(mycallbackfunction, 100)

# we are calling myclosure_function 20 times, but the callback only executes a few times.
# some event triggers this call repeatedly.
for i in range(20):
    print('calling my closure', myclosure_function(), get_current_time_milli())
I wish to write a process which executes commands from another process. This involves receiving the command, processing it and replying to the calling process with the result. The calling process shall wait for the reply, before requesting the execution of the next command. This is what I have come up with so far:
import multiprocessing
import time

class CommandProcessor(multiprocessing.Process):
    def __init__(self):
        multiprocessing.Process.__init__(self)
        self.command = multiprocessing.Queue()
        self.result = multiprocessing.Queue()

    def run(self):
        while True:
            c = self.command.get()
            if not c: break
            self.result.put(str(c))

    def execute(self, n):
        self.command.put(n)
        return self.result.get()

    def stop(self):
        self.command.put(None)
        self.join()

try:
    p = CommandProcessor()
    p.start()
    r = p.execute(1)
    print("Result: " + r)
    r = p.execute(2)
    print("Result: " + r)
    r = p.execute(3)
    print("Result: " + r)
finally:
    p.stop()
There is at least one problem with my design: if there is an exception in CommandProcessor, the master process will wait indefinitely on the line return self.result.get(). I could add a timeout to the get() method, but some of the commands I run take a relatively long time to execute, so the timeout would have to be long enough to guarantee their completion. How can I handle this so that both processes terminate if there is an exception, with a useful stack trace dumped to standard output?
Wrap it in a try/except:
Example:
def execute(self, n):
    try:
        self.command.put(n)
        return self.result.get()
    except Exception as e:
        return e  # TODO: Do something sensible here
It's the run() method that executes in the spawned process, so that's where you need to do your exception handling. Here's an example, with the stack trace returned as result. Also, note the corrected way of detecting the None command (i.e. the "stop" signal).
import traceback

class CommandProcessor():
    ...
    def run(self):
        while True:
            c = self.command.get()
            if c is None:
                break
            try:
                1/(c-2)  # Throws exception if c is 2.
            except:
                c = traceback.format_exc()
            self.result.put(str(c))
What you get is that 1 and 3 work fine, while 2 errs:
Result: 1
Result: Traceback (most recent call last):
  File "a.py", line 17, in run
    1/(c-2) # Throws exception if c is 2.
ZeroDivisionError: integer division or modulo by zero

Result: 3
I want to wait some number of seconds after a selenium call is executed, so that the user who executes the automated test can see what's happening on the screen.
My question is: Is there a way to wait some number of seconds (using implicit or explicit waits or whatever) after every function call that's better than writing time.sleep a bunch of times in the code? A selenium function call looks like this:
driver.find_element_by_name("Account").click()
One option is to put every selenium call into its own function, and use a decorator:
import time

def wait(secs):
    def decorator(func):
        def wrapper(*args, **kwargs):
            ret = func(*args, **kwargs)
            time.sleep(secs)
            return ret
        return wrapper
    return decorator
Usage:
@wait(5)  # waits 5 seconds after running the method
def do_instruction1(...):
    return "hi"

@wait(3)  # waits 3 seconds after running the method
def do_instruction2(...):
    return "there"

a = do_instruction1()
print a
b = do_instruction2()
print b
Output:
<5 second delay>
"hi"
<3 second delay>
"there"
If you don't want to put every operation in its own function, you can do this using a coroutine:
import time
from functools import wraps

class Return(Exception):
    def __init__(self, value):
        self.value = value

def sleeper(func):
    """ Coroutine decorator that sleeps after every yield.

    Any yield inside a function decorated with sleeper will
    result in a 3 second sleep after the operation being
    yielded has run.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        def execute(gen):
            try:
                x = next(gen)
                time.sleep(3)
                while True:
                    x = gen.send(x)
                    time.sleep(3)
            except (Return, StopIteration) as e:
                return getattr(e, "value", None)
        gen = func(*args, **kwargs)
        return execute(gen)
    return wrapper

def f():
    print "should sleep"
    return "abc"

def g(val):
    print "should also sleep"
    return "%s-def" % (val,)

def h():
    print "this won't sleep"
    return "ghi"

@sleeper
def test():
    z = yield f()
    print "hey there, got %s" % (z,)
    y = yield g(z)
    print "ok: %s" % (y,)
    l = h()
    print "see %s" % (l,)
    z = yield f()
    print "done %s" % z
    raise Return("all done")  # You can use return "all done" if you have Python 3.x

if __name__ == "__main__":
    final = test()
    print "final is %s" % final
Output:
should sleep
<3 second sleep>
hey there, got abc
should also sleep
<3 second sleep>
ok: abc-def
this won't sleep
see ghi
should sleep
<3 second sleep>
done abc
final is all done
Using this approach, any method you decorate with the sleeper coroutine will sleep after calling any method you yield from. So in your case, instead of calling
driver.find_element_by_name("Account").click()
You would call
yield driver.find_element_by_name("Account").click()
The only limitations are that all of the calls you want to sleep after must be inside a function decorated with sleeper, and that if you're using Python 2 and want to return something from the decorated function, you need to use raise Return(value) instead of return value. On Python 3.x, return value works fine.
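The Python 3 behaviour works because a return inside a generator attaches the value to the StopIteration it raises, which is exactly what the execute() helper above reads with getattr(e, "value", None). A standalone demonstration (Python 3):
def gen():
    yield 1
    return "all done"  # Python 3: becomes StopIteration.value

g = gen()
next(g)  # -> 1
try:
    next(g)
except StopIteration as e:
    print(e.value)  # prints: all done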
I have the following code:
function_1()
function_2()
Normally, function_1() takes 10 hours to end.
But I want function_1() to run for only 2 hours; after 2 hours, function_1 must return and the program must continue with function_2(). It shouldn't wait for function_1() to be completed. Is there a way to do this in Python?
What makes a Python function able to interrupt its execution and resume later is the use of the yield statement: your function then works as a generator object. You call next() on this object to have it start, or to continue after the last yield:
import time

def function_1():
    start_time = time.time()
    while True:
        # do long stuff
        running_time = time.time() - start_time
        if running_time > 2 * 60 * 60:  # 2 hours
            yield  # <partial results can be yielded here, if you want>
            start_time = time.time()

runner = function_1()
while True:
    try:
        next(runner)
    except StopIteration:
        # function_1 has got to the end
        break
    # do other stuff
If you don't mind leaving function_1 running:
from threading import Thread
import time
Thread(target=function_1).start()
time.sleep(60*60*2)
Thread(target=function_2).start()
You can try the gevent module: start the function in a greenlet and kill that greenlet after some time.
Here is example:
import gevent

# function which you can't modify
def func1(some_arg):
    # do something
    pass

def func2():
    # do something
    pass

if __name__ == '__main__':
    g = gevent.Greenlet(func1, 'Some Argument in func1')
    g.start()
    gevent.sleep(60*60*2)
    g.kill()
    # call the rest of the functions
    func2()
from multiprocessing import Process

p1 = Process(target=function_1)
p1.start()
p1.join(60*60*2)
if p1.is_alive(): p1.terminate()
function_2()
I hope this helps
I just tested this using the following code:
import time
from multiprocessing import Process

def f1():
    print 0
    time.sleep(10000)
    print 1

def f2():
    print 2

p1 = Process(target=f1)
p1.start()
p1.join(6)
if p1.is_alive(): p1.terminate()
f2()
Output is as expected:
0
2
You can time the execution using the datetime module. Probably your optimizer function has a loop somewhere. Inside the loop you can test how much time has passed since you started the function.
import datetime

def function_1():
    t_end = datetime.datetime.now() + datetime.timedelta(hours=2)
    while not converged:  # `converged` comes from your optimizer's loop condition
        # do your thing
        if datetime.datetime.now() > t_end:
            return
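For instance, wired into a runnable loop; the loop body here is a stand-in for the real optimizer step, and the tiny time budget is just to make the demo finish quickly:
import datetime
import time

def function_1(hours=2):
    t_end = datetime.datetime.now() + datetime.timedelta(hours=hours)
    result = 0
    while True:  # stands in for `while not converged`
        result += 1      # placeholder optimizer step
        time.sleep(0.1)
        if datetime.datetime.now() > t_end:
            return result  # hand back whatever has been computed so far

def function_2():
    print("function_2 running")

print(function_1(hours=0.0002))  # roughly a 0.7 s budget for the demo
function_2()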