Signal alarm does not raise error - python

In the following code, I'm trying to create a class 'TimedExecutor' which would stop the execution of the function (bar) passed to its method 'execute' if it exceeds a certain time limit. But the program execution doesn't stop, even though the error message is displayed.
Note: We must not make any changes to the function bar(), as it is provided by an external module.
import signal
import time

class MyError(Exception):
    """Base error"""

class MyInheritedError(MyError):
    """Class to inherit from base error"""

class TimeoutListener(object):
    def __init__(self, timeout_seconds, error_message="Timeout executing."):
        self.timeout_seconds = timeout_seconds
        self.error_message = error_message
        self.alarm = None

    def __enter__(self):
        signal.signal(signal.SIGALRM, self._handle_timeout)
        signal.alarm(self.timeout_seconds)

    def __exit__(self, listener_type, value, traceback):
        # Disable the alarm.
        if self.alarm:
            self.alarm = None
        else:
            signal.alarm(0)

    def _handle_timeout(self, signum, frame):
        print("Got the signum %s with frame: %s" % (signum, frame))
        raise MyInheritedError(self.error_message + "aditya")

class TimedExecutor(object):
    @staticmethod
    def execute(timeout_secs, functor, *args, **kwargs):
        msg = "Timeout executing method - %s." % functor.__name__
        timeout_signal = TimeoutListener(timeout_secs, error_message=msg)
        try:
            with timeout_signal:
                output = functor(*args, **kwargs)
        except MyInheritedError as ex:
            print("%s did not complete in %s: %s."
                  % (functor.__name__, timeout_secs, repr(ex)))
            raise
        return output

def bar():
    for _ in range(5):
        try:
            time.sleep(1)
            print("SLEEPING")
        except MyInheritedError as ex:
            print(ex)

ob = TimedExecutor.execute(2, bar)

Your functor is swallowing the exception you intend to be fatal.
It is bar()'s except clause in one of its loops that prints and then discards the error raised by the TimeoutListener context manager. Then the loop resumes.
bar() should probably not be aware of the exception your TimedExecutor can raise. Instead, bar()'s caller who invokes .execute() should be aware of it:
from aditya.utils import TimedExecutor, TimeoutException
...
try:
    TimedExecutor.execute(2, bar)
except TimeoutException:
    print("Timed out executing bar")

Related

Python - threading assert group is None when creating a custom Thread Class

I wanted to create a custom Thread class that is able to propagate an exception it comes across to the main thread. My implementation is as follows:
class VerseThread(threading.Thread):
    def __init__(self, args):
        super().__init__(self, args=args)
        # self.scraper = scraper

    def run(self):
        self.exc = None
        try:
            book, abbrev, template, chapter = self.args
            self.parser.parse(book, abbrev, template, chapter)
        except ChapterNotFoundError as e:
            self.exc = e

    def join(self):
        threading.Thread.join(self)
        if self.exc:
            raise self.exc
This is supposed to run in the following method, inside a Scraper class (it's all inside a `while True`):
for book, abbrev, testament in self.books[init:end]:
    base_chapter = 1
    while True:
        threads = []
        if testament == 'ot':
            for i in range(3):
                threads.append(VerseThread(args=(book, abbrev, OT_TEMPLATE, base_chapter+i)))
        else:
            for i in range(3):
                threads.append(VerseThread(args=(book, abbrev, NT_TEMPLATE, base_chapter+i)))
        try:
            for thread in threads:
                if not thread.is_alive():
                    thread.start()
            for thread in threads:
                thread.join()
            base_chapter += 3
        except ChapterNotFoundError as e:
            LOGGER.info(f"{{PROCESS {multiprocessing.current_process().pid}}} - Chapter {e.chapter} not found in {book}, exiting book...")
            break
The issue is that, if I run it as presented here, I get the error assert group is None, "group argument must be None for now". However, when I run it using Thread(target=self.parse, args=(book, abbrev, OT_TEMPLATE, base_chapter+i)) instead of VerseThread(args=(book, abbrev, OT_TEMPLATE, base_chapter+i)), it works just fine, but the exception is of course still there. What's wrong with my code? How can I get rid of this error?
EDIT: Upon further testing, it seems that what I'm trying to do works fine when I use thread.run() instead of thread.start(), but then only one thread is being used, which is a problem. This, however, means that the error must be in the start() method, but I've no idea what to do.
You have several errors. First, if you are using super() as in super().__init__(self, target=target, args=args), you do not pass self explicitly as an argument. Second, to handle any possible thread-initializer arguments, your signature for this method should just be as follows:
class VerseThread(threading.Thread):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        ...  # rest of the code omitted
But since your __init__ method does not do anything but call the parent's __init__ method with any passed arguments, there is now no need to even override this method.
Finally, the attributes that you are interested in are not args but rather _args and _kwargs (in case keyword arguments are specified). Also, you have specified self.parser, but I do not see where that attribute has been set.
import threading

class ChapterNotFoundError(Exception):
    pass

class VerseThread(threading.Thread):
    def run(self):
        self.exc = None
        try:
            book, abbrev, template, chapter = self._args
            self.parser.parse(book, abbrev, template, chapter)
        except ChapterNotFoundError as e:
            self.exc = e

    def join(self):
        threading.Thread.join(self)  # Or: super().join()
        if self.exc:
            raise self.exc
for book, abbrev, testament in self.books[init:end]:
    base_chapter = 1
    while True:
        threads = []
        if testament == 'ot':
            for i in range(3):
                threads.append(VerseThread(args=(book, abbrev, OT_TEMPLATE, base_chapter+i)))
        else:
            for i in range(3):
                threads.append(VerseThread(args=(book, abbrev, NT_TEMPLATE, base_chapter+i)))
        try:
            for thread in threads:
                if not thread.is_alive():
                    thread.start()
            for thread in threads:
                thread.join()
            base_chapter += 3
        except ChapterNotFoundError as e:
            LOGGER.info(f"{{PROCESS {multiprocessing.current_process().pid}}} - Chapter {e.chapter} not found in {book}, exiting book...")
            break
Improvement
Accessing quasi-private attributes, such as self._args, is a potentially dangerous thing and should be avoided.
I can see the value of creating a subclass of Thread that will catch exceptions in the "worker" function it is to execute and then "propagate" them back to the main thread when it joins the thread. But I believe such a class should be general purpose and work with any type of worker function. In general, I don't like to have application-specific code (business logic) in a threading.Thread or multiprocessing.Pool subclass. I instead prefer having my business logic coded within a function or class method(s) that can then be used in multithreading, multiprocessing or serial processing as you see fit. The following is how I would code the Thread subclass (I have named it PropogateExceptionThread, but choose whatever name you wish) and how I might use it:
import threading

class PropogateExceptionThread(threading.Thread):
    def run(self):
        self.exc = None
        try:
            super().run()
        except Exception as e:
            self.exc = e

    def join(self):
        super().join()
        if self.exc:
            raise self.exc
def worker(x):
    if x < 10 or x > 20:
        raise ValueError(f'Bad value for argument x = {x}')

t = PropogateExceptionThread(target=worker, args=(1,))
t.start()
try:
    t.join()
except Exception as e:
    print('The thread raised an exception:', e)
Prints:
The thread raised an exception: Bad value for argument x = 1
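Applied to the asker's scenario, the same class might be used like this (a sketch under my own assumptions: parse_chapter is a hypothetical stand-in for the real self.parser.parse, raising the ChapterNotFoundError defined earlier):

def parse_chapter(book, abbrev, template, chapter):
    # hypothetical stand-in for the real parsing worker
    if chapter > 5:
        raise ChapterNotFoundError(f'{book} has no chapter {chapter}')

threads = [PropogateExceptionThread(target=parse_chapter,
                                    args=('Genesis', 'gn', 'OT_TEMPLATE', c))
           for c in (1, 2, 6)]
for t in threads:
    t.start()
try:
    for t in threads:
        t.join()  # any exception raised in a thread surfaces here
except ChapterNotFoundError as e:
    print('Stopping book:', e)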

How do I stop a line of code after running for too long? [duplicate]

I'm calling a function in Python which I know may stall and force me to restart the script.
How do I call the function or what do I wrap it in so that if it takes longer than 5 seconds the script cancels it and does something else?
You may use the signal package if you are running on UNIX:
In [1]: import signal

# Register a handler for the timeout
In [2]: def handler(signum, frame):
   ...:     print("Forever is over!")
   ...:     raise Exception("end of time")
   ...:

# This function *may* run for an indeterminate time...
In [3]: def loop_forever():
   ...:     import time
   ...:     while 1:
   ...:         print("sec")
   ...:         time.sleep(1)
   ...:

# Register the signal function handler
In [4]: signal.signal(signal.SIGALRM, handler)
Out[4]: 0

# Define a timeout for your function
In [5]: signal.alarm(10)
Out[5]: 0

In [6]: try:
   ...:     loop_forever()
   ...: except Exception as exc:
   ...:     print(exc)
   ...:
sec
sec
sec
sec
sec
sec
sec
sec
Forever is over!
end of time

# Cancel the timer if the function returned before timeout
# (ok, mine won't but yours maybe will :)
In [7]: signal.alarm(0)
Out[7]: 0
10 seconds after the call signal.alarm(10), the handler is called. This raises an exception that you can intercept from the regular Python code.
This module doesn't play well with threads (but then, who does?)
Note that, since we raise an exception when the timeout happens, it may end up caught and ignored inside the function; here is an example of one such function:
def loop_forever():
    while 1:
        print('sec')
        try:
            time.sleep(10)
        except:
            continue
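One way to make the timeout harder to swallow (my own suggestion, not part of the original answer) is to derive the timeout exception from BaseException, so that the common except Exception: pattern inside the function does not catch it (a bare except: still would):

import signal
import time

class Timeout(BaseException):   # not Exception, on purpose
    pass

def handler(signum, frame):
    raise Timeout()

def stubborn():
    while True:
        try:
            time.sleep(10)
        except Exception:       # swallows ordinary errors only
            continue

signal.signal(signal.SIGALRM, handler)
signal.alarm(2)
try:
    stubborn()
except Timeout:
    print("timed out anyway")
finally:
    signal.alarm(0)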
You can use multiprocessing.Process to do exactly that.
Code
import multiprocessing
import time

# bar
def bar():
    for i in range(100):
        print("Tick")
        time.sleep(1)

if __name__ == '__main__':
    # Start bar as a process
    p = multiprocessing.Process(target=bar)
    p.start()

    # Wait for 10 seconds or until process finishes
    p.join(10)

    # If thread is still active
    if p.is_alive():
        print("running... let's kill it...")

        # Terminate - may not work if process is stuck for good
        p.terminate()
        # OR Kill - will work for sure, no chance for process to finish nicely however
        # p.kill()

        p.join()
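If you also need to tell a timeout apart from a normal exit afterwards, the process's exitcode is one way to do it (a small sketch; after terminate() it is the negative signal number):

# continuing from the example above, after the final p.join()
if p.exitcode == 0:
    print("bar finished normally")
elif p.exitcode is not None and p.exitcode < 0:
    print("bar was killed by signal", -p.exitcode)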
How do I call the function or what do I wrap it in so that if it takes longer than 5 seconds the script cancels it?
I posted a gist that solves this question/problem with a decorator and a threading.Timer. Here it is with a breakdown.
Imports and setups for compatibility
It was tested with Python 2 and 3. It should also work under Unix/Linux and Windows.
First the imports. These attempt to keep the code consistent regardless of the Python version:
from __future__ import print_function
import sys
import threading
from time import sleep
try:
    import thread
except ImportError:
    import _thread as thread
Use version independent code:
try:
    range, _print = xrange, print

    def print(*args, **kwargs):
        flush = kwargs.pop('flush', False)
        _print(*args, **kwargs)
        if flush:
            kwargs.get('file', sys.stdout).flush()
except NameError:
    pass
Now we have imported our functionality from the standard library.
exit_after decorator
Next we need a function to terminate the main() from the child thread:
def quit_function(fn_name):
    # print to stderr, unbuffered in Python 2.
    print('{0} took too long'.format(fn_name), file=sys.stderr)
    sys.stderr.flush()  # Python 3 stderr is likely buffered.
    thread.interrupt_main()  # raises KeyboardInterrupt
And here is the decorator itself:
def exit_after(s):
    '''
    use as decorator to exit process if
    function takes longer than s seconds
    '''
    def outer(fn):
        def inner(*args, **kwargs):
            timer = threading.Timer(s, quit_function, args=[fn.__name__])
            timer.start()
            try:
                result = fn(*args, **kwargs)
            finally:
                timer.cancel()
            return result
        return inner
    return outer
Usage
And here's the usage that directly answers your question about exiting after 5 seconds!:
@exit_after(5)
def countdown(n):
    print('countdown started', flush=True)
    for i in range(n, -1, -1):
        print(i, end=', ', flush=True)
        sleep(1)
    print('countdown finished')
Demo:
>>> countdown(3)
countdown started
3, 2, 1, 0, countdown finished
>>> countdown(10)
countdown started
10, 9, 8, 7, 6, countdown took too long
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<stdin>", line 11, in inner
File "<stdin>", line 6, in countdown
KeyboardInterrupt
The second function call will not finish, instead the process should exit with a traceback!
KeyboardInterrupt does not always stop a sleeping thread
Note that sleep will not always be interrupted by a keyboard interrupt - on Python 2 on Windows, for example:
@exit_after(1)
def sleep10():
    sleep(10)
    print('slept 10 seconds')
>>> sleep10()
sleep10 took too long # Note that it hangs here about 9 more seconds
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<stdin>", line 11, in inner
File "<stdin>", line 3, in sleep10
KeyboardInterrupt
nor is it likely to interrupt code running in extensions unless it explicitly checks for PyErr_CheckSignals(); see Cython, Python and KeyboardInterrupt ignored
I would avoid sleeping a thread more than a second, in any case - that's an eon in processor time.
How do I call the function or what do I wrap it in so that if it takes longer than 5 seconds the script cancels it and does something else?
To catch it and do something else, you can catch the KeyboardInterrupt.
>>> try:
...     countdown(10)
... except KeyboardInterrupt:
...     print('do something else')
...
countdown started
10, 9, 8, 7, 6, countdown took too long
do something else
I have a different proposal: a pure function (with the same API as the threading suggestion) that seems to work fine (based on suggestions on this thread):
def timeout(func, args=(), kwargs={}, timeout_duration=1, default=None):
    import signal

    class TimeoutError(Exception):
        pass

    def handler(signum, frame):
        raise TimeoutError()

    # set the timeout handler
    signal.signal(signal.SIGALRM, handler)
    signal.alarm(timeout_duration)
    try:
        result = func(*args, **kwargs)
    except TimeoutError as exc:
        result = default
    finally:
        signal.alarm(0)
    return result
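A usage sketch (slow_op is a hypothetical stand-in for whatever call you want to bound; the default value is returned once the alarm fires):

import time

def slow_op():
    time.sleep(30)
    return 'done'

result = timeout(slow_op, timeout_duration=5, default='gave up')
print(result)  # prints 'gave up' after roughly 5 seconds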
I ran across this thread when searching for a timeout call on unit tests. I didn't find anything simple in the answers or 3rd party packages so I wrote the decorator below you can drop right into code:
import multiprocessing.pool
import functools

def timeout(max_timeout):
    """Timeout decorator, parameter in seconds."""
    def timeout_decorator(item):
        """Wrap the original function."""
        @functools.wraps(item)
        def func_wrapper(*args, **kwargs):
            """Closure for function."""
            pool = multiprocessing.pool.ThreadPool(processes=1)
            async_result = pool.apply_async(item, args, kwargs)
            # raises a TimeoutError if execution exceeds max_timeout
            return async_result.get(max_timeout)
        return func_wrapper
    return timeout_decorator
Then it's as simple as this to timeout a test or any function you like:
@timeout(5.0)  # if execution takes longer than 5 seconds, raise a TimeoutError
def test_base_regression(self):
    ...
The stopit package, found on PyPI, seems to handle timeouts well.
I like the @stopit.threading_timeoutable decorator, which adds a timeout parameter to the decorated function; it does what you expect: it stops the function.
Check it out on pypi: https://pypi.python.org/pypi/stopit
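A sketch of what that looks like (based on my reading of the stopit README; the default= value is returned when the timeout hits):

import time
import stopit

@stopit.threading_timeoutable(default='timed out')
def infinite_loop():
    # the decorator injects a `timeout` keyword argument at call time
    while True:
        time.sleep(0.1)

print(infinite_loop(timeout=2))  # -> 'timed out'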
I am the author of wrapt_timeout_decorator.
Most of the solutions presented here work wonderfully under Linux at first glance - because we have fork() and signals - but on Windows things look a bit different.
And when it comes to subthreads on Linux, you can't use signals anymore.
In order to spawn a process under Windows, it needs to be picklable - and many decorated functions or class methods are not.
So you need to use a better pickler like dill and multiprocess (not pickle and multiprocessing) - that's why you can't use ProcessPoolExecutor (or only with limited functionality).
For the timeout itself - you need to define what timeout means - because on Windows it will take considerable (and not determinable) time to spawn the process. This can be tricky on short timeouts. Let's assume spawning the process takes about 0.5 seconds (easily!). If you give a timeout of 0.2 seconds, what should happen?
Should the function time out after 0.5 + 0.2 seconds (so let the method run for 0.2 seconds)?
Or should the called process time out after 0.2 seconds (in that case, the decorated function will ALWAYS time out, because in that time it is not even spawned)?
Also, nested decorators can be nasty, and you can't use signals in a subthread. If you want to create a truly universal, cross-platform decorator, all this needs to be taken into consideration (and tested).
Other issues are passing exceptions back to the caller, as well as logging issues (if used in the decorated function - logging to files in another process is NOT supported).
I tried to cover all edge cases; you might look into the package wrapt_timeout_decorator, or at least test your own solutions inspired by the unittests used there.
@Alexis Eggermont - unfortunately I don't have enough points to comment - maybe someone else can notify you - I think I solved your import issue.
There are a lot of suggestions, but none using concurrent.futures, which I think is the most legible way to handle this.
from concurrent.futures import ProcessPoolExecutor

# Warning: this does not terminate function if timeout
def timeout_five(fnc, *args, **kwargs):
    with ProcessPoolExecutor() as p:
        f = p.submit(fnc, *args, **kwargs)
        return f.result(timeout=5)
Super simple to read and maintain.
We make a pool, submit a single process and then wait up to 5 seconds before raising a TimeoutError that you could catch and handle however you needed.
Native to Python 3.2+ and backported to 2.7 (pip install futures).
Switching between threads and processes is as simple as replacing ProcessPoolExecutor with ThreadPoolExecutor.
If you want to terminate the Process on timeout I would suggest looking into Pebble.
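To catch the timeout rather than let it propagate, a small sketch (concurrent.futures.TimeoutError is what f.result() raises; slow is a hypothetical stand-in for your function):

import time
from concurrent.futures import TimeoutError

def slow():
    time.sleep(60)

if __name__ == '__main__':  # needed for ProcessPoolExecutor on Windows
    try:
        timeout_five(slow)
    except TimeoutError:
        print('took more than 5 seconds, gave up waiting')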
Building on and enhancing the answer by @piro, you can build a contextmanager. This allows for very readable code which will disable the alarm signal after a successful run (sets signal.alarm(0)):
from contextlib import contextmanager
import signal
import time

@contextmanager
def timeout(duration):
    def timeout_handler(signum, frame):
        raise TimeoutError(f'block timedout after {duration} seconds')
    signal.signal(signal.SIGALRM, timeout_handler)
    signal.alarm(duration)
    try:
        yield
    finally:
        signal.alarm(0)

def sleeper(duration):
    time.sleep(duration)
    print('finished')
Example usage:
In [19]: with timeout(2):
    ...:     sleeper(1)
    ...:
finished

In [20]: with timeout(2):
    ...:     sleeper(3)
    ...:
---------------------------------------------------------------------------
Exception                                 Traceback (most recent call last)
<ipython-input-20-66c78858116f> in <module>()
      1 with timeout(2):
----> 2     sleeper(3)
      3

<ipython-input-7-a75b966bf7ac> in sleeper(t)
      1 def sleeper(t):
----> 2     time.sleep(t)
      3     print('finished')
      4

<ipython-input-18-533b9e684466> in timeout_handler(signum, frame)
      2 def timeout(duration):
      3     def timeout_handler(signum, frame):
----> 4         raise Exception(f'block timedout after {duration} seconds')
      5     signal.signal(signal.SIGALRM, timeout_handler)
      6     signal.alarm(duration)

Exception: block timedout after 2 seconds
Great, easy to use and reliable PyPI project timeout-decorator (https://pypi.org/project/timeout-decorator/)
Installation:
pip install timeout-decorator
Usage:
import time
import timeout_decorator

@timeout_decorator.timeout(5)
def mytest():
    print("Start")
    for i in range(1, 10):
        time.sleep(1)
        print("%d seconds have passed" % i)

if __name__ == '__main__':
    mytest()
timeout-decorator doesn't work on Windows, since Windows doesn't support signal well.
If you use timeout-decorator on Windows you will get the following:
AttributeError: module 'signal' has no attribute 'SIGALRM'
Some suggested using use_signals=False, but that didn't work for me.
Author @bitranox created the following package:
pip install https://github.com/bitranox/wrapt-timeout-decorator/archive/master.zip
Code Sample:
import time
from wrapt_timeout_decorator import *
#timeout(5)
def mytest(message):
print(message)
for i in range(1,10):
time.sleep(1)
print('{} seconds have passed'.format(i))
def main():
mytest('starting')
if __name__ == '__main__':
main()
Gives the following exception:
TimeoutError: Function mytest timed out after 5 seconds
Highlights
Raises TimeoutError, using exceptions to alert on timeout - can easily be modified
Cross Platform: Windows & Mac OS X
Compatibility: Python 3.6+ (I also tested on Python 2.7 and it works with small syntax adjustments)
For full explanation and extension to parallel maps, see here https://flipdazed.github.io/blog/quant%20dev/parallel-functions-with-timeouts
Minimal Example
>>> @killer_call(timeout=4)
... def bar(x):
...     import time
...     time.sleep(x)
...     return x
>>> bar(10)
Traceback (most recent call last):
...
__main__.TimeoutError: function 'bar' timed out after 4s
and as expected
>>> bar(2)
2
Full code
import multiprocessing as mp
import multiprocessing.queues as mpq
import functools
import dill

from typing import Tuple, Callable, Dict, Optional, Iterable, List, Any

class TimeoutError(Exception):

    def __init__(self, func: Callable, timeout: int):
        self.t = timeout
        self.fname = func.__name__

    def __str__(self):
        return f"function '{self.fname}' timed out after {self.t}s"

def _lemmiwinks(func: Callable, args: Tuple, kwargs: Dict[str, Any], q: mp.Queue):
    """lemmiwinks crawls into the unknown"""
    q.put(dill.loads(func)(*args, **kwargs))

def killer_call(func: Callable = None, timeout: int = 10) -> Callable:
    """
    Single function call with a timeout

    Args:
        func: the function
        timeout: The timeout in seconds
    """

    if not isinstance(timeout, int):
        raise ValueError(f'timeout needs to be an int. Got: {timeout}')

    if func is None:
        return functools.partial(killer_call, timeout=timeout)

    @functools.wraps(killer_call)
    def _inners(*args, **kwargs) -> Any:
        q_worker = mp.Queue()
        proc = mp.Process(target=_lemmiwinks, args=(dill.dumps(func), args, kwargs, q_worker))
        proc.start()
        try:
            return q_worker.get(timeout=timeout)
        except mpq.Empty:
            raise TimeoutError(func, timeout)
        finally:
            try:
                proc.terminate()
            except:
                pass
    return _inners

if __name__ == '__main__':
    @killer_call(timeout=4)
    def bar(x):
        import time
        time.sleep(x)
        return x

    print(bar(2))
    bar(10)
Notes
You will need to import inside the function because of the way dill works.
This will also mean these functions may not be compatible with doctest if there are imports inside your target functions. You will get an issue with __import__ not found.
Just in case it is helpful for anyone, building on the answer by @piro, I've made a function decorator:
import time
import signal
from functools import wraps

def timeout(timeout_secs: int):
    def wrapper(func):
        @wraps(func)
        def time_limited(*args, **kwargs):
            # Register a handler for the timeout
            def handler(signum, frame):
                raise Exception(f"Timeout for function '{func.__name__}'")

            # Register the signal function handler
            signal.signal(signal.SIGALRM, handler)

            # Define a timeout for your function
            signal.alarm(timeout_secs)

            result = None
            try:
                result = func(*args, **kwargs)
            except Exception as exc:
                raise exc
            finally:
                # disable the signal alarm
                signal.alarm(0)

            return result
        return time_limited
    return wrapper
Using the wrapper on a function with a 20 seconds timeout would look something like:
@timeout(20)
def my_slow_or_never_ending_function(name):
    while True:
        time.sleep(1)
        print(f"Yet another second passed {name}...")

try:
    results = my_slow_or_never_ending_function("Yooo!")
except Exception as e:
    print(f"ERROR: {e}")
We can use signals for the same. I think the below example will be useful for you. It is very simple compared to threads.
import signal

class myException(Exception):
    # the original snippet used myException without defining it;
    # it needs to exist for the example to run
    pass

def timeout(signum, frame):
    raise myException

# this is an infinite loop, never ending under normal circumstances
def main():
    print('Starting Main ', end='')
    while 1:
        print('in main ', end='')

# SIGALRM is only usable on a unix platform
signal.signal(signal.SIGALRM, timeout)

# change 5 to however many seconds you need
signal.alarm(5)

try:
    main()
except myException:
    print("whoops")
Another solution, with asyncio:
If you want to cancel the background task, and not just time out the running main code, then you need explicit communication from the main thread to ask the task's code to cancel, like a threading.Event():
import asyncio
import functools
import multiprocessing
from concurrent.futures.thread import ThreadPoolExecutor

class SingletonTimeOut:
    pool = None

    @classmethod
    def run(cls, to_run: functools.partial, timeout: float):
        pool = cls.get_pool()
        loop = cls.get_loop()
        try:
            task = loop.run_in_executor(pool, to_run)
            return loop.run_until_complete(asyncio.wait_for(task, timeout=timeout))
        except asyncio.TimeoutError as e:
            error_type = type(e).__name__  # TODO
            raise e

    @classmethod
    def get_pool(cls):
        if cls.pool is None:
            cls.pool = ThreadPoolExecutor(multiprocessing.cpu_count())
        return cls.pool

    @classmethod
    def get_loop(cls):
        try:
            return asyncio.get_event_loop()
        except RuntimeError:
            asyncio.set_event_loop(asyncio.new_event_loop())
            # print("NEW LOOP" + str(threading.current_thread().ident))
            return asyncio.get_event_loop()

# ---------------

TIME_OUT = float('0.2')  # seconds

def toto(input_items, nb_predictions):
    return 1

to_run = functools.partial(toto,
                           input_items=1,
                           nb_predictions="a")

results = SingletonTimeOut.run(to_run, TIME_OUT)
#!/usr/bin/python2
import sys, subprocess, threading

proc = subprocess.Popen(sys.argv[2:])
timer = threading.Timer(float(sys.argv[1]), proc.terminate)
timer.start()
proc.wait()
timer.cancel()
exit(proc.returncode)
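Saved as, say, timeout.py (my own name for it), this is invoked as python2 timeout.py 5 some_command arg...: it runs the given command, terminates it if it is still running after the given number of seconds, and exits with the command's return code.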
The func_timeout package by Tim Savannah has worked well for me.
Installation:
pip install func_timeout
Usage:
import time
from func_timeout import func_timeout, FunctionTimedOut

def my_func(n):
    time.sleep(n)

time_to_sleep = 10

# time out after 2 seconds using kwargs
func_timeout(2, my_func, kwargs={'n': time_to_sleep})

# time out after 2 seconds using args
func_timeout(2, my_func, args=(time_to_sleep,))
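When the time limit is hit, func_timeout raises the FunctionTimedOut exception imported above, so a guarded call looks like:

try:
    func_timeout(2, my_func, args=(time_to_sleep,))
except FunctionTimedOut:
    print("my_func could not complete within 2 seconds and was terminated")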
I had a need for nestable timed interrupts (which SIGALRM can't do) that won't get blocked by time.sleep (which the thread-based approach can't do). I ended up copying and lightly modifying code from here: http://code.activestate.com/recipes/577600-queue-for-managing-multiple-sigalrm-alarms-concurr/
The code itself:
#!/usr/bin/python
# lightly modified version of http://code.activestate.com/recipes/577600-queue-for-managing-multiple-sigalrm-alarms-concurr/
"""alarm.py: Permits multiple SIGALRM events to be queued.

Uses a `heapq` to store the objects to be called when an alarm signal is
raised, so that the next alarm is always at the top of the heap.
"""

import heapq
import signal
from time import time

__version__ = '$Revision: 2539 $'.split()[1]

alarmlist = []

__new_alarm = lambda t, f, a, k: (t + time(), f, a, k)
__next_alarm = lambda: int(round(alarmlist[0][0] - time())) if alarmlist else None
__set_alarm = lambda: signal.alarm(max(__next_alarm(), 1))

class TimeoutError(Exception):
    def __init__(self, message, id_=None):
        self.message = message
        self.id_ = id_

class Timeout:
    ''' id_ allows for nested timeouts. '''
    def __init__(self, id_=None, seconds=1, error_message='Timeout'):
        self.seconds = seconds
        self.error_message = error_message
        self.id_ = id_
    def handle_timeout(self):
        raise TimeoutError(self.error_message, self.id_)
    def __enter__(self):
        self.this_alarm = alarm(self.seconds, self.handle_timeout)
    def __exit__(self, type, value, traceback):
        try:
            cancel(self.this_alarm)
        except ValueError:
            pass

def __clear_alarm():
    """Clear an existing alarm.

    If the alarm signal was set to a callable other than our own, queue the
    previous alarm settings.
    """
    oldsec = signal.alarm(0)
    oldfunc = signal.signal(signal.SIGALRM, __alarm_handler)
    if oldsec > 0 and oldfunc != __alarm_handler:
        heapq.heappush(alarmlist, (__new_alarm(oldsec, oldfunc, [], {})))

def __alarm_handler(*zargs):
    """Handle an alarm by calling any due heap entries and resetting the alarm.

    Note that multiple heap entries might get called, especially if calling an
    entry takes a lot of time.
    """
    try:
        nextt = __next_alarm()
        while nextt is not None and nextt <= 0:
            (tm, func, args, keys) = heapq.heappop(alarmlist)
            func(*args, **keys)
            nextt = __next_alarm()
    finally:
        if alarmlist: __set_alarm()

def alarm(sec, func, *args, **keys):
    """Set an alarm.

    When the alarm is raised in `sec` seconds, the handler will call `func`,
    passing `args` and `keys`. Return the heap entry (which is just a big
    tuple), so that it can be cancelled by calling `cancel()`.
    """
    __clear_alarm()
    try:
        newalarm = __new_alarm(sec, func, args, keys)
        heapq.heappush(alarmlist, newalarm)
        return newalarm
    finally:
        __set_alarm()

def cancel(alarm):
    """Cancel an alarm by passing the heap entry returned by `alarm()`.

    It is an error to try to cancel an alarm which has already occurred.
    """
    __clear_alarm()
    try:
        alarmlist.remove(alarm)
        heapq.heapify(alarmlist)
    finally:
        if alarmlist: __set_alarm()
and a usage example:
import alarm
from time import sleep

try:
    with alarm.Timeout(id_='a', seconds=5):
        try:
            with alarm.Timeout(id_='b', seconds=2):
                sleep(3)
        except alarm.TimeoutError as e:
            print('raised', e.id_)
        sleep(30)
except alarm.TimeoutError as e:
    print('raised', e.id_)
else:
    print('nope.')
I faced the same problem, but my situation needed to work in a sub-thread, where signal didn't work for me, so I wrote a Python package, timeout-timer, to solve this problem. It supports use as a context manager or decorator, and uses either signal or a sub-thread module to trigger the timeout interrupt:
import time
from time import sleep

from timeout_timer import timeout, TimeoutInterrupt

class TimeoutInterruptNested(TimeoutInterrupt):
    pass

def test_timeout_nested_loop_both_timeout(timer="thread"):
    cnt = 0
    try:
        with timeout(5, timer=timer):
            try:
                with timeout(2, timer=timer, exception=TimeoutInterruptNested):
                    sleep(2)
            except TimeoutInterruptNested:
                cnt += 1
            time.sleep(10)
    except TimeoutInterrupt:
        cnt += 1
    assert cnt == 2
see more: https://github.com/dozysun/timeout-timer
Here is a simple example running one method with a timeout and also retrieving its value if successful.
import multiprocessing
import time

ret = {"foo": False}

def worker(queue):
    """worker function"""
    ret = queue.get()
    time.sleep(1)
    ret["foo"] = True
    queue.put(ret)

if __name__ == "__main__":
    queue = multiprocessing.Queue()
    queue.put(ret)
    p = multiprocessing.Process(target=worker, args=(queue,))
    p.start()
    p.join(timeout=10)
    if p.exitcode is None:
        print("The worker timed out.")
    else:
        print(f"The worker completed and returned: {queue.get()}")
Here is a slight improvement to the given thread-based solution.
The code below supports exceptions:
def runFunctionCatchExceptions(func, *args, **kwargs):
    try:
        result = func(*args, **kwargs)
    except Exception as message:
        return ["exception", message]
    return ["RESULT", result]

def runFunctionWithTimeout(func, args=(), kwargs={}, timeout_duration=10, default=None):
    import threading

    class InterruptableThread(threading.Thread):
        def __init__(self):
            threading.Thread.__init__(self)
            self.result = default
        def run(self):
            self.result = runFunctionCatchExceptions(func, *args, **kwargs)

    it = InterruptableThread()
    it.start()
    it.join(timeout_duration)
    if it.is_alive():
        return default

    if it.result[0] == "exception":
        raise it.result[1]

    return it.result[1]
Invoking it with a 5 second timeout (remote_calculate being your own function):
result = runFunctionWithTimeout(remote_calculate, (myarg,), timeout_duration=5)
Here is a POSIX version that combines many of the previous answers to deliver the following features:
Subprocesses blocking the execution.
Usage of the timeout function on class member functions.
Strict requirement on time-to-terminate.
Here is the code and some test cases:
import threading
import signal
import os
import time

class TerminateExecution(Exception):
    """
    Exception to indicate that execution has exceeded the preset running time.
    """

def quit_function(pid):
    # Killing all subprocesses
    os.setpgrp()
    os.killpg(0, signal.SIGTERM)

    # Killing the main thread
    os.kill(pid, signal.SIGTERM)

def handle_term(signum, frame):
    raise TerminateExecution()

def invoke_with_timeout(timeout, fn, *args, **kwargs):
    # Setting a sigterm handler and initiating a timer
    old_handler = signal.signal(signal.SIGTERM, handle_term)
    timer = threading.Timer(timeout, quit_function, args=[os.getpid()])
    terminate = False

    # Executing the function
    timer.start()
    try:
        result = fn(*args, **kwargs)
    except TerminateExecution:
        terminate = True
    finally:
        # Restoring original handler and cancel timer
        signal.signal(signal.SIGTERM, old_handler)
        timer.cancel()

    if terminate:
        raise BaseException("xxx")

    return result

### Test cases

def countdown(n):
    print('countdown started', flush=True)
    for i in range(n, -1, -1):
        print(i, end=', ', flush=True)
        time.sleep(1)
    print('countdown finished')
    return 1337

def really_long_function():
    time.sleep(10)

def really_long_function2():
    os.system("sleep 787")

# Checking that we can run a function as expected.
assert invoke_with_timeout(3, countdown, 1) == 1337

# Testing various scenarios
t1 = time.time()
try:
    print(invoke_with_timeout(1, countdown, 3))
    assert(False)
except BaseException:
    assert(time.time() - t1 < 1.1)
    print("All good", time.time() - t1)

t1 = time.time()
try:
    print(invoke_with_timeout(1, really_long_function2))
    assert(False)
except BaseException:
    assert(time.time() - t1 < 1.1)
    print("All good", time.time() - t1)

t1 = time.time()
try:
    print(invoke_with_timeout(1, really_long_function))
    assert(False)
except BaseException:
    assert(time.time() - t1 < 1.1)
    print("All good", time.time() - t1)

# Checking that classes are referenced and not
# copied (as would be the case with multiprocessing)

class X:
    def __init__(self):
        self.value = 0
    def set(self, v):
        self.value = v

x = X()
invoke_with_timeout(2, x.set, 9)
assert x.value == 9
I intend to kill the process if the job is not done, using both a thread and a process to achieve this.
from concurrent.futures import ThreadPoolExecutor
from time import sleep
import multiprocessing

# test case 1
def worker_1(a, b, c):
    for _ in range(2):
        print('very time consuming sleep')
        sleep(1)
    return a + b + c

# test case 2
def worker_2(in_name):
    for _ in range(10):
        print('very time consuming sleep')
        sleep(1)
    return 'hello ' + in_name
Actual class as a contextmanager
class FuncTimer():
    def __init__(self, fn, args, runtime):
        self.fn = fn
        self.args = args
        self.queue = multiprocessing.Queue()
        self.runtime = runtime
        self.process = multiprocessing.Process(target=self.thread_caller)

    def thread_caller(self):
        with ThreadPoolExecutor() as executor:
            future = executor.submit(self.fn, *self.args)
            self.queue.put(future.result())

    def __enter__(self):
        return self

    def start_run(self):
        self.process.start()
        self.process.join(timeout=self.runtime)
        if self.process.exitcode is None:
            self.process.kill()
        if self.process.exitcode is None:
            out_res = None
            print('killed premature')
        else:
            out_res = self.queue.get()
        return out_res

    def __exit__(self, exc_type, exc_value, exc_traceback):
        self.process.kill()
How to use it
print('testing case 1')
with FuncTimer(fn=worker_1, args=(1, 2, 3), runtime=5) as fp:
    res = fp.start_run()
print(res)

print('testing case 2')
with FuncTimer(fn=worker_2, args=('ram',), runtime=5) as fp:
    res = fp.start_run()
print(res)

Python alarm signals, Time-out the current thread from outside with a handled exception?

It is commonly known that signals in Python only work inside the main thread; this is my little snippet on the subject:
import signal
from threading import Timer
from time import sleep

class timeout:
    def __init__(self, seconds=1, error_message='Timeout error'):
        self.seconds = seconds
        self.error_message = error_message
    def handle_timeout(self, signum, frame):
        raise TimeoutError(self.error_message)
    def __enter__(self):
        signal.signal(signal.SIGALRM, self.handle_timeout)
        signal.alarm(self.seconds)
    def __exit__(self, type, value, traceback):
        signal.alarm(0)

def main():
    try:
        with timeout(seconds=2):
            # do_something
            sleep(3)
            print("don't come here after 3 seconds")
    except Exception as e:
        print("catch here", str(e))
    print("continue ...")

t = Timer(0.0, main)
t.start()
Now, in order to force this to work, I placed signal.signal outside the thread, with a hooked dynamic function.
class timeout:
    def __init__(self, seconds=1, error_message='Timeout error'):
        self.seconds = seconds
        self.error_message = error_message
    def handle_timeout(self):
        raise TimeoutError(self.error_message)
    def __enter__(self):
        # fluid.error = self.error_message
        # fluid.__call__ = self.handle_timeout
        signal.alarm(self.seconds)
    def __exit__(self, type, value, traceback):
        signal.alarm(0)

class fluid:
    error = 'Orpheline exception'
    def __init__(self, signum, frame):
        self.signum = signum
        self.frame = frame
    def __call__(self):
        try:
            raise TimeoutError(self.error)
        except Exception as e:
            print("catch now", str(e))

signal.signal(signal.SIGALRM, lambda x, y: fluid(x, y)())

t = Timer(0.0, main)
try:
    t.start()
except Exception as e:
    print("catch there", str(e))
Dabbling with this issue using monkey patching yields the following:
If I uncomment fluid.error = self.error_message, the exception is caught inside the class.
If I uncomment fluid.__call__ = self.handle_timeout, the exception is caught in neither main handler, and the program exits!
The only solution that worked for me is adding a new flag value, called skipvalue, to check whether or not there was an exception in this parallel thread:
class timeout:
    def __init__(self, seconds=1, error_message='Timeout error'):
        self.seconds = seconds
        self.error_message = error_message
        self.skipvalue = False
        self.SKIP = lambda: self.skipvalue
    def handle_timeout(self):
        raise TimeoutError(self.error_message)
    def timeitout(self):
        # print('not caught ', self.error_message)
        self.skipvalue = True
    def __enter__(self):
        fluid.error = self.error_message
        # fluid.__call__ = self.handle_timeout
        fluid.__call__ = self.timeitout
        signal.alarm(self.seconds)
        return self.SKIP
    def __exit__(self, type, value, traceback):
        signal.alarm(0)

def main():
    try:
        with timeout(seconds=2, error_message="Some message") as e:
            # do_something
            sleep(3)
            if e():
                raise TimeoutError(fluid.error)
            print("don't come here after 3 seconds")
    except Exception as e:
        print("catch here", str(e))
    print("continue ...")

t = Timer(0.0, main)
t.start()
The above takes up to 3 seconds because of the sleep function; with an arbitrary loop I would need to check the flag value at each execution cycle.
My Question:
Is there some more elegant, built-in way to achieve this aim without stuffing the code with unnecessary variables or classes, or forking child processes?
Apparently there is no way to do this with timers, but it seems semi-possible using threads with system traces:
import sys
import trace
import threading
import time
import signal

class thread_with_trace(threading.Thread):
    def __init__(self, *args, **kwargs):
        threading.Thread.__init__(self, target=kwargs["target"], args=(self,))
        self.killed = False
        self.ex_handler = kwargs["handler"]

    def start(self):
        self.__run_backup = self.run
        self.run = self.__run
        threading.Thread.start(self)

    def __run(self):
        sys.settrace(self.globaltrace)
        self.__run_backup()
        self.run = self.__run_backup

    def globaltrace(self, frame, event, arg):
        if event == 'call':
            return self.localtrace
        else:
            return None

    def localtrace(self, frame, event, arg):
        if self.killed:
            if event == 'line':
                raise SystemExit()
        return self.localtrace

    def kill(self):
        self.killed = True
        raise self.ex_handler

class fluid:
    def __init__(self, signum, frame):
        self.signum = signum
        self.frame = frame
        # do whatever according to signal id
    def __call__(self):
        pass

signal.signal(signal.SIGALRM, lambda x, y: fluid(x, y)())

class timeout:
    def __init__(self, thread=lambda: None, terminatefun=lambda: None, seconds=10):
        self.seconds = seconds
        self.thisthread = thread
        self.terminatefun = terminatefun
    def handle_timeout(self):
        try:
            self.thisthread.kill()
        except Exception as e:
            print(str(e))
        self.terminatefun()
    def __enter__(self):
        fluid.__call__ = self.handle_timeout
        signal.alarm(self.seconds)
    def __exit__(self, type, value, traceback):
        signal.alarm(0)

def stopit():
    print("I should be here after two seconds")

def func(t):
    with timeout(thread=t, terminatefun=stopit, seconds=2):
        while True:
            time.sleep(0.1)
            print("I'm running")

t1 = thread_with_trace(target=func, args=[], handler=TimeoutError("Ran out of time"))
t1.start()
It's almost compliant, because SystemExit() halts the thread after the expiry of the last timeout (0.1 seconds in this case).

It is an error to try to cancel an alarm which has already occurred.
"""
__clear_alarm()
try:
alarmlist.remove(alarm)
heapq.heapify(alarmlist)
finally:
if alarmlist: __set_alarm()
and a usage example:
import alarm
from time import sleep
try:
with alarm.Timeout(id_='a', seconds=5):
try:
with alarm.Timeout(id_='b', seconds=2):
sleep(3)
except alarm.TimeoutError as e:
print 'raised', e.id_
sleep(30)
except alarm.TimeoutError as e:
print 'raised', e.id_
else:
print 'nope.'
I have face the same problem but my situation is need work on sub thread, signal didn't work for me, so I wrote a python package: timeout-timer to solve this problem, support for use as context or decorator, use signal or sub thread module to trigger a timeout interrupt:
from timeout_timer import timeout, TimeoutInterrupt
class TimeoutInterruptNested(TimeoutInterrupt):
pass
def test_timeout_nested_loop_both_timeout(timer="thread"):
cnt = 0
try:
with timeout(5, timer=timer):
try:
with timeout(2, timer=timer, exception=TimeoutInterruptNested):
sleep(2)
except TimeoutInterruptNested:
cnt += 1
time.sleep(10)
except TimeoutInterrupt:
cnt += 1
assert cnt == 2
see more: https://github.com/dozysun/timeout-timer
Here is a simple example running one method with timeout and also retriev its value if successfull.
import multiprocessing
import time
ret = {"foo": False}
def worker(queue):
"""worker function"""
ret = queue.get()
time.sleep(1)
ret["foo"] = True
queue.put(ret)
if __name__ == "__main__":
queue = multiprocessing.Queue()
queue.put(ret)
p = multiprocessing.Process(target=worker, args=(queue,))
p.start()
p.join(timeout=10)
if p.exitcode is None:
print("The worker timed out.")
else:
print(f"The worker completed and returned: {queue.get()}")
Here is a slight improvement to the given thread-based solution.
The code below supports exceptions:
def runFunctionCatchExceptions(func, *args, **kwargs):
try:
result = func(*args, **kwargs)
except Exception, message:
return ["exception", message]
return ["RESULT", result]
def runFunctionWithTimeout(func, args=(), kwargs={}, timeout_duration=10, default=None):
import threading
class InterruptableThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.result = default
def run(self):
self.result = runFunctionCatchExceptions(func, *args, **kwargs)
it = InterruptableThread()
it.start()
it.join(timeout_duration)
if it.isAlive():
return default
if it.result[0] == "exception":
raise it.result[1]
return it.result[1]
Invoking it with a 5 second timeout:
result = timeout(remote_calculate, (myarg,), timeout_duration=5)
Here is a POSIX version that combines many of the previous answers to deliver following features:
Subprocesses blocking the execution.
Usage of the timeout function on class member functions.
Strict requirement on time-to-terminate.
Here is the code and some test cases:
import threading
import signal
import os
import time
class TerminateExecution(Exception):
"""
Exception to indicate that execution has exceeded the preset running time.
"""
def quit_function(pid):
# Killing all subprocesses
os.setpgrp()
os.killpg(0, signal.SIGTERM)
# Killing the main thread
os.kill(pid, signal.SIGTERM)
def handle_term(signum, frame):
raise TerminateExecution()
def invoke_with_timeout(timeout, fn, *args, **kwargs):
# Setting a sigterm handler and initiating a timer
old_handler = signal.signal(signal.SIGTERM, handle_term)
timer = threading.Timer(timeout, quit_function, args=[os.getpid()])
terminate = False
# Executing the function
timer.start()
try:
result = fn(*args, **kwargs)
except TerminateExecution:
terminate = True
finally:
# Restoring original handler and cancel timer
signal.signal(signal.SIGTERM, old_handler)
timer.cancel()
if terminate:
raise BaseException("xxx")
return result
### Test cases
def countdown(n):
print('countdown started', flush=True)
for i in range(n, -1, -1):
print(i, end=', ', flush=True)
time.sleep(1)
print('countdown finished')
return 1337
def really_long_function():
time.sleep(10)
def really_long_function2():
os.system("sleep 787")
# Checking that we can run a function as expected.
assert invoke_with_timeout(3, countdown, 1) == 1337
# Testing various scenarios
t1 = time.time()
try:
print(invoke_with_timeout(1, countdown, 3))
assert(False)
except BaseException:
assert(time.time() - t1 < 1.1)
print("All good", time.time() - t1)
t1 = time.time()
try:
print(invoke_with_timeout(1, really_long_function2))
assert(False)
except BaseException:
assert(time.time() - t1 < 1.1)
print("All good", time.time() - t1)
t1 = time.time()
try:
print(invoke_with_timeout(1, really_long_function))
assert(False)
except BaseException:
assert(time.time() - t1 < 1.1)
print("All good", time.time() - t1)
# Checking that classes are referenced and not
# copied (as would be the case with multiprocessing)
class X:
def __init__(self):
self.value = 0
def set(self, v):
self.value = v
x = X()
invoke_with_timeout(2, x.set, 9)
assert x.value == 9
I intend to kill the process if job not done , using thread and process both to achieve this.
from concurrent.futures import ThreadPoolExecutor
from time import sleep
import multiprocessing
# test case 1
def worker_1(a,b,c):
for _ in range(2):
print('very time consuming sleep')
sleep(1)
return a+b+c
# test case 2
def worker_2(in_name):
for _ in range(10):
print('very time consuming sleep')
sleep(1)
return 'hello '+in_name
Actual class as a contextmanager
class FuncTimer():
def __init__(self,fn,args,runtime):
self.fn = fn
self.args = args
self.queue = multiprocessing.Queue()
self.runtime = runtime
self.process = multiprocessing.Process(target=self.thread_caller)
def thread_caller(self):
with ThreadPoolExecutor() as executor:
future = executor.submit(self.fn, *self.args)
self.queue.put(future.result())
def __enter__(self):
return self
def start_run(self):
self.process.start()
self.process.join(timeout=self.runtime)
if self.process.exitcode is None:
self.process.kill()
if self.process.exitcode is None:
out_res = None
print('killed premature')
else:
out_res = self.queue.get()
return out_res
def __exit__(self, exc_type, exc_value, exc_traceback):
self.process.kill()
How to use it
print('testing case 1')
with FuncTimer(fn=worker_1,args=(1,2,3),runtime = 5) as fp:
res = fp.start_run()
print(res)
print('testing case 2')
with FuncTimer(fn=worker_2,args=('ram',),runtime = 5) as fp:
res = fp.start_run()
print(res)

Timeout on a function call

I'm calling a function in Python which I know may stall and force me to restart the script.
How do I call the function or what do I wrap it in so that if it takes longer than 5 seconds the script cancels it and does something else?
You may use the signal package if you are running on UNIX:
In [1]: import signal
# Register a handler for the timeout
In [2]: def handler(signum, frame):
...: print("Forever is over!")
...: raise Exception("end of time")
...:
# This function *may* run for an indetermined time...
In [3]: def loop_forever():
...: import time
...: while 1:
...: print("sec")
...: time.sleep(1)
...:
...:
# Register the signal function handler
In [4]: signal.signal(signal.SIGALRM, handler)
Out[4]: 0
# Define a timeout for your function
In [5]: signal.alarm(10)
Out[5]: 0
In [6]: try:
...: loop_forever()
...: except Exception as exc:
...: print(exc)
....:
sec
sec
sec
sec
sec
sec
sec
sec
Forever is over!
end of time
# Cancel the timer if the function returned before timeout
# (ok, mine won't but yours maybe will :)
In [7]: signal.alarm(0)
Out[7]: 0
10 seconds after the call signal.alarm(10), the handler is called. This raises an exception that you can intercept from the regular Python code.
This module doesn't play well with threads (but then, who does?)
Note that since we raise an exception when the timeout happens, it may end up caught and ignored inside the function; for example, one such function:
def loop_forever():
while 1:
print('sec')
try:
time.sleep(10)
except:
continue
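One way to harden the signal approach against this is to raise an exception derived from BaseException, which an except Exception: block cannot swallow (a bare except: like the one above still would). A minimal sketch:
import signal
import time

class HardTimeout(BaseException):
    """Derived from BaseException so `except Exception:` cannot catch it."""

def handler(signum, frame):
    raise HardTimeout("end of time")

def stubborn():
    # Swallows ordinary exceptions, but not BaseException subclasses.
    while True:
        try:
            time.sleep(10)
        except Exception:
            continue

signal.signal(signal.SIGALRM, handler)  # Unix only, as above
signal.alarm(5)
try:
    stubborn()
except HardTimeout as exc:
    print(exc)  # prints "end of time" after roughly 5 seconds
finally:
    signal.alarm(0)  # cancel the alarm on every path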
You can use multiprocessing.Process to do exactly that.
Code
import multiprocessing
import time
# bar
def bar():
for i in range(100):
print "Tick"
time.sleep(1)
if __name__ == '__main__':
# Start bar as a process
p = multiprocessing.Process(target=bar)
p.start()
# Wait for 10 seconds or until process finishes
p.join(10)
    # If the process is still active
    if p.is_alive():
        print("running... let's kill it...")
# Terminate - may not work if process is stuck for good
p.terminate()
# OR Kill - will work for sure, no chance for process to finish nicely however
# p.kill()
p.join()
How do I call the function or what do I wrap it in so that if it takes longer than 5 seconds the script cancels it?
I posted a gist that solves this question/problem with a decorator and a threading.Timer. Here it is with a breakdown.
Imports and setups for compatibility
It was tested with Python 2 and 3. It should also work under Unix/Linux and Windows.
First the imports. These attempt to keep the code consistent regardless of the Python version:
from __future__ import print_function
import sys
import threading
from time import sleep
try:
import thread
except ImportError:
import _thread as thread
Use version independent code:
try:
range, _print = xrange, print
def print(*args, **kwargs):
flush = kwargs.pop('flush', False)
_print(*args, **kwargs)
if flush:
kwargs.get('file', sys.stdout).flush()
except NameError:
pass
Now we have imported our functionality from the standard library.
exit_after decorator
Next we need a function to terminate the main() from the child thread:
def quit_function(fn_name):
# print to stderr, unbuffered in Python 2.
print('{0} took too long'.format(fn_name), file=sys.stderr)
sys.stderr.flush() # Python 3 stderr is likely buffered.
thread.interrupt_main() # raises KeyboardInterrupt
And here is the decorator itself:
def exit_after(s):
'''
use as decorator to exit process if
function takes longer than s seconds
'''
def outer(fn):
def inner(*args, **kwargs):
timer = threading.Timer(s, quit_function, args=[fn.__name__])
timer.start()
try:
result = fn(*args, **kwargs)
finally:
timer.cancel()
return result
return inner
return outer
Usage
And here's the usage that directly answers your question about exiting after 5 seconds!:
@exit_after(5)
def countdown(n):
print('countdown started', flush=True)
for i in range(n, -1, -1):
print(i, end=', ', flush=True)
sleep(1)
print('countdown finished')
Demo:
>>> countdown(3)
countdown started
3, 2, 1, 0, countdown finished
>>> countdown(10)
countdown started
10, 9, 8, 7, 6, countdown took too long
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<stdin>", line 11, in inner
File "<stdin>", line 6, in countdown
KeyboardInterrupt
The second function call will not finish; instead, the process should exit with a traceback!
KeyboardInterrupt does not always stop a sleeping thread
Note that sleep will not always be interrupted by a keyboard interrupt, on Python 2 on Windows, e.g.:
@exit_after(1)
def sleep10():
sleep(10)
print('slept 10 seconds')
>>> sleep10()
sleep10 took too long # Note that it hangs here about 9 more seconds
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<stdin>", line 11, in inner
File "<stdin>", line 3, in sleep10
KeyboardInterrupt
nor is it likely to interrupt code running in extensions unless it explicitly checks for PyErr_CheckSignals(), see Cython, Python and KeyboardInterrupt ignored
I would avoid sleeping a thread more than a second, in any case - that's an eon in processor time.
How do I call the function or what do I wrap it in so that if it takes longer than 5 seconds the script cancels it and does something else?
To catch it and do something else, you can catch the KeyboardInterrupt.
>>> try:
... countdown(10)
... except KeyboardInterrupt:
... print('do something else')
...
countdown started
10, 9, 8, 7, 6, countdown took too long
do something else
I have a different proposal: a pure function (with the same API as the threading suggestion) that seems to work fine (based on suggestions in this thread):
def timeout(func, args=(), kwargs={}, timeout_duration=1, default=None):
import signal
class TimeoutError(Exception):
pass
def handler(signum, frame):
raise TimeoutError()
# set the timeout handler
signal.signal(signal.SIGALRM, handler)
signal.alarm(timeout_duration)
try:
result = func(*args, **kwargs)
except TimeoutError as exc:
result = default
finally:
signal.alarm(0)
return result
I ran across this thread when searching for a timeout call on unit tests. I didn't find anything simple in the answers or 3rd party packages so I wrote the decorator below you can drop right into code:
import multiprocessing.pool
import functools
def timeout(max_timeout):
"""Timeout decorator, parameter in seconds."""
def timeout_decorator(item):
"""Wrap the original function."""
        @functools.wraps(item)
def func_wrapper(*args, **kwargs):
"""Closure for function."""
pool = multiprocessing.pool.ThreadPool(processes=1)
async_result = pool.apply_async(item, args, kwargs)
# raises a TimeoutError if execution exceeds max_timeout
return async_result.get(max_timeout)
return func_wrapper
return timeout_decorator
Then it's as simple as this to timeout a test or any function you like:
@timeout(5.0)  # if execution takes longer than 5 seconds, raise a TimeoutError
def test_base_regression(self):
...
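One caveat: each call creates a ThreadPool that is never closed. A minimal variant of the inner func_wrapper that closes the pool on every path, assuming the surrounding decorator stays the same:
def func_wrapper(*args, **kwargs):
    """Closure for function; close the pool whether or not we time out."""
    pool = multiprocessing.pool.ThreadPool(processes=1)
    try:
        async_result = pool.apply_async(item, args, kwargs)
        # raises multiprocessing.TimeoutError if execution exceeds max_timeout
        return async_result.get(max_timeout)
    finally:
        pool.close()  # stop accepting work; the worker thread may still be running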
The stopit package, found on PyPI, seems to handle timeouts well.
I like the @stopit.threading_timeoutable decorator, which adds a timeout parameter to the decorated function, and it does what you expect: it stops the function.
Check it out on PyPI: https://pypi.python.org/pypi/stopit
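A minimal usage sketch, assuming stopit is installed; the default= value is what the decorated function returns when the timeout fires:
import time
import stopit

@stopit.threading_timeoutable(default="timed out")
def infinite_loop():
    while True:
        time.sleep(0.1)

# The decorator adds a `timeout` keyword argument to the decorated function.
print(infinite_loop(timeout=2))  # prints "timed out" after roughly 2 seconds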
I am the author of wrapt_timeout_decorator.
Most of the solutions presented here work wonderfully under Linux at first glance - because we have fork() and signals() - but on Windows things look a bit different. And when it comes to subthreads on Linux, you can't use signals anymore.
In order to spawn a process under Windows, it needs to be picklable - and many decorated functions or class methods are not.
So you need to use a better pickler like dill and multiprocess (not pickle and multiprocessing) - that's why you can't use ProcessPoolExecutor (or only with limited functionality).
For the timeout itself - you need to define what timeout means - because on Windows it will take considerable (and not determinable) time to spawn the process. This can be tricky on short timeouts. Let's assume spawning the process takes about 0.5 seconds (easily!). If you give a timeout of 0.2 seconds, what should happen?
Should the function time out after 0.5 + 0.2 seconds (so let the method run for 0.2 seconds)?
Or should the called process time out after 0.2 seconds (in that case, the decorated function will ALWAYS time out, because in that time it is not even spawned)?
Also, nested decorators can be nasty, and you can't use signals in a subthread. If you want to create a truly universal, cross-platform decorator, all this needs to be taken into consideration (and tested).
Other issues are passing exceptions back to the caller, as well as logging issues (if used in the decorated function - logging to files in another process is NOT supported).
I tried to cover all the edge cases; you might look into the package wrapt_timeout_decorator, or at least test your own solutions inspired by the unittests used there.
@Alexis Eggermont - unfortunately I don't have enough points to comment - maybe someone else can notify you - I think I solved your import issue.
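A rough sketch of the cross-platform path (use_signals=False, the flag also mentioned later in this thread, selects the subprocess-based implementation instead of SIGALRM):
import time
from wrapt_timeout_decorator import timeout

@timeout(5, use_signals=False)  # works on Windows and in subthreads
def slow_function(message):
    print(message)
    time.sleep(10)

if __name__ == '__main__':
    slow_function('starting')  # raises TimeoutError after about 5 seconds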
There are a lot of suggestions, but none using concurrent.futures, which I think is the most legible way to handle this.
from concurrent.futures import ProcessPoolExecutor
# Warning: this does not terminate the function on timeout
def timeout_five(fnc, *args, **kwargs):
with ProcessPoolExecutor() as p:
f = p.submit(fnc, *args, **kwargs)
return f.result(timeout=5)
Super simple to read and maintain.
We make a pool, submit a single process and then wait up to 5 seconds before raising a TimeoutError that you could catch and handle however you needed.
Native to python 3.2+ and backported to 2.7 (pip install futures).
Switching between threads and processes is as simple as replacing ProcessPoolExecutor with ThreadPoolExecutor.
If you want to terminate the Process on timeout I would suggest looking into Pebble.
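For reference, a minimal Pebble sketch, based on its documented ProcessPool.schedule API, which takes a per-task timeout and terminates the worker process when it fires:
import time
from concurrent.futures import TimeoutError
from pebble import ProcessPool

def slow(n):
    time.sleep(n)
    return n

if __name__ == '__main__':
    with ProcessPool() as pool:
        future = pool.schedule(slow, args=(10,), timeout=5)
        try:
            print(future.result())
        except TimeoutError:
            # Unlike a plain ProcessPoolExecutor, Pebble has already
            # terminated the worker process at this point.
            print("slow() took longer than 5 seconds and was killed")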
Building on and enhancing the answer by @piro, you can build a context manager. This allows for very readable code which will disable the alarm signal after a successful run (sets signal.alarm(0)):
from contextlib import contextmanager
import signal
import time
@contextmanager
def timeout(duration):
def timeout_handler(signum, frame):
        raise TimeoutError(f'block timed out after {duration} seconds')
signal.signal(signal.SIGALRM, timeout_handler)
signal.alarm(duration)
try:
yield
finally:
signal.alarm(0)
def sleeper(duration):
time.sleep(duration)
print('finished')
Example usage:
In [19]: with timeout(2):
...: sleeper(1)
...:
finished
In [20]: with timeout(2):
...: sleeper(3)
...:
---------------------------------------------------------------------------
TimeoutError                              Traceback (most recent call last)
<ipython-input-20-66c78858116f> in <module>()
      1 with timeout(2):
----> 2     sleeper(3)
      3
<ipython-input-7-a75b966bf7ac> in sleeper(duration)
      1 def sleeper(duration):
----> 2     time.sleep(duration)
      3     print('finished')
      4
<ipython-input-18-533b9e684466> in timeout_handler(signum, frame)
      2 def timeout(duration):
      3     def timeout_handler(signum, frame):
----> 4         raise TimeoutError(f'block timed out after {duration} seconds')
      5     signal.signal(signal.SIGALRM, timeout_handler)
      6     signal.alarm(duration)
TimeoutError: block timed out after 2 seconds
A great, easy-to-use, and reliable PyPI project: timeout-decorator (https://pypi.org/project/timeout-decorator/)
installation:
pip install timeout-decorator
Usage:
import time
import timeout_decorator
@timeout_decorator.timeout(5)
def mytest():
    print("Start")
    for i in range(1, 10):
        time.sleep(1)
        print("%d seconds have passed" % i)
if __name__ == '__main__':
mytest()
timeout-decorator doesn't work on Windows, since Windows doesn't support signals well.
If you use timeout-decorator on a Windows system you will get the following:
AttributeError: module 'signal' has no attribute 'SIGALRM'
Some suggested using use_signals=False, but that didn't work for me.
Author @bitranox created the following package:
pip install https://github.com/bitranox/wrapt-timeout-decorator/archive/master.zip
Code Sample:
import time
from wrapt_timeout_decorator import *
@timeout(5)
def mytest(message):
    print(message)
    for i in range(1, 10):
        time.sleep(1)
        print('{} seconds have passed'.format(i))
def main():
mytest('starting')
if __name__ == '__main__':
main()
Gives the following exception:
TimeoutError: Function mytest timed out after 5 seconds
Highlights
Raises TimeoutError and uses exceptions to alert on timeout - can easily be modified
Cross Platform: Windows & Mac OS X
Compatibility: Python 3.6+ (I also tested on python 2.7 and it works with small syntax adjustments)
For full explanation and extension to parallel maps, see here https://flipdazed.github.io/blog/quant%20dev/parallel-functions-with-timeouts
Minimal Example
>>> @killer_call(timeout=4)
... def bar(x):
... import time
... time.sleep(x)
... return x
>>> bar(10)
Traceback (most recent call last):
...
__main__.TimeoutError: function 'bar' timed out after 4s
and as expected
>>> bar(2)
2
Full code
import multiprocessing as mp
import multiprocessing.queues as mpq
import functools
import dill
from typing import Tuple, Callable, Dict, Optional, Iterable, List, Any
class TimeoutError(Exception):
def __init__(self, func: Callable, timeout: int):
self.t = timeout
self.fname = func.__name__
def __str__(self):
return f"function '{self.fname}' timed out after {self.t}s"
def _lemmiwinks(func: Callable, args: Tuple, kwargs: Dict[str, Any], q: mp.Queue):
"""lemmiwinks crawls into the unknown"""
q.put(dill.loads(func)(*args, **kwargs))
def killer_call(func: Callable = None, timeout: int = 10) -> Callable:
"""
Single function call with a timeout
Args:
func: the function
timeout: The timeout in seconds
"""
if not isinstance(timeout, int):
raise ValueError(f'timeout needs to be an int. Got: {timeout}')
if func is None:
return functools.partial(killer_call, timeout=timeout)
    @functools.wraps(func)  # copy the wrapped function's metadata, not the decorator's
def _inners(*args, **kwargs) -> Any:
q_worker = mp.Queue()
proc = mp.Process(target=_lemmiwinks, args=(dill.dumps(func), args, kwargs, q_worker))
proc.start()
try:
return q_worker.get(timeout=timeout)
except mpq.Empty:
raise TimeoutError(func, timeout)
finally:
try:
proc.terminate()
except:
pass
return _inners
if __name__ == '__main__':
    @killer_call(timeout=4)
def bar(x):
import time
time.sleep(x)
return x
print(bar(2))
bar(10)
Notes
You will need to import inside the function because of the way dill works.
This will also mean these functions may not be compatible with doctest if there are imports inside your target functions. You will get an issue with __import__ not found.
Just in case it is helpful for anyone, building on the answer by @piro, I've made a function decorator:
import time
import signal
from functools import wraps
def timeout(timeout_secs: int):
def wrapper(func):
        @wraps(func)
def time_limited(*args, **kwargs):
            # Register a handler for the timeout
def handler(signum, frame):
raise Exception(f"Timeout for function '{func.__name__}'")
# Register the signal function handler
signal.signal(signal.SIGALRM, handler)
# Define a timeout for your function
signal.alarm(timeout_secs)
result = None
try:
result = func(*args, **kwargs)
except Exception as exc:
raise exc
finally:
# disable the signal alarm
signal.alarm(0)
return result
return time_limited
return wrapper
Using the wrapper on a function with a 20 seconds timeout would look something like:
@timeout(20)
def my_slow_or_never_ending_function(name):
    while True:
        time.sleep(1)
        print(f"Yet another second passed {name}...")
try:
results = my_slow_or_never_ending_function("Yooo!")
except Exception as e:
print(f"ERROR: {e}")
We can use signals for the same purpose. I think the example below will be useful for you. It is very simple compared to threads.
import signal

class MyException(Exception):
    # exception used to break out of main() when the alarm fires
    pass

def timeout(signum, frame):
    raise MyException

# this is an infinite loop, never ending under normal circumstances
def main():
    print('Starting Main ', end='')
    while 1:
        print('in main ', end='')

# SIGALRM is only usable on a unix platform
signal.signal(signal.SIGALRM, timeout)
# change 5 to however many seconds you need
signal.alarm(5)
try:
    main()
except MyException:
    print("whoops")
Another solution with asyncio:
If you want to cancel the background task, and not just time out the running main code, then you need explicit communication from the main thread to ask the task's code to cancel, like a threading.Event():
import asyncio
import functools
import multiprocessing
from concurrent.futures.thread import ThreadPoolExecutor
class SingletonTimeOut:
pool = None
    @classmethod
def run(cls, to_run: functools.partial, timeout: float):
pool = cls.get_pool()
loop = cls.get_loop()
try:
task = loop.run_in_executor(pool, to_run)
return loop.run_until_complete(asyncio.wait_for(task, timeout=timeout))
except asyncio.TimeoutError as e:
error_type = type(e).__name__ #TODO
raise e
    @classmethod
def get_pool(cls):
if cls.pool is None:
cls.pool = ThreadPoolExecutor(multiprocessing.cpu_count())
return cls.pool
    @classmethod
def get_loop(cls):
try:
return asyncio.get_event_loop()
except RuntimeError:
asyncio.set_event_loop(asyncio.new_event_loop())
# print("NEW LOOP" + str(threading.current_thread().ident))
return asyncio.get_event_loop()
# ---------------
TIME_OUT = 0.2  # seconds
def toto(input_items,nb_predictions):
return 1
to_run = functools.partial(toto,
input_items=1,
nb_predictions="a")
results = SingletonTimeOut.run(to_run, TIME_OUT)
#!/usr/bin/python2
# Usage: script.py <timeout-seconds> <command> [args...]
# Runs the command and terminates it if it is still alive after the timeout.
import sys, subprocess, threading
proc = subprocess.Popen(sys.argv[2:])
timer = threading.Timer(float(sys.argv[1]), proc.terminate)
timer.start()
proc.wait()
timer.cancel()
exit(proc.returncode)
The func_timeout package by Tim Savannah has worked well for me.
Installation:
pip install func_timeout
Usage:
import time
from func_timeout import func_timeout, FunctionTimedOut
def my_func(n):
time.sleep(n)
time_to_sleep = 10
# time out after 2 seconds using kwargs
func_timeout(2, my_func, kwargs={'n' : time_to_sleep})
# time out after 2 seconds using args
func_timeout(2, my_func, args=(time_to_sleep,))
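Since FunctionTimedOut is imported above, here is the catching side as well (func_timeout raises it when the time limit is hit):
try:
    func_timeout(2, my_func, args=(time_to_sleep,))
except FunctionTimedOut:
    print("my_func could not complete within 2 seconds and was terminated")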
I had a need for nestable timed interrupts (which SIGALRM can't do) that won't get blocked by time.sleep (which the thread-based approach can't do). I ended up copying and lightly modifying code from here: http://code.activestate.com/recipes/577600-queue-for-managing-multiple-sigalrm-alarms-concurr/
The code itself:
#!/usr/bin/python
# lightly modified version of http://code.activestate.com/recipes/577600-queue-for-managing-multiple-sigalrm-alarms-concurr/
"""alarm.py: Permits multiple SIGALRM events to be queued.
Uses a `heapq` to store the objects to be called when an alarm signal is
raised, so that the next alarm is always at the top of the heap.
"""
import heapq
import signal
from time import time
__version__ = '$Revision: 2539 $'.split()[1]
alarmlist = []
__new_alarm = lambda t, f, a, k: (t + time(), f, a, k)
__next_alarm = lambda: int(round(alarmlist[0][0] - time())) if alarmlist else None
__set_alarm = lambda: signal.alarm(max(__next_alarm(), 1))
class TimeoutError(Exception):
def __init__(self, message, id_=None):
self.message = message
self.id_ = id_
class Timeout:
''' id_ allows for nested timeouts. '''
def __init__(self, id_=None, seconds=1, error_message='Timeout'):
self.seconds = seconds
self.error_message = error_message
self.id_ = id_
def handle_timeout(self):
raise TimeoutError(self.error_message, self.id_)
def __enter__(self):
self.this_alarm = alarm(self.seconds, self.handle_timeout)
def __exit__(self, type, value, traceback):
try:
cancel(self.this_alarm)
except ValueError:
pass
def __clear_alarm():
"""Clear an existing alarm.
If the alarm signal was set to a callable other than our own, queue the
previous alarm settings.
"""
oldsec = signal.alarm(0)
oldfunc = signal.signal(signal.SIGALRM, __alarm_handler)
if oldsec > 0 and oldfunc != __alarm_handler:
heapq.heappush(alarmlist, (__new_alarm(oldsec, oldfunc, [], {})))
def __alarm_handler(*zargs):
"""Handle an alarm by calling any due heap entries and resetting the alarm.
Note that multiple heap entries might get called, especially if calling an
entry takes a lot of time.
"""
try:
nextt = __next_alarm()
while nextt is not None and nextt <= 0:
(tm, func, args, keys) = heapq.heappop(alarmlist)
func(*args, **keys)
nextt = __next_alarm()
finally:
if alarmlist: __set_alarm()
def alarm(sec, func, *args, **keys):
"""Set an alarm.
When the alarm is raised in `sec` seconds, the handler will call `func`,
passing `args` and `keys`. Return the heap entry (which is just a big
tuple), so that it can be cancelled by calling `cancel()`.
"""
__clear_alarm()
try:
newalarm = __new_alarm(sec, func, args, keys)
heapq.heappush(alarmlist, newalarm)
return newalarm
finally:
__set_alarm()
def cancel(alarm):
"""Cancel an alarm by passing the heap entry returned by `alarm()`.
It is an error to try to cancel an alarm which has already occurred.
"""
__clear_alarm()
try:
alarmlist.remove(alarm)
heapq.heapify(alarmlist)
finally:
if alarmlist: __set_alarm()
and a usage example:
import alarm
from time import sleep
try:
    with alarm.Timeout(id_='a', seconds=5):
        try:
            with alarm.Timeout(id_='b', seconds=2):
                sleep(3)
        except alarm.TimeoutError as e:
            print('raised', e.id_)
        sleep(30)
except alarm.TimeoutError as e:
    print('raised', e.id_)
else:
    print('nope.')
I faced the same problem, but in my situation I needed to work in a sub-thread, where signals didn't work for me, so I wrote a Python package, timeout-timer, to solve this problem. It supports use as a context manager or decorator and uses either a signal or a sub-thread to trigger a timeout interrupt:
import time
from time import sleep

from timeout_timer import timeout, TimeoutInterrupt
class TimeoutInterruptNested(TimeoutInterrupt):
pass
def test_timeout_nested_loop_both_timeout(timer="thread"):
cnt = 0
try:
with timeout(5, timer=timer):
try:
with timeout(2, timer=timer, exception=TimeoutInterruptNested):
sleep(2)
except TimeoutInterruptNested:
cnt += 1
time.sleep(10)
except TimeoutInterrupt:
cnt += 1
assert cnt == 2
see more: https://github.com/dozysun/timeout-timer
Here is a simple example running one method with a timeout and also retrieving its value if successful.
import multiprocessing
import time
ret = {"foo": False}
def worker(queue):
"""worker function"""
ret = queue.get()
time.sleep(1)
ret["foo"] = True
queue.put(ret)
if __name__ == "__main__":
queue = multiprocessing.Queue()
queue.put(ret)
p = multiprocessing.Process(target=worker, args=(queue,))
p.start()
p.join(timeout=10)
    if p.exitcode is None:
        p.terminate()  # the worker is still running past the timeout; stop it
        print("The worker timed out.")
    else:
        print(f"The worker completed and returned: {queue.get()}")
Here is a slight improvement to the given thread-based solution.
The code below supports exceptions:
def runFunctionCatchExceptions(func, *args, **kwargs):
try:
result = func(*args, **kwargs)
    except Exception as message:
return ["exception", message]
return ["RESULT", result]
def runFunctionWithTimeout(func, args=(), kwargs={}, timeout_duration=10, default=None):
import threading
class InterruptableThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.result = default
def run(self):
self.result = runFunctionCatchExceptions(func, *args, **kwargs)
it = InterruptableThread()
it.start()
it.join(timeout_duration)
    if it.is_alive():
return default
if it.result[0] == "exception":
raise it.result[1]
return it.result[1]
Invoking it with a 5 second timeout:
result = runFunctionWithTimeout(remote_calculate, (myarg,), timeout_duration=5)
Here is a POSIX version that combines many of the previous answers to deliver the following features:
Subprocesses blocking the execution.
Usage of the timeout function on class member functions.
Strict requirement on time-to-terminate.
Here is the code and some test cases:
import threading
import signal
import os
import time
class TerminateExecution(Exception):
"""
Exception to indicate that execution has exceeded the preset running time.
"""
def quit_function(pid):
# Killing all subprocesses
os.setpgrp()
os.killpg(0, signal.SIGTERM)
# Killing the main thread
os.kill(pid, signal.SIGTERM)
def handle_term(signum, frame):
raise TerminateExecution()
def invoke_with_timeout(timeout, fn, *args, **kwargs):
# Setting a sigterm handler and initiating a timer
old_handler = signal.signal(signal.SIGTERM, handle_term)
timer = threading.Timer(timeout, quit_function, args=[os.getpid()])
terminate = False
# Executing the function
timer.start()
try:
result = fn(*args, **kwargs)
except TerminateExecution:
terminate = True
finally:
# Restoring original handler and cancel timer
signal.signal(signal.SIGTERM, old_handler)
timer.cancel()
if terminate:
raise BaseException("xxx")
return result
### Test cases
def countdown(n):
print('countdown started', flush=True)
for i in range(n, -1, -1):
print(i, end=', ', flush=True)
time.sleep(1)
print('countdown finished')
return 1337
def really_long_function():
time.sleep(10)
def really_long_function2():
os.system("sleep 787")
# Checking that we can run a function as expected.
assert invoke_with_timeout(3, countdown, 1) == 1337
# Testing various scenarios
t1 = time.time()
try:
print(invoke_with_timeout(1, countdown, 3))
assert(False)
except BaseException:
assert(time.time() - t1 < 1.1)
print("All good", time.time() - t1)
t1 = time.time()
try:
print(invoke_with_timeout(1, really_long_function2))
assert(False)
except BaseException:
assert(time.time() - t1 < 1.1)
print("All good", time.time() - t1)
t1 = time.time()
try:
print(invoke_with_timeout(1, really_long_function))
assert(False)
except BaseException:
assert(time.time() - t1 < 1.1)
print("All good", time.time() - t1)
# Checking that classes are referenced and not
# copied (as would be the case with multiprocessing)
class X:
def __init__(self):
self.value = 0
def set(self, v):
self.value = v
x = X()
invoke_with_timeout(2, x.set, 9)
assert x.value == 9
I intend to kill the process if the job is not done, using both a thread and a process to achieve this.
from concurrent.futures import ThreadPoolExecutor
from time import sleep
import multiprocessing
# test case 1
def worker_1(a,b,c):
for _ in range(2):
print('very time consuming sleep')
sleep(1)
return a+b+c
# test case 2
def worker_2(in_name):
for _ in range(10):
print('very time consuming sleep')
sleep(1)
return 'hello '+in_name
The actual class, used as a context manager:
class FuncTimer():
def __init__(self,fn,args,runtime):
self.fn = fn
self.args = args
self.queue = multiprocessing.Queue()
self.runtime = runtime
self.process = multiprocessing.Process(target=self.thread_caller)
def thread_caller(self):
with ThreadPoolExecutor() as executor:
future = executor.submit(self.fn, *self.args)
self.queue.put(future.result())
def __enter__(self):
return self
def start_run(self):
self.process.start()
self.process.join(timeout=self.runtime)
if self.process.exitcode is None:
self.process.kill()
if self.process.exitcode is None:
out_res = None
                print('killed prematurely')
else:
out_res = self.queue.get()
return out_res
def __exit__(self, exc_type, exc_value, exc_traceback):
self.process.kill()
How to use it
print('testing case 1')
with FuncTimer(fn=worker_1,args=(1,2,3),runtime = 5) as fp:
res = fp.start_run()
print(res)
print('testing case 2')
with FuncTimer(fn=worker_2,args=('ram',),runtime = 5) as fp:
res = fp.start_run()
print(res)
