Python timeout context manager with threads

I have a timeout context manager that works perfectly with signals, but it raises an error in multithreaded mode because signals only work in the main thread.
import signal
from contextlib import contextmanager

class TimeoutException(Exception):
    pass

def timeout_handler(signum, frame):
    raise TimeoutException()

@contextmanager
def timeout(seconds):
    old_handler = signal.signal(signal.SIGALRM, timeout_handler)
    signal.alarm(seconds)
    try:
        yield
    finally:
        signal.alarm(0)
        signal.signal(signal.SIGALRM, old_handler)
I've seen a decorator implementation of timeout, but I don't know how to get a yield inside a class derived from threading.Thread. My variant won't work:
@contextmanager
def timelimit(seconds):
    class FuncThread(threading.Thread):
        def run(self):
            yield  # this can't work: it turns run() into a generator that is never iterated

    it = FuncThread()
    it.start()
    it.join(seconds)
    if it.isAlive():
        raise TimeoutException()

If the code guarded by the context manager is loop-based, consider handling this the way people handle thread killing. Killing another thread is generally unsafe, so the standard approach is to have the controlling thread set a flag that's visible to the worker thread. The worker thread periodically checks that flag and cleanly shuts itself down. Here's how you can do something analogous with timeouts:
import time

class timeout(object):
    def __init__(self, seconds):
        self.seconds = seconds

    def __enter__(self):
        self.die_after = time.time() + self.seconds
        return self

    def __exit__(self, type, value, traceback):
        pass

    @property
    def timed_out(self):
        return time.time() > self.die_after
Here's a single-threaded usage example:
with timeout(1) as t:
    while True:  # this will take a long time without a timeout
        # periodically check for timeouts
        if t.timed_out:
            break  # or raise an exception
        # do some "useful" work
        print "."
        time.sleep(0.2)
and a multithreaded one:
import thread

def print_for_n_secs(string, seconds):
    with timeout(seconds) as t:
        while True:
            if t.timed_out:
                break  # or raise an exception
            print string,
            time.sleep(0.5)

for i in xrange(5):
    thread.start_new_thread(print_for_n_secs,
                            ('thread%d' % (i,), 2))
    time.sleep(0.25)
This approach is more intrusive than using signals but it works for arbitrary threads.

I cannot see a way of doing what you are proposing with a context manager: you cannot yield the flow from one thread to another.
What I would do is wrap your function in an interruptible thread with the timeout; there is a recipe for that, and a minimal sketch of the idea follows.
You will have an extra thread and the syntax won't be as nice, but it works.
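Here is a hedged sketch of that idea (not the original recipe, which isn't linked here; all names below are illustrative): run the function in a daemon thread, wait with a timeout, and raise if the worker hasn't finished. Note that the worker itself keeps running in the background after the timeout.

import threading

class TimeoutException(Exception):
    pass

def run_with_timeout(func, args=(), kwargs=None, timeout=5):
    kwargs = kwargs or {}
    result = []  # error handling inside func is omitted for brevity

    def target():
        result.append(func(*args, **kwargs))

    worker = threading.Thread(target=target)
    worker.daemon = True    # don't keep the interpreter alive for it
    worker.start()
    worker.join(timeout)    # block for at most `timeout` seconds
    if worker.is_alive():   # still running: we stop waiting for it
        raise TimeoutException("timed out after %s seconds" % timeout)
    return result[0]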

I know it's late and I'm only just reading this, but what about creating your own signaller/context manager? I'm new to Python and would love feedback from experienced devs on this implementation.
This is based on the answer from "Mr Fooz".
import datetime
from threading import Thread

class TimeoutSignaller(Thread):
    def __init__(self, limit, handler):
        Thread.__init__(self)
        self.limit = limit
        self.running = True
        self.handler = handler
        assert callable(handler), "Timeout Handler needs to be a method"

    def run(self):
        timeout_limit = datetime.datetime.now() + datetime.timedelta(seconds=self.limit)
        while self.running:
            if datetime.datetime.now() >= timeout_limit:
                self.handler()
                self.stop_run()
                break

    def stop_run(self):
        self.running = False

class ProcessContextManager:
    def __init__(self, process, seconds=0, minutes=0, hours=0):
        self.seconds = (hours * 3600) + (minutes * 60) + seconds
        self.process = process
        self.signal = TimeoutSignaller(self.seconds, self.signal_handler)

    def __enter__(self):
        self.signal.start()
        return self.process

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.signal.stop_run()

    def signal_handler(self):
        # Make process terminate however you like
        # using self.process reference.
        # Note: this raise happens in the TimeoutSignaller thread,
        # not in the thread running the with-block.
        raise TimeoutError("Process took too long to execute")
Use case:

with ProcessContextManager(my_proc) as p:
    # do stuff, e.g.
    p.execute()

A similar implementation to Mr Fooz's, but using the contextlib library:
import time
from contextlib import contextmanager

@contextmanager
def timeout(seconds):
    """
    A simple context manager to enable timeouts.

    Example:
        with timeout(5) as t:
            while True:
                if t():
                    # handle
    """
    stop = time.time() + seconds

    def timed_out():
        return time.time() > stop

    yield timed_out

Timeouts for system calls are done with signals. Most blocking system calls return with EINTR when a signal happens, so you can use alarm to implement timeouts.
Here's a context manager that works with most system calls, causing IOError to be raised from a blocking system call if it takes too long.
import signal, errno
from contextlib import contextmanager
import fcntl

@contextmanager
def timeout(seconds):
    def timeout_handler(signum, frame):
        pass

    original_handler = signal.signal(signal.SIGALRM, timeout_handler)

    try:
        signal.alarm(seconds)
        yield
    finally:
        signal.alarm(0)
        signal.signal(signal.SIGALRM, original_handler)

with timeout(1):
    f = open("test.lck", "w")
    try:
        fcntl.flock(f.fileno(), fcntl.LOCK_EX)
    except IOError, e:
        if e.errno != errno.EINTR:
            raise e
        print "Lock timed out"


How to wrap a stuck function in a timer block? [duplicate]

There is a socket-related function call in my code. That function is from another module and thus out of my control; the problem is that it occasionally blocks for hours, which is totally unacceptable. How can I limit the function's execution time from my code? I guess the solution must utilize another thread.
An improvement on @rik.the.vik's answer would be to use the with statement to give the timeout function some syntactic sugar:
import signal
from contextlib import contextmanager

class TimeoutException(Exception): pass

@contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")
    signal.signal(signal.SIGALRM, signal_handler)
    signal.alarm(seconds)
    try:
        yield
    finally:
        signal.alarm(0)

try:
    with time_limit(10):
        long_function_call()
except TimeoutException as e:
    print("Timed out!")
I'm not sure how cross-platform this might be, but using signals and alarm might be a good way of looking at this. With a little work you could make this completely generic as well and usable in any situation.
http://docs.python.org/library/signal.html
So your code is going to look something like this.
import signal

def signal_handler(signum, frame):
    raise Exception("Timed out!")

signal.signal(signal.SIGALRM, signal_handler)
signal.alarm(10)  # Ten seconds
try:
    long_function_call()
except Exception, msg:
    print "Timed out!"
Here's a Linux/OSX way to limit a function's running time. This is in case you don't want to use threads, and want your program to wait until the function ends, or the time limit expires.
from multiprocessing import Process
from time import sleep

def f(time):
    sleep(time)

def run_with_limited_time(func, args, kwargs, time):
    """Runs a function with time limit

    :param func: The function to run
    :param args: The function's args, given as tuple
    :param kwargs: The function's keywords, given as dict
    :param time: The time limit in seconds
    :return: True if the function ended successfully. False if it was terminated.
    """
    p = Process(target=func, args=args, kwargs=kwargs)
    p.start()
    p.join(time)
    if p.is_alive():
        p.terminate()
        return False

    return True

if __name__ == '__main__':
    print run_with_limited_time(f, (1.5, ), {}, 2.5)  # True
    print run_with_limited_time(f, (3.5, ), {}, 2.5)  # False
I prefer a context manager approach because it allows the execution of multiple Python statements within a with time_limit statement. Because Windows does not have SIGALRM, a more portable and perhaps more straightforward method is to use a Timer:
from contextlib import contextmanager
import threading
import _thread

class TimeoutException(Exception):
    def __init__(self, msg=''):
        self.msg = msg

@contextmanager
def time_limit(seconds, msg=''):
    timer = threading.Timer(seconds, lambda: _thread.interrupt_main())
    timer.start()
    try:
        yield
    except KeyboardInterrupt:
        raise TimeoutException("Timed out for operation {}".format(msg))
    finally:
        # if the action ends in specified time, timer is canceled
        timer.cancel()

import time

# ends after 5 seconds
with time_limit(5, 'sleep'):
    for i in range(10):
        time.sleep(1)

# this will actually end after 10 seconds
with time_limit(5, 'sleep'):
    time.sleep(10)
The key technique here is the use of _thread.interrupt_main to interrupt the main thread from the timer thread. One caveat is that the main thread does not always respond to the KeyboardInterrupt raised by the Timer quickly. For example, time.sleep() calls a system function so a KeyboardInterrupt will be handled after the sleep call.
Here's a simple way of getting the desired effect:
https://pypi.org/project/func-timeout
This saved my life.
And now an example of how it works: let's say you have a huge list of items to be processed and you are iterating your function over those items. However, for some strange reason, your function gets stuck on item n without raising an exception. You need the other items to be processed, the more the better. In this case, you can set a timeout for processing each item:
import time
import func_timeout

def my_function(n):
    """Sleep for n seconds and return n squared."""
    print(f'Processing {n}')
    time.sleep(n)
    return n**2

def main_controller(max_wait_time, all_data):
    """
    Feed my_function with a list of items to process (all_data).
    However, if max_wait_time is exceeded, return the item and a fail info.
    """
    res = []
    for data in all_data:
        try:
            my_square = func_timeout.func_timeout(
                max_wait_time, my_function, args=[data]
            )
            res.append((my_square, 'processed'))
        except func_timeout.FunctionTimedOut:
            print('error')
            res.append((data, 'fail'))
            continue
    return res

timeout_time = 2.1  # my time limit
all_data = range(1, 10)  # the data to be processed
res = main_controller(timeout_time, all_data)
print(res)
Doing this from within a signal handler is dangerous: you might be inside an exception handler at the time the exception is raised, leaving things in a broken state. For example:
def function_with_enforced_timeout():
    f = open_temporary_file()
    try:
        ...
    finally:
        here()
        unlink(f.filename)
If your exception is raised while here() is executing, the temporary file will never be deleted.
The solution here is for asynchronous exceptions to be postponed until the code is not inside exception-handling code (an except or finally block), but Python doesn't do that.
Note that this won't interrupt anything while executing native code; it'll only interrupt it when the function returns, so this may not help this particular case. (SIGALRM itself might interrupt the call that's blocking--but socket code typically simply retries after an EINTR.)
Doing this with threads is a better idea, since it's more portable than signals. Since you're starting a worker thread and blocking until it finishes, there are none of the usual concurrency worries. Unfortunately, there's no way to deliver an exception asynchronously to another thread in Python (other thread APIs can do this). It'll also have the same issue with sending an exception during an exception handler, and require the same fix.
You don't have to use threads. You can use another process to do the blocking work, for instance, maybe using the subprocess module. If you want to share data structures between different parts of your program then Twisted is a great library for giving yourself control of this, and I'd recommend it if you care about blocking and expect to have this trouble a lot. The bad news with Twisted is you have to rewrite your code to avoid any blocking, and there is a fair learning curve.
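For instance, here is a minimal sketch of the subprocess route, assuming Python 3.5+ where subprocess.run accepts a timeout; the command shown is only a stand-in for your blocking work. On timeout, the child process is killed and TimeoutExpired is raised.

import subprocess
import sys

try:
    subprocess.run(
        [sys.executable, "-c", "import time; time.sleep(10)"],  # stand-in work
        timeout=5,  # give up after 5 seconds
    )
except subprocess.TimeoutExpired:
    print("child took too long and was killed")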
You can use threads to avoid blocking, but I'd regard this as a last resort, since it exposes you to a whole world of pain. Read a good book on concurrency before even thinking about using threads in production, e.g. Jean Bacon's "Concurrent Systems". I work with a bunch of people who do really cool high performance stuff with threads, and we don't introduce threads into projects unless we really need them.
The only "safe" way to do this, in any language, is to use a secondary process to do that timeout-thing, otherwise you need to build your code in such a way that it will time out safely by itself, for instance by checking the time elapsed in a loop or similar. If changing the method isn't an option, a thread will not suffice.
Why? Because you're risking leaving things in a bad state when you do. If the thread is simply killed mid-method, locks being held, etc. will just be held, and cannot be released.
So look at the process way, do not look at the thread way.
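As one concrete flavor of the process way, here is a hedged sketch using concurrent.futures (Python 3.2+). Note that the executor does not kill the worker on timeout: future.result merely stops waiting, and leaving the with block still waits for the worker to finish, so forceful termination needs multiprocessing.Process.terminate() as shown in an earlier answer.

import concurrent.futures
import time

def slow_work(n):
    time.sleep(n)  # stand-in for the real work
    return n * 2

if __name__ == '__main__':
    with concurrent.futures.ProcessPoolExecutor(max_workers=1) as ex:
        future = ex.submit(slow_work, 10)
        try:
            print(future.result(timeout=2))  # wait at most 2 seconds
        except concurrent.futures.TimeoutError:
            print("gave up waiting; the worker process is still running")
        # leaving the with block waits for the worker to finish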
I would usually prefer using a context manager as suggested by @josh-lee, but in case someone is interested in having this implemented as a decorator, here's an alternative.
Here's what it would look like:
import time
from timeout import timeout

class Test(object):
    @timeout(2)
    def test_a(self, foo, bar):
        print foo
        time.sleep(1)
        print bar
        return 'A Done'

    @timeout(2)
    def test_b(self, foo, bar):
        print foo
        time.sleep(3)
        print bar
        return 'B Done'

t = Test()
print t.test_a('python', 'rocks')
print t.test_b('timing', 'out')
And this is the timeout.py module:
import threading

class TimeoutError(Exception):
    pass

class InterruptableThread(threading.Thread):
    def __init__(self, func, *args, **kwargs):
        threading.Thread.__init__(self)
        self._func = func
        self._args = args
        self._kwargs = kwargs
        self._result = None

    def run(self):
        self._result = self._func(*self._args, **self._kwargs)

    @property
    def result(self):
        return self._result

class timeout(object):
    def __init__(self, sec):
        self._sec = sec

    def __call__(self, f):
        def wrapped_f(*args, **kwargs):
            it = InterruptableThread(f, *args, **kwargs)
            it.start()
            it.join(self._sec)
            if not it.is_alive():
                return it.result
            raise TimeoutError('execution expired')
        return wrapped_f
The output:
python
rocks
A Done
timing
Traceback (most recent call last):
...
timeout.TimeoutError: execution expired
out
Notice that even if the TimeoutError is raised, the decorated method will continue to run in a different thread. If you would also want this thread to be "stopped", see: Is there any way to kill a Thread in Python? A cooperative sketch follows.
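For completeness, here is a hedged sketch of the usual cooperative answer to that linked question: give the worker a threading.Event and have it check the flag regularly, instead of killing it.

import threading
import time

def worker(stop_event):
    while not stop_event.is_set():   # cooperate by checking the flag
        time.sleep(0.1)              # stand-in for one unit of real work

stop = threading.Event()
t = threading.Thread(target=worker, args=(stop,))
t.start()
t.join(2)        # wait up to 2 seconds for the work to finish
if t.is_alive():
    stop.set()   # ask the worker to stop instead of killing it
    t.join()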
Using a simple decorator
Here's the version I made after studying the answers above. Pretty straightforward.
import signal
from contextlib import contextmanager
from functools import wraps

class TimeoutException(Exception):
    pass

def function_timeout(seconds: int):
    """Wrapper of Decorator to pass arguments"""
    def decorator(func):
        @contextmanager
        def time_limit(seconds_):
            def signal_handler(signum, frame):  # noqa
                raise TimeoutException(f"Timed out in {seconds_} seconds!")
            signal.signal(signal.SIGALRM, signal_handler)
            signal.alarm(seconds_)
            try:
                yield
            finally:
                signal.alarm(0)

        @wraps(func)
        def wrapper(*args, **kwargs):
            with time_limit(seconds):
                return func(*args, **kwargs)
        return wrapper
    return decorator
How to use?

@function_timeout(seconds=5)
def my_naughty_function():
    while True:
        print("Try to stop me ;-p")
Well of course, don't forget to import the function if it is in a separate file.
Here's a timeout function I think I found via Google, and it works for me.
From:
http://code.activestate.com/recipes/473878/
def timeout(func, args=(), kwargs={}, timeout_duration=1, default=None):
    '''This function will spawn a thread and run the given function using the args, kwargs and
    return the given default value if the timeout_duration is exceeded
    '''
    import threading

    class InterruptableThread(threading.Thread):
        def __init__(self):
            threading.Thread.__init__(self)
            self.result = default

        def run(self):
            try:
                self.result = func(*args, **kwargs)
            except:
                self.result = default

    it = InterruptableThread()
    it.start()
    it.join(timeout_duration)
    if it.isAlive():
        return default  # timed out: the thread is still running
    else:
        return it.result
The method from @user2283347 works in testing, but we want to get rid of the traceback messages. Using the pass trick from Remove traceback in Python on Ctrl-C, the modified code is:
from contextlib import contextmanager
import threading
import _thread

class TimeoutException(Exception): pass

@contextmanager
def time_limit(seconds):
    timer = threading.Timer(seconds, lambda: _thread.interrupt_main())
    timer.start()
    try:
        yield
    except KeyboardInterrupt:
        pass
    finally:
        # if the action ends in specified time, timer is canceled
        timer.cancel()

def timeout_svm_score(i):
    # from sklearn import svm
    # import numpy as np
    # from IPython.core.display import display
    # %store -r names X Y
    clf = svm.SVC(kernel='linear', C=1).fit(np.nan_to_num(X[[names[i]]]), Y)
    score = clf.score(np.nan_to_num(X[[names[i]]]), Y)
    # scoressvm.append((score, names[i]))
    display((score, names[i]))

%%time
with time_limit(5):
    i = 0
    timeout_svm_score(i)
# Wall time: 14.2 s

%%time
with time_limit(20):
    i = 0
    timeout_svm_score(i)
# (0.04541284403669725, '计划飞行时间')
# Wall time: 16.1 s

%%time
with time_limit(5):
    i = 14
    timeout_svm_score(i)
# Wall time: 5h 43min 41s
We can see that this method may take far longer than requested to interrupt the calculation: we asked for 5 seconds, but it took over 5 hours.
This code works on Windows Server Datacenter 2016 with Python 3.7.3; I didn't test it on Unix. After mixing some answers from Google and Stack Overflow, it finally worked for me like this:
from multiprocessing import Process, Lock
import time
import os

def f(lock, id, sleepTime):
    lock.acquire()
    print("I'm P" + str(id) + " Process ID: " + str(os.getpid()))
    lock.release()
    time.sleep(sleepTime)  # sleeps for some time
    print("Process: " + str(id) + " took this much time:" + str(sleepTime))
    time.sleep(sleepTime)
    print("Process: " + str(id) + " took this much time:" + str(sleepTime * 2))

if __name__ == '__main__':
    timeout_function = float(9)  # 9 seconds for max function time
    print("Main Process ID: " + str(os.getpid()))
    lock = Lock()
    p1 = Process(target=f, args=(lock, 1, 6,))  # Here you can change from 6 to 3 for instance, so you can watch the behavior
    start = time.time()
    print(type(start))
    p1.start()
    if p1.is_alive():
        print("process running a")
    else:
        print("process not running a")
    while p1.is_alive():
        timeout = time.time()
        if timeout - start > timeout_function:
            p1.terminate()
            print("process terminated")
        print("watching, time passed: " + str(timeout - start))
        time.sleep(1)
    if p1.is_alive():
        print("process running b")
    else:
        print("process not running b")
    p1.join()
    if p1.is_alive():
        print("process running c")
    else:
        print("process not running c")
    end = time.time()
    print("I am the main process, the two processes are done")
    print("Time taken:- " + str(end - start) + " secs")  # MainProcess terminates at approx ~ 5 secs.
    time.sleep(5)  # To see if on Task Manager the child process is really being terminated, and it is
    print("finishing")
The main code is from this link:
Create two child process using python(windows)
Then I used .terminate() to kill the child process. You can see that the function f makes two prints: with sleepTime=6 as in the code above, one comes after 6 seconds and the other after 12 seconds. With the 9-second limit and terminate(), the last print never shows.
It worked for me; I hope it helps!

Timeout from a hanging process (stuck os calls)

I have a Python program accessing an NFS-mounted file system. Sometimes the file system becomes inaccessible and a simple os.stat("/path/to/file") will hang the process. I have tried the following timeout wrapper snippet, but it doesn't seem effective when dealing with "bad" OS system calls (bad in the sense that they won't return). For example, it works with:
with timeout(seconds=3):
    sleep(4)
but it doesn't work with:
with timeout(seconds=3):
    os.stat("/nfs/mounted/filesystem")
Are there any other ways I can kick myself out of the hung process?
import signal

class timeout:
    """
    Usage:
        with timeout(seconds=3):
            sleep(4)
    """
    def __init__(self, seconds=1, error_message='Timeout'):
        self.seconds = seconds
        self.error_message = error_message

    def handle_timeout(self, signum, frame):
        raise TimeoutError(self.error_message)

    def __enter__(self):
        signal.signal(signal.SIGALRM, self.handle_timeout)
        signal.alarm(self.seconds)

    def __exit__(self, type, value, traceback):
        signal.alarm(0)
You can use a watchdog process. For instance (pseudocode):

watchdog_queue.put((my_id, 'watch', 5))   # register: watch me, 5 second limit
os.stat("/nfs/mounted/filesystem")
watchdog_queue.put((my_id, 'clear'))

If the watchdog process gets no (my_id, 'clear') message within 5 seconds, it kills the process/thread with my_id. A concrete sketch follows.
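Here is a minimal sketch of that idea, assuming the blocking call runs in a multiprocessing.Process (so it can actually be terminated) and the watchdog is a daemon thread in the parent; the queue protocol is simplified to a single registration message.

import multiprocessing
import os
import queue
import threading

def blocking_call():
    os.stat("/nfs/mounted/filesystem")   # may hang forever

watchdog_queue = queue.Queue()

def watchdog():
    while True:
        proc, limit = watchdog_queue.get()   # (process handle, seconds)
        proc.join(limit)                     # a normal exit acts as the 'clear'
        if proc.is_alive():
            proc.terminate()                 # no result in time: kill it

if __name__ == '__main__':
    threading.Thread(target=watchdog, daemon=True).start()
    p = multiprocessing.Process(target=blocking_call)
    p.start()
    watchdog_queue.put((p, 5))   # register: watch p with a 5 second limit
    p.join()                     # returns once p finishes or is terminated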
You can try this; it's not optimized, but it should work:

i = 0
while i < 200:
    try:
        os.stat("/nfs/mounted/filesystem")
        i = 200          # success: leave the loop
    except:
        i += 1           # keep retrying, up to 200 attempts
        time.sleep(1)

Python asynchronous thread exception handling

I'm trying to implement a timeout functionality in Python.
It works by wrapping functions with a function decorator that calls the function as a thread but also calls a 'watchdog' thread that will raise an exception in the function thread after a specified period has elapsed.
It currently works for threads that don't sleep. During the do_rand call, I suspect the "asynchronous" exception is actually being raised after the time.sleep call, once execution has moved beyond the try/except block, as this would explain the "Unhandled exception in thread started by" error. Additionally, the error from the do_rand call appears 7 seconds after the call (the duration of time.sleep).
How would I go about "waking" a thread up (using ctypes?) to get it to respond to an asynchronous exception?
Or possibly a different approach altogether?
Code:
# Import System libraries
import ctypes
import random
import sys
import threading
import time

class TimeoutException(Exception):
    pass

def terminate_thread(thread, exc_type = SystemExit):
    """Terminates a python thread from another thread.

    :param thread: a threading.Thread instance
    """
    if not thread.isAlive():
        return

    exc = ctypes.py_object(exc_type)
    res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(thread.ident), exc)
    if res == 0:
        raise ValueError("nonexistent thread id")
    elif res > 1:
        # """if it returns a number greater than one, you're in trouble,
        # and you should call it again with exc=NULL to revert the effect"""
        ctypes.pythonapi.PyThreadState_SetAsyncExc(thread.ident, None)
        raise SystemError("PyThreadState_SetAsyncExc failed")

class timeout_thread(threading.Thread):
    def __init__(self, interval, target_thread):
        super(timeout_thread, self).__init__()
        self.interval = interval
        self.target_thread = target_thread
        self.done_event = threading.Event()
        self.done_event.clear()

    def run(self):
        timeout = not self.done_event.wait(self.interval)
        if timeout:
            terminate_thread(self.target_thread, TimeoutException)

class timeout_wrapper(object):
    def __init__(self, interval = 300):
        self.interval = interval

    def __call__(self, f):
        def wrap_func(*args, **kwargs):
            thread = threading.Thread(target = f, args = args, kwargs = kwargs)
            thread.setDaemon(True)
            timeout_ticker = timeout_thread(self.interval, thread)
            timeout_ticker.setDaemon(True)
            timeout_ticker.start()
            thread.start()
            thread.join()
            timeout_ticker.done_event.set()
        return wrap_func

@timeout_wrapper(2)
def print_guvnah():
    try:
        while True:
            print "guvnah"
    except TimeoutException:
        print "blimey"

def print_hello():
    try:
        while True:
            print "hello"
    except TimeoutException:
        print "Whoops, looks like I timed out"

def do_rand(*args):
    try:
        rand_num = 7   # random.randint(0, 10)
        rand_pause = 7  # random.randint(0, 5)
        print "Got rand: %d" % rand_num
        print "Waiting for %d seconds" % rand_pause
        time.sleep(rand_pause)
    except TimeoutException:
        print "Waited too long"

print_guvnah()
timeout_wrapper(3)(print_hello)()
timeout_wrapper(2)(do_rand)()
The problem is that time.sleep blocks, and it blocks really hard, so the only thing that can actually interrupt it is a process signal. But code with signals gets really messy, and in some cases even signals don't work (for example when you are doing a blocking socket.recv(); see: recv() is not interrupted by a signal in multithreaded environment).
So generally, interrupting a thread (without killing the entire process) cannot be done, not to mention that other code can simply override your signal handling from a thread.
But in this particular case, instead of using time.sleep you can use the Event class from the threading module:

Thread 1:

from threading import Event

ev = Event()
ev.clear()
state = ev.wait(rand_pause)  # this blocks until timeout or .set() call

Thread 2 (make sure it has access to the same ev instance):

ev.set()  # this will unlock .wait above

Note that state will be the internal state of the event. Thus state == True will mean that it was unlocked with .set(), while state == False will mean that the timeout occurred.
Read more about events here:
http://docs.python.org/2/library/threading.html#event-objects
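Putting the two fragments together, a small self-contained sketch: the worker blocks on ev.wait instead of time.sleep, so another thread can wake it early with ev.set().

import threading

ev = threading.Event()

def worker(pause):
    woken = ev.wait(pause)  # True if .set() was called, False on timeout
    print("woken early" if woken else "slept the full %s seconds" % pause)

t = threading.Thread(target=worker, args=(7,))
t.start()
ev.set()    # wake the worker immediately instead of after 7 seconds
t.join()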
You'd need to use something other than sleep, or you'd need to send a signal to the other thread in order to make it wake up.
One option I've used is to set up a pair of file descriptors and use select or poll instead of sleep; this lets you write something to the file descriptor to wake up the other thread, as sketched below. Alternatively, you just wait until the sleep finishes, if all you need is for the operation to error out because it took too long and nothing else is depending on it.
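Here is a hedged sketch of that file-descriptor trick on Unix (select on pipes does not work on Windows, where select is limited to sockets): sleep by select()ing on the read end of a pipe, so that writing a single byte to the write end wakes the sleeper early.

import os
import select
import threading
import time

r, w = os.pipe()

def interruptible_sleep(seconds):
    # returns True if woken early, False if the full time elapsed
    readable, _, _ = select.select([r], [], [], seconds)
    if readable:
        os.read(r, 1)   # drain the wake-up byte
        return True
    return False

t = threading.Thread(target=lambda: print("woken early:", interruptible_sleep(10)))
t.start()
time.sleep(0.5)
os.write(w, b"x")   # wake the sleeping thread well before 10 seconds
t.join()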

Python: Catch Ctrl-C command. Prompt "really want to quit (y/n)", resume execution if no

I have a program that may have a lengthy execution. In the main module I have the following:
import signal

def run_program():
    ...  # time-consuming execution

def Exit_gracefully(signal, frame):
    ...  # log exiting information
    ...  # close any open files
    sys.exit(0)

if __name__ == '__main__':
    signal.signal(signal.SIGINT, Exit_gracefully)
    run_program()
This works fine, but I'd like the possibility to pause execution upon catching SIGINT, prompting the user if they would really like to quit, and resuming where I left off in run_program() if they decide they don't want to quit.
The only way I can think of doing this is running the program in a separate thread, keeping the main thread waiting on it and ready to catch SIGINT. If the user wants to quit the main thread can do cleanup and kill the child thread.
Is there a simpler way?
The Python signal handlers do not seem to be real signal handlers; that is, they happen after the fact, in the normal flow, and after the C handler has already returned. Thus you'd put your quit logic within the signal handler. As the signal handler runs in the main thread, it will block execution there too.
Something like this seems to work nicely.
import signal
import time
import sys

def run_program():
    while True:
        time.sleep(1)
        print("a")

def exit_gracefully(signum, frame):
    # restore the original signal handler as otherwise evil things will happen
    # in raw_input when CTRL+C is pressed, and our signal handler is not re-entrant
    signal.signal(signal.SIGINT, original_sigint)

    try:
        if raw_input("\nReally quit? (y/n)> ").lower().startswith('y'):
            sys.exit(1)
    except KeyboardInterrupt:
        print("Ok ok, quitting")
        sys.exit(1)

    # restore the exit gracefully handler here
    signal.signal(signal.SIGINT, exit_gracefully)

if __name__ == '__main__':
    # store the original SIGINT handler
    original_sigint = signal.getsignal(signal.SIGINT)
    signal.signal(signal.SIGINT, exit_gracefully)
    run_program()
The code restores the original signal handler for the duration of raw_input; raw_input itself is not re-entrant, and re-entering it will lead to RuntimeError: can't re-enter readline being raised from time.sleep, which is something we don't want, as it is harder to catch than KeyboardInterrupt. Instead, we let two consecutive Ctrl-C presses raise KeyboardInterrupt.
from https://gist.github.com/rtfpessoa/e3b1fe0bbfcd8ac853bf
#!/usr/bin/env python
import signal
import sys

def signal_handler(signal, frame):
    # your code here
    sys.exit(0)

signal.signal(signal.SIGINT, signal_handler)
Bye!
When the procedure ends, do something
Suppose you just want the procedure to do something after the task ends:
import time

class TestTask:
    def __init__(self, msg: str):
        self.msg = msg

    def __enter__(self):
        print(f'Task Start!:{self.msg}')
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        print('Task End!')

    @staticmethod
    def do_something():
        time.sleep(5)

with TestTask('Hello World') as task:
    task.do_something()
When the process leaves the with block, __exit__ will run, even if a KeyboardInterrupt happens; the behavior is the same.
If you don't like to see the error, add try ... except ...:
@staticmethod
def do_something():
    try:
        time.sleep(5)
    except:
        pass
Pause, continue, reset, etc.
I don't have a perfect solution, but it may be useful to you.
The idea is to divide your process into many sub-jobs and record which ones have finished; a job that is already done will not be executed again.
import time
from enum import Enum

class Action(Enum):
    EXIT = 0
    CONTINUE = 1
    RESET = 2

class TestTask:
    def __init__(self, msg: str):
        self.msg = msg

    def __enter__(self):
        print(f'Task Start!:{self.msg}')
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        print('Task End!')

    def do_something(self):
        tuple_job = (self._foo, self._bar)  # implement by yourself
        list_job_state = [0] * len(tuple_job)
        dict_keep = {}  # If there is a need to communicate between jobs, and you don't want to use class members, you can use this method.
        while 1:
            try:
                for idx, cur_process in enumerate(tuple_job):
                    if not list_job_state[idx]:
                        cur_process(dict_keep)
                        list_job_state[idx] = True
                if all(list_job_state):
                    print('100%')
                    break
            except KeyboardInterrupt:
                print('KeyboardInterrupt. input action:')
                msg = '\n\t'.join([f"{action + ':':<10}{str(act_number)}" for act_number, action in
                                   enumerate([name for name in vars(Action) if not name.startswith('_')])
                                   ])
                case = Action(int(input(f'\t{msg}\n:')))
                if case == Action.EXIT:
                    break
                if case == Action.RESET:
                    list_job_state = [0] * len(tuple_job)

    @staticmethod
    def _foo(keep_dict: dict) -> bool:  # implement by yourself
        time.sleep(2)
        print('1%')
        print('2%')
        print('...')
        print('60%')
        keep_dict['status_1'] = 'status_1'
        return True

    @staticmethod
    def _bar(keep_dict: dict) -> bool:  # implement by yourself
        time.sleep(2)
        print('61%')
        print(keep_dict.get('status_1'))
        print('...')
        print('99%')
        return True

with TestTask('Hello World') as task:
    task.do_something()
console
input action number:2
Task Start!:Hello World
1%
2%
...
60%
KeyboardInterrupt. input action:
EXIT: 0
CONTINUE: 1
RESET: 2
:1
61%
status_1
...
99%
100%
Task End!

How do I capture SIGINT in Python?

I'm working on a python script that starts several processes and database connections. Every now and then I want to kill the script with a Ctrl+C signal, and I'd like to do some cleanup.
In Perl I'd do this:
$SIG{'INT'} = 'exit_gracefully';
sub exit_gracefully {
print "Caught ^C \n";
exit (0);
}
How do I do the analogue of this in Python?
Register your handler with signal.signal like this:
#!/usr/bin/env python
import signal
import sys

def signal_handler(sig, frame):
    print('You pressed Ctrl+C!')
    sys.exit(0)

signal.signal(signal.SIGINT, signal_handler)
print('Press Ctrl+C')
signal.pause()
Code adapted from here.
More documentation on signal can be found here.
 
You can treat it like an exception (KeyboardInterrupt), like any other. Make a new file and run it from your shell with the following contents to see what I mean:
import time, sys

x = 1
while True:
    try:
        print x
        time.sleep(.3)
        x += 1
    except KeyboardInterrupt:
        print "Bye"
        sys.exit()
And as a context manager:
import signal

class GracefulInterruptHandler(object):
    def __init__(self, sig=signal.SIGINT):
        self.sig = sig

    def __enter__(self):
        self.interrupted = False
        self.released = False

        self.original_handler = signal.getsignal(self.sig)

        def handler(signum, frame):
            self.release()
            self.interrupted = True

        signal.signal(self.sig, handler)

        return self

    def __exit__(self, type, value, tb):
        self.release()

    def release(self):
        if self.released:
            return False

        signal.signal(self.sig, self.original_handler)

        self.released = True

        return True

To use:

with GracefulInterruptHandler() as h:
    for i in xrange(1000):
        print "..."
        time.sleep(1)
        if h.interrupted:
            print "interrupted!"
            time.sleep(2)
            break

Nested handlers:

with GracefulInterruptHandler() as h1:
    while True:
        print "(1)..."
        time.sleep(1)
        with GracefulInterruptHandler() as h2:
            while True:
                print "\t(2)..."
                time.sleep(1)
                if h2.interrupted:
                    print "\t(2) interrupted!"
                    time.sleep(2)
                    break
        if h1.interrupted:
            print "(1) interrupted!"
            time.sleep(2)
            break
From here: https://gist.github.com/2907502
You can handle CTRL+C by catching the KeyboardInterrupt exception. You can implement any clean-up code in the exception handler.
From Python's documentation:
import signal
import time

def handler(signum, frame):
    print 'Here you go'

signal.signal(signal.SIGINT, handler)

time.sleep(10)  # Press Ctrl+c here
Yet another snippet. Here main is the main function and exit_gracefully is the Ctrl+C handler:

if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        pass
    finally:
        exit_gracefully()
I adapted the code from @udi to support multiple signals (nothing fancy):
import signal

class GracefulInterruptHandler(object):
    def __init__(self, signals=(signal.SIGINT, signal.SIGTERM)):
        self.signals = signals
        self.original_handlers = {}

    def __enter__(self):
        self.interrupted = False
        self.released = False

        for sig in self.signals:
            self.original_handlers[sig] = signal.getsignal(sig)
            signal.signal(sig, self.handler)

        return self

    def handler(self, signum, frame):
        self.release()
        self.interrupted = True

    def __exit__(self, type, value, tb):
        self.release()

    def release(self):
        if self.released:
            return False

        for sig in self.signals:
            signal.signal(sig, self.original_handlers[sig])

        self.released = True
        return True
This code supports the keyboard interrupt (SIGINT) as well as SIGTERM (kill <process>).
In contrast to Matt J's answer, I use a simple object. This gives me the possibility to pass this handler to all the threads that need to be stopped securely.
class SIGINT_handler():
    def __init__(self):
        self.SIGINT = False

    def signal_handler(self, signal, frame):
        print('You pressed Ctrl+C!')
        self.SIGINT = True

handler = SIGINT_handler()
signal.signal(signal.SIGINT, handler.signal_handler)

Elsewhere:

while True:
    # task
    if handler.SIGINT:
        break
If you want to ensure that your cleanup process finishes, I would add to Matt J's answer by using SIG_IGN so that further SIGINTs are ignored, which will prevent your cleanup from being interrupted.
import signal
import sys

def signal_handler(signum, frame):
    signal.signal(signum, signal.SIG_IGN)  # ignore additional signals
    cleanup()  # give your process a chance to clean up
    sys.exit(0)

signal.signal(signal.SIGINT, signal_handler)  # register the signal with the signal handler first
do_stuff()
You can use the functions in Python's built-in signal module to set up signal handlers in python. Specifically the signal.signal(signalnum, handler) function is used to register the handler function for signal signalnum.
Thanks to the existing answers; this version adds signal.getsignal():
import signal

# store default handler of signal.SIGINT
default_handler = signal.getsignal(signal.SIGINT)
catch_count = 0

def handler(signum, frame):
    global default_handler, catch_count
    catch_count += 1
    print('wait:', catch_count)
    if catch_count > 3:
        # recover handler for signal.SIGINT
        signal.signal(signal.SIGINT, default_handler)
        print('expecting KeyboardInterrupt')

signal.signal(signal.SIGINT, handler)
print('Press Ctrl+c here')

while True:
    pass
Personally, I couldn't use try/except KeyboardInterrupt because I was using standard (blocking) socket IPC. So the SIGINT was queued, but came only after receiving data on the socket.
Setting a signal handler behaves the same.
On the other hand, this only works for an actual terminal; other starting environments might not accept Ctrl+C, or might pre-handle the signal.
Also, there are "Exceptions" and "BaseExceptions" in Python, which differ in the sense that the interpreter needs to exit cleanly itself, so some exceptions have a higher priority than others (Exception is derived from BaseException); an illustration follows.
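As a small illustration of why that hierarchy matters here: KeyboardInterrupt derives from BaseException rather than Exception, so a broad except Exception will not swallow Ctrl+C.

import time

try:
    time.sleep(10)  # press Ctrl+C during the sleep
except Exception:
    print("not reached for Ctrl+C")     # Ctrl+C is not an Exception
except KeyboardInterrupt:
    print("Ctrl+C caught explicitly")   # it is a BaseException subclass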
