When I run the following code on OSX or Linux and then press ctrl+c a "graceful shutdown" is initiated. Which looks something like this:
$ python subprocess_test.py
Subprocess: <MyProcess(MyProcess-1, started)>
^CMain: Graceful shutdown
Subprocess: shutdown
However, when I run the same code on a Windows 10 machine, a KeyboardInterrupt is raised in the line self.event.wait(), preventing a graceful shutdown. I have tried different approaches as described here to prevent the subprocess from receiving the signal.
What is the correct way to get the same behavior on the different operating systems using Python 2.7?
import multiprocessing
import signal
class MyProcess(multiprocessing.Process):
def __init__(self):
super(MyProcess, self).__init__()
self.event = multiprocessing.Event()
def run(self):
print "Subprocess: ", multiprocessing.current_process()
self.event.wait()
print "Subprocess: shutdown"
def sighandler(a,b,):
print "Main: Graceful shutdown"
p1.event.set()
def run():
signal.signal(signal.SIGINT, signal.SIG_IGN)
global p1
p1 = MyProcess()
p1.start()
signal.signal(signal.SIGINT, sighandler)
p1.join()
if __name__ == '__main__':
run()
Using win32api.SetConsoleCtrlHandler from pywin32, one can control how Windows handles the interrupts. Using SetConsoleCtrlHandler(None, True) causes the calling process to ignore CTRL+C input. With SetConsoleCtrlHandler(sighandler, True) a specific handler can be registered.
Putting it all together the issue is addressed like this:
import multiprocessing
import signal
import sys
class MyProcess(multiprocessing.Process):
def __init__(self):
super(MyProcess, self).__init__()
self.event = multiprocessing.Event()
def run(self):
if sys.platform == "win32":
import win32api # ignoring the signal
win32api.SetConsoleCtrlHandler(None, True)
print "Subprocess: ", multiprocessing.current_process()
self.event.wait()
print "Subprocess: shutdown"
def sighandler(a,b=None):
print "Main: Graceful shutdown"
p1.event.set()
def run():
signal.signal(signal.SIGINT, signal.SIG_IGN)
global p1
p1 = MyProcess()
p1.start()
if sys.platform == "win32":
import win32api
win32api.SetConsoleCtrlHandler(sighandler, True)
else:
signal.signal(signal.SIGINT, sighandler)
p1.join()
if __name__ == '__main__':
run()
On Windows SIGINT is implemented using a console control event handler for CTRL_C_EVENT. It's the console state that gets inherited by a child process, not the CRT's signal handling state. Thus you need to first call SetConsoleCtrlHandler to ignore Ctrl+C in the parent process before creating a child process if you want the child to ignore Ctrl+C.
There's a catch. Python doesn't use alertable waits on Windows, such as the wait in the process join method. Since it dispatches signal handlers on the main thread, the fact that the main thread is blocked in join() means your signal handler will never be called. You have to replace the join with a loop on time.sleep(), which is interruptible by Ctrl+C because internally it waits on a Windows Event and sets its own control handler that sets this Event. Or you can instead use your own asynchronous control handler set via ctypes. The following example implements both approaches and works in both Python 2 and 3.
import sys
import signal
import multiprocessing

if sys.platform == "win32":
    # Handle Ctrl+C in the Windows Console
    import time
    import errno
    import ctypes
    import threading
    from ctypes import wintypes

    kernel32 = ctypes.WinDLL("kernel32", use_last_error=True)

    # BOOL WINAPI HandlerRoutine(DWORD dwCtrlType)
    PHANDLER_ROUTINE = ctypes.WINFUNCTYPE(
        wintypes.BOOL,
        wintypes.DWORD)  # _In_ dwCtrlType

    win_ignore_ctrl_c = PHANDLER_ROUTINE()  # alias for NULL handler

    def _errcheck_bool(result, func, args):
        # Raise the Win32 last error when the API call reports failure.
        if not result:
            raise ctypes.WinError(ctypes.get_last_error())
        return args

    kernel32.SetConsoleCtrlHandler.errcheck = _errcheck_bool
    kernel32.SetConsoleCtrlHandler.argtypes = (
        PHANDLER_ROUTINE,  # _In_opt_ HandlerRoutine
        wintypes.BOOL)     # _In_ Add


class MyProcess(multiprocessing.Process):
    """Child process that waits on a shared Event before shutting down."""

    def __init__(self):
        super(MyProcess, self).__init__()
        self.event = multiprocessing.Event()

    def run(self):
        print("Subprocess: %r" % multiprocessing.current_process())
        self.event.wait()
        print("Subprocess: shutdown")

    if sys.platform == "win32":
        def join(self, timeout=None):
            # Process.join() is not interruptible by Ctrl+C on Windows
            # (Python does not use alertable waits), so in the main
            # thread poll is_alive() with short time.sleep() calls,
            # which are interruptible.
            if threading.current_thread().name != "MainThread":
                super(MyProcess, self).join(timeout)
            else:
                # use time.sleep to allow the main thread to
                # interruptible by Ctrl+C
                interval = 1
                remaining = timeout
                while self.is_alive():
                    if timeout is not None:
                        if remaining <= 0:
                            break
                        if remaining < interval:
                            interval = remaining
                            remaining = 0
                        else:
                            remaining -= interval
                    try:
                        time.sleep(interval)
                    except IOError as e:
                        # EINTR means a signal interrupted the sleep;
                        # stop waiting so the handler can take effect.
                        if e.errno != errno.EINTR:
                            raise
                        break


def run():
    p1 = MyProcess()
    # Ignore Ctrl+C, which is inherited by the child process.
    if sys.platform == "win32":
        kernel32.SetConsoleCtrlHandler(win_ignore_ctrl_c, True)
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    p1.start()
    # Set a Ctrl+C handler to signal graceful shutdown.
    if sys.platform == "win32":
        kernel32.SetConsoleCtrlHandler(win_ignore_ctrl_c, False)

        # comment out the following to rely on sig_handler
        # instead. Note that using the normal sig_handler requires
        # joining using a loop on time.sleep() instead of the
        # normal process join method. See the join() method
        # defined above.
        # BUG FIX: the callback must be wrapped as a PHANDLER_ROUTINE
        # (the decorator was commented out in the original); ctypes
        # will not accept a bare Python function for a WINFUNCTYPE
        # argument, and the wrapper also keeps the callback alive.
        @PHANDLER_ROUTINE
        def win_ctrl_handler(dwCtrlType):
            if (dwCtrlType == signal.CTRL_C_EVENT and
                    not p1.event.is_set()):
                print("Main <win_ctrl_handler>: Graceful shutdown")
                p1.event.set()
            return False

        kernel32.SetConsoleCtrlHandler(win_ctrl_handler, True)

    def sig_handler(signum, frame):
        if not p1.event.is_set():
            print("Main <sig_handler>: Graceful shutdown")
            p1.event.set()
    signal.signal(signal.SIGINT, sig_handler)
    p1.join()


if __name__ == "__main__":
    run()
Related
I am trying to implement a method to force stop children that have been started with ThreadPoolExecutor / ProcessPoolExecutor. I would like a cross-platform implementation (Windows and Linux).
When the signal is triggered from main, the main process exits and I do NOT want that, only the child.
What is the correct way to force the child to quit? I do NOT want Events because in the following example I can have a while loop that never gets to event.is_set() again
eg:
while not event.is_set():
# Do stuff
while waiting_for_something:
# Here is blocked
Here is the code I am using but I miss something and I don't know what:
import os
import signal
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
import time


def handler(signum, frame):
    # SIGBREAK handler inside the worker: escalate to SIGINT.
    print(signum, os.getpid())
    os.kill(os.getpid(), signal.SIGINT)


class asd:
    def __init__(self):
        pass

    def run(self):
        signal.signal(signal.SIGBREAK, handler)
        while True:
            print('running thread', os.getpid())
            time.sleep(1)
            # Inner loop simulating a blocking section that never
            # returns to the outer loop's condition check.
            while True:
                print('running 2 ', os.getpid())
                time.sleep(1)
        print("after while")


if __name__ == "__main__":
    t1 = asd()
    pool = ProcessPoolExecutor(max_workers=4)
    # pool = ThreadPoolExecutor(max_workers=4)
    pool.submit(t1.run)
    print('running main', os.getpid())
    time.sleep(3)
    signal.raise_signal(signal.SIGBREAK)
    while True:
        print("after killing process")
        time.sleep(1)
Thank you!
You are sending the signal to your main Python process, not to the children.
In order to send signals to your children you need their PID, which is not available using the concurrent module; instead you should use multiprocessing.Pool, then you can get the PID of the children and send the signal to them using os.kill.
just remember to eventually use pool.terminate() to guarantee resources cleanup.
import os
import signal
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
import time
import psutil
import multiprocessing


def handler(signum, frame):
    # SIGBREAK handler inside the worker: escalate to SIGINT.
    print(signum, os.getpid())
    os.kill(os.getpid(), signal.SIGINT)


class asd:
    def __init__(self):
        pass

    def run(self):
        signal.signal(signal.SIGBREAK, handler)
        while True:
            print('running thread', os.getpid())
            time.sleep(1)
            # Inner loop simulating a blocking section.
            while True:
                print('running 2 ', os.getpid())
                time.sleep(1)
        print("after while")


if __name__ == "__main__":
    t1 = asd()
    pool = multiprocessing.Pool(4)
    # Capture the pool's worker processes so we can signal them by PID.
    children = multiprocessing.active_children()
    res = pool.apply_async(t1.run)
    print('running main', os.getpid())
    time.sleep(3)
    for child in children:
        os.kill(child.pid, signal.SIGBREAK)
    while True:
        print("after killing process")
        time.sleep(1)
with result
running main 16860
running thread 14212
running 2 14212
running 2 14212
after killing process
after killing process
after killing process
You can take a look at pebble which has been designed to solve this problem transparently for the User.
It provides concurrent.futures compatible APIs and allows you to end a processing job either by cancelling the returned Future object or by setting a computing timeout.
import time

from pebble import ProcessPool
from concurrent.futures import TimeoutError

TIMEOUT = 5


def function(sleep):
    # Simulates a job that never finishes on its own.
    while True:
        time.sleep(sleep)


with ProcessPool() as pool:
    # pebble's submit(fn, timeout, *args): the job is terminated
    # after TIMEOUT seconds and the future carries a TimeoutError.
    future = pool.submit(function, TIMEOUT, 1)
    assert isinstance(future.exception(), TimeoutError)
Note that you cannot stop executing threads in Python so only process pools can support such functionality.
Let's assume we have such a trivial daemon written in python:
def mainloop():
    """Toy daemon main loop used to illustrate graceful shutdown."""
    while True:
        # 1. do
        # 2. some
        # 3. important
        # 4. job
        # 5. sleep
        pass  # FIX: a loop body consisting only of comments is a SyntaxError


mainloop()
and we daemonize it using start-stop-daemon which by default sends SIGTERM (TERM) signal on --stop.
Let's suppose the current step performed is #2. And at this very moment we're sending TERM signal.
What happens is that the execution terminates immediately.
I've found that I can handle the signal event using signal.signal(signal.SIGTERM, handler) but the thing is that it still interrupts the current execution and passes the control to handler.
So, my question is - is it possible to not interrupt the current execution but handle the TERM signal in a separated thread (?) so that I was able to set shutdown_flag = True so that mainloop() had a chance to stop gracefully?
A class-based, clean-to-use solution:
import signal
import time


class GracefulKiller:
    """Flips kill_now to True once SIGINT or SIGTERM is received."""

    kill_now = False

    def __init__(self):
        # Route both interrupt and terminate requests to the same handler.
        signal.signal(signal.SIGINT, self.exit_gracefully)
        signal.signal(signal.SIGTERM, self.exit_gracefully)

    def exit_gracefully(self, *args):
        self.kill_now = True


if __name__ == '__main__':
    killer = GracefulKiller()
    while not killer.kill_now:
        time.sleep(1)
        print("doing something in a loop ...")
    print("End of the program. I was killed gracefully :)")
First, I'm not certain that you need a second thread to set the shutdown_flag.
Why not set it directly in the SIGTERM handler?
An alternative is to raise an exception from the SIGTERM handler, which will be propagated up the stack. Assuming you've got proper exception handling (e.g. with with/contextmanager and try: ... finally: blocks) this should be a fairly graceful shutdown, similar to if you were to Ctrl+C your program.
Example program signals-test.py:
#!/usr/bin/python
from time import sleep
import signal
import sys
def sigterm_handler(_signo, _stack_frame):
# Raises SystemExit(0):
sys.exit(0)
if sys.argv[1] == "handle_signal":
signal.signal(signal.SIGTERM, sigterm_handler)
try:
print "Hello"
i = 0
while True:
i += 1
print "Iteration #%i" % i
sleep(1)
finally:
print "Goodbye"
Now see the Ctrl+C behaviour:
$ ./signals-test.py default
Hello
Iteration #1
Iteration #2
Iteration #3
Iteration #4
^CGoodbye
Traceback (most recent call last):
File "./signals-test.py", line 21, in <module>
sleep(1)
KeyboardInterrupt
$ echo $?
1
This time I send it SIGTERM after 4 iterations with kill $(ps aux | grep signals-test | awk '/python/ {print $2}'):
$ ./signals-test.py default
Hello
Iteration #1
Iteration #2
Iteration #3
Iteration #4
Terminated
$ echo $?
143
This time I enable my custom SIGTERM handler and send it SIGTERM:
$ ./signals-test.py handle_signal
Hello
Iteration #1
Iteration #2
Iteration #3
Iteration #4
Goodbye
$ echo $?
0
Here is a simple example without threads or classes.
import signal

run = True


def handler_stop_signals(signum, frame):
    """Clear the module-level run flag so the main loop exits."""
    global run
    run = False


signal.signal(signal.SIGINT, handler_stop_signals)
signal.signal(signal.SIGTERM, handler_stop_signals)

while run:
    pass  # do stuff including other IO stuff
I think you are near to a possible solution.
Execute mainloop in a separate thread and extend it with the property shutdown_flag. The signal can be caught with signal.signal(signal.SIGTERM, handler) in the main thread (not in a separate thread). The signal handler should set shutdown_flag to True and wait for the thread to end with thread.join()
Based on the previous answers, I have created a context manager which protects from sigint and sigterm.
import logging
import signal
import sys


class TerminateProtected:
    """Context manager shielding a block of code from SIGINT/SIGTERM.

    While the block runs, both signals merely record that a kill was
    requested; on exit the process terminates if one arrived, otherwise
    the previous handlers are restored. A force kill (SIGKILL) still works.

    Example:
        with TerminateProtected():
            run_func_1()
            run_func_2()
    """

    killed = False

    def _handler(self, signum, frame):
        logging.error("Received SIGINT or SIGTERM! Finishing this block, then exiting.")
        self.killed = True

    def __enter__(self):
        # Remember the previous handlers so they can be restored on exit.
        self.old_sigint = signal.signal(signal.SIGINT, self._handler)
        self.old_sigterm = signal.signal(signal.SIGTERM, self._handler)

    def __exit__(self, type, value, traceback):
        if self.killed:
            sys.exit(0)
        signal.signal(signal.SIGINT, self.old_sigint)
        signal.signal(signal.SIGTERM, self.old_sigterm)


if __name__ == '__main__':
    print("Try pressing ctrl+c while the sleep is running!")
    from time import sleep
    with TerminateProtected():
        sleep(10)
        print("Finished anyway!")
    print("This only prints if there was no sigint or sigterm")
Found easiest way for me.
Here an example with fork for clarity that this way is useful for flow control.
import signal
import time
import sys
import os


def handle_exit(sig, frame):
    """SIGTERM handler: convert the signal into a SystemExit exception."""
    raise SystemExit


def main():
    # Stand-in for the child's real work.
    time.sleep(120)


# Installed before fork() so the child inherits the handler too.
signal.signal(signal.SIGTERM, handle_exit)

p = os.fork()
if p == 0:
    main()
    os._exit(0)  # BUG FIX: os._exit() requires an exit status argument

try:
    os.waitpid(p, 0)
except (KeyboardInterrupt, SystemExit):
    print('exit handled')
    os.kill(p, signal.SIGTERM)
    os.waitpid(p, 0)
The simplest solution I have found, taking inspiration by responses above is
import logging
import signal
import traceback


class SignalHandler:
    """Registers SIGINT/SIGTERM handlers that clean up and exit gracefully."""

    def __init__(self):
        # register signal handlers
        signal.signal(signal.SIGINT, self.exit_gracefully)
        signal.signal(signal.SIGTERM, self.exit_gracefully)
        # BUG FIX: the original referenced an undefined Logger/ERROR and
        # never imported its dependencies; use the stdlib logging module.
        self.logger = logging.getLogger(__name__)

    def exit_gracefully(self, signum, frame):
        """Log the signal, dump the interrupted stack, then exit."""
        self.logger.info('captured signal %d', signum)
        traceback.print_stack(frame)
        ###### do your resources clean up here! ####
        raise SystemExit
Sample of my code how I use signal:
#! /usr/bin/env python
import signal
def ctrl_handler(signum, frm):
print "You can't cannot kill me"
print "Installing signal handler..."
signal.signal(signal.SIGINT, ctrl_handler)
print "done"
while True:
# do something
pass
You can set a threading.Event when catching the signal.
threading.Event is threadsafe to use and pass around, can be waited on, and the same event can be set and cleared from other places.
import signal, threading

# Thread-safe shutdown flag; the SIGTERM handler simply sets it.
quit_event = threading.Event()
signal.signal(signal.SIGTERM, lambda *_args: quit_event.set())

while not quit_event.is_set():
    print("Working...")
I have the following toy example for Python threading module
from __future__ import print_function
import threading
import time
import signal
import sys
import os
import time


class ThreadShutdown(Exception):
    # Custom exception to allow clean thread exit
    pass


def thread_shutdown(signum, frame):
    """Signal handler: turn SIGTERM/SIGINT into a ThreadShutdown exception."""
    print(" o Signal {} caught and raising ThreadShutdown exception".format(signum))
    raise ThreadShutdown


def main():
    """
    Register the signal handlers needed to stop
    cleanly the child accessing thread
    """
    signal.signal(signal.SIGTERM, thread_shutdown)
    signal.signal(signal.SIGINT, thread_shutdown)

    test_run_seconds = 120
    try:
        thread = ChildThread()
        thread.start()
        time.sleep(1)
        while test_run_seconds > 0:
            test_run_seconds -= 1
            print(" o [{}] remaining time is {} seconds".format(time.asctime(time.localtime(time.time())), test_run_seconds))
            time.sleep(1)
    except ThreadShutdown:
        thread.shutdown_flag.set()
        thread.join()
        print(" o ThreadShutdown procedure complete")
        return

    # BUG FIX: the original called proc.terminate() here on an undefined
    # name, which would raise NameError if the loop ran to completion.
    thread.shutdown_flag.set()
    thread.join()
    print(" o Test terminated")


class ChildThread(threading.Thread):
    """Worker thread that loops until its shutdown_flag Event is set."""

    def __init__(self):
        threading.Thread.__init__(self)
        self.shutdown_flag = threading.Event()

    def run(self):
        while not self.shutdown_flag.is_set():
            print(" o [{}] is the current time in child, sleep for 10s".format(time.asctime(time.localtime(time.time()))))
            time.sleep(10)
        return


if __name__ == "__main__":
    sys.exit(main())
which behaves as expected (the main thread counts every second while the spawned thread prints only every 10 seconds).
I was trying to understand the behaviour of the same code snippet in presence of blocking waits in kernel mode in the spawned thread. For example, assume that the spawned thread now goes into a killable wait in an ioctl with a timeout of 10s, I would still expect to have the main thread counting every second. For some reason, it instead counts every 10s, as if it was blocked as well in the wait of the spawned thread. What is the reason?
I want to implement a proper SIGINT handling in my script, which opens multiple files and a database connection. These should be closed if the script is CTRL+C'd or somehow else interrupted.
Previously I've used the KeyboardInterrupt exception to catch CTRL+C, there I checked if files/connections are defined, if so close them, then exit.
Is this really the pythonic way to do it, or is it better adviced to use signal handlers? e.g.
import signal, sys, time


def handler(signum, frame):
    # SIGINT handler: say goodbye, then exit with status 1.
    print("..kthxbye")
    sys.exit(1)


def main():
    signal.signal(signal.SIGINT, handler)
    i = 0
    while True:
        print(i)
        i += 1
        time.sleep(1)


if __name__ == "__main__":
    main()
This seems cleaner to me, yet I don't know how I would pass filenames or database connections to the handler.
I would rather catch the KeyboardInterrupt exception on the main thread. KeyboardInterrupt is the result of the default SIGINT handler of python. The exception handler of a KeyboardInterrupt exception is a much safer/friendly context than what you are in when catching SIGINT directly.
if __name__ == '__main__':
    # The default SIGINT handler raises KeyboardInterrupt, so catching
    # it here is a safe place to run the cleanup code.
    try:
        main()
    except KeyboardInterrupt:
        cleanup()
EDIT: Here is how to share variables (state) between the two methods:
Procedural:
import sys, time
class SharedState:
def __init__(self):
self.var0 = 42
self.var1 = 'string'
# method 1
shared_variable = 'woof woof'
# method 2: avoiding global declarations in functions
shared_state = SharedState()
def main():
# In order to write a global variable you need a global
# declaration otherwise the assignment would create a
# local variable
global shared_variable
shared_variable = 5
shared_state.var0 = 10
time.sleep(10)
def cleanup():
print shared_variable
print shared_state.var0
sys.exit(1)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
cleanup()
Object oriented (my preference):
import sys, time
# method 3: object oriented programming
class Program:
def __init__(self):
self.var0 = 42
self.var1 = 'string'
def main(self):
self.var0 = 5
self.var1 = 'woof woof'
time.sleep(10)
def cleanup(self):
# both main and cleanup can access the member
# variables of this class
print self.var0
print self.var1
sys.exit(1)
def execute(self):
try:
self.main()
except KeyboardInterrupt:
self.cleanup()
if __name__ == '__main__':
Program().execute()
My suggestion is to use the signal library to handle the signals. Signals are not exceptions and they are part of Inter Process Communication (IPC) infrastructure of the Operating System.
Signals can help you to communicate with your program, like reloading the configuration file, closing your log file handler during log rotation and so on. Most daemon processes, like Apache, do it.
Shell scripts have trap command to process the signals and take appropriate actions based on the signals captured.
Generally Python closes all file handles and database connections automatically at exit. But to be safe we can have a function to handle them explicitly.
Below code traps SIGINT and closes the files properly.
import signal
import sys

# Flag flipped by the SIGINT handler; checked in the input loop below.
die = False


def handler(signum, frame):
    global die
    print('Got SIGINT.')
    die = True


def closeFile(fh):
    # Flush buffered writes before closing so no data is lost.
    fh.flush()
    fh.close()


signal.signal(signal.SIGINT, handler)

fh = open('/tmp/a.txt', 'w')
while True:
    data = input('> ')
    if data == 'q':
        closeFile(fh)
        break
    else:
        fh.write(data + '\n')
    if die:
        closeFile(fh)
        print('Completed cleanup.. ')
        sys.exit()
Let's assume we have such a trivial daemon written in python:
def mainloop():
    """Toy daemon main loop used to illustrate graceful shutdown."""
    while True:
        # 1. do
        # 2. some
        # 3. important
        # 4. job
        # 5. sleep
        pass  # FIX: a loop body consisting only of comments is a SyntaxError


mainloop()
and we daemonize it using start-stop-daemon which by default sends SIGTERM (TERM) signal on --stop.
Let's suppose the current step performed is #2. And at this very moment we're sending TERM signal.
What happens is that the execution terminates immediately.
I've found that I can handle the signal event using signal.signal(signal.SIGTERM, handler) but the thing is that it still interrupts the current execution and passes the control to handler.
So, my question is - is it possible to not interrupt the current execution but handle the TERM signal in a separated thread (?) so that I was able to set shutdown_flag = True so that mainloop() had a chance to stop gracefully?
A class-based, clean-to-use solution:
import signal
import time


class GracefulKiller:
    """Flips kill_now to True once SIGINT or SIGTERM is received."""

    kill_now = False

    def __init__(self):
        # Route both interrupt and terminate requests to the same handler.
        signal.signal(signal.SIGINT, self.exit_gracefully)
        signal.signal(signal.SIGTERM, self.exit_gracefully)

    def exit_gracefully(self, *args):
        self.kill_now = True


if __name__ == '__main__':
    killer = GracefulKiller()
    while not killer.kill_now:
        time.sleep(1)
        print("doing something in a loop ...")
    print("End of the program. I was killed gracefully :)")
First, I'm not certain that you need a second thread to set the shutdown_flag.
Why not set it directly in the SIGTERM handler?
An alternative is to raise an exception from the SIGTERM handler, which will be propagated up the stack. Assuming you've got proper exception handling (e.g. with with/contextmanager and try: ... finally: blocks) this should be a fairly graceful shutdown, similar to if you were to Ctrl+C your program.
Example program signals-test.py:
#!/usr/bin/python
from time import sleep
import signal
import sys
def sigterm_handler(_signo, _stack_frame):
# Raises SystemExit(0):
sys.exit(0)
if sys.argv[1] == "handle_signal":
signal.signal(signal.SIGTERM, sigterm_handler)
try:
print "Hello"
i = 0
while True:
i += 1
print "Iteration #%i" % i
sleep(1)
finally:
print "Goodbye"
Now see the Ctrl+C behaviour:
$ ./signals-test.py default
Hello
Iteration #1
Iteration #2
Iteration #3
Iteration #4
^CGoodbye
Traceback (most recent call last):
File "./signals-test.py", line 21, in <module>
sleep(1)
KeyboardInterrupt
$ echo $?
1
This time I send it SIGTERM after 4 iterations with kill $(ps aux | grep signals-test | awk '/python/ {print $2}'):
$ ./signals-test.py default
Hello
Iteration #1
Iteration #2
Iteration #3
Iteration #4
Terminated
$ echo $?
143
This time I enable my custom SIGTERM handler and send it SIGTERM:
$ ./signals-test.py handle_signal
Hello
Iteration #1
Iteration #2
Iteration #3
Iteration #4
Goodbye
$ echo $?
0
Here is a simple example without threads or classes.
import signal

run = True


def handler_stop_signals(signum, frame):
    """Clear the module-level run flag so the main loop exits."""
    global run
    run = False


signal.signal(signal.SIGINT, handler_stop_signals)
signal.signal(signal.SIGTERM, handler_stop_signals)

while run:
    pass  # do stuff including other IO stuff
I think you are near to a possible solution.
Execute mainloop in a separate thread and extend it with the property shutdown_flag. The signal can be caught with signal.signal(signal.SIGTERM, handler) in the main thread (not in a separate thread). The signal handler should set shutdown_flag to True and wait for the thread to end with thread.join()
Based on the previous answers, I have created a context manager which protects from sigint and sigterm.
import logging
import signal
import sys


class TerminateProtected:
    """Context manager shielding a block of code from SIGINT/SIGTERM.

    While the block runs, both signals merely record that a kill was
    requested; on exit the process terminates if one arrived, otherwise
    the previous handlers are restored. A force kill (SIGKILL) still works.

    Example:
        with TerminateProtected():
            run_func_1()
            run_func_2()
    """

    killed = False

    def _handler(self, signum, frame):
        logging.error("Received SIGINT or SIGTERM! Finishing this block, then exiting.")
        self.killed = True

    def __enter__(self):
        # Remember the previous handlers so they can be restored on exit.
        self.old_sigint = signal.signal(signal.SIGINT, self._handler)
        self.old_sigterm = signal.signal(signal.SIGTERM, self._handler)

    def __exit__(self, type, value, traceback):
        if self.killed:
            sys.exit(0)
        signal.signal(signal.SIGINT, self.old_sigint)
        signal.signal(signal.SIGTERM, self.old_sigterm)


if __name__ == '__main__':
    print("Try pressing ctrl+c while the sleep is running!")
    from time import sleep
    with TerminateProtected():
        sleep(10)
        print("Finished anyway!")
    print("This only prints if there was no sigint or sigterm")
Found easiest way for me.
Here an example with fork for clarity that this way is useful for flow control.
import signal
import time
import sys
import os


def handle_exit(sig, frame):
    """SIGTERM handler: convert the signal into a SystemExit exception."""
    raise SystemExit


def main():
    # Stand-in for the child's real work.
    time.sleep(120)


# Installed before fork() so the child inherits the handler too.
signal.signal(signal.SIGTERM, handle_exit)

p = os.fork()
if p == 0:
    main()
    os._exit(0)  # BUG FIX: os._exit() requires an exit status argument

try:
    os.waitpid(p, 0)
except (KeyboardInterrupt, SystemExit):
    print('exit handled')
    os.kill(p, signal.SIGTERM)
    os.waitpid(p, 0)
The simplest solution I have found, taking inspiration by responses above is
import logging
import signal
import traceback


class SignalHandler:
    """Registers SIGINT/SIGTERM handlers that clean up and exit gracefully."""

    def __init__(self):
        # register signal handlers
        signal.signal(signal.SIGINT, self.exit_gracefully)
        signal.signal(signal.SIGTERM, self.exit_gracefully)
        # BUG FIX: the original referenced an undefined Logger/ERROR and
        # never imported its dependencies; use the stdlib logging module.
        self.logger = logging.getLogger(__name__)

    def exit_gracefully(self, signum, frame):
        """Log the signal, dump the interrupted stack, then exit."""
        self.logger.info('captured signal %d', signum)
        traceback.print_stack(frame)
        ###### do your resources clean up here! ####
        raise SystemExit
Sample of my code how I use signal:
#! /usr/bin/env python
import signal
def ctrl_handler(signum, frm):
print "You can't cannot kill me"
print "Installing signal handler..."
signal.signal(signal.SIGINT, ctrl_handler)
print "done"
while True:
# do something
pass
You can set a threading.Event when catching the signal.
threading.Event is threadsafe to use and pass around, can be waited on, and the same event can be set and cleared from other places.
import signal, threading

# Thread-safe shutdown flag; the SIGTERM handler simply sets it.
quit_event = threading.Event()
signal.signal(signal.SIGTERM, lambda *_args: quit_event.set())

while not quit_event.is_set():
    print("Working...")