Cleanly stopping a multiprocessing.Process - KeyboardInterrupt escapes in Windows - python

I'm using multiprocessing to spawn a task (multiprocessing.Process) that can be stopped (without cooperation from the task itself, e.g.: without using something like multiprocessing.Event to signal the task to gracefully stop).
Since .terminate() (or .kill()) won't stop it cleanly (the finally: clause won't execute), I thought I would use os.kill() to emulate a CTRL+C event:
from multiprocessing import Process
from time import sleep
import os
import signal

def task(n):
    try:
        for i in range(n):
            sleep(1)
            print(f'task: i={i}')
    finally:
        print('task: finally clause executed!')
        return i

if __name__ == '__main__':
    t = Process(target=task, args=(10,))
    print('main: starting task...')
    t.start()
    sleep(5)
    for i in ('CTRL_C_EVENT', 'SIGINT'):
        if hasattr(signal, i):
            sig = getattr(signal, i)
            break
    print('main: attempt to stop task...')
    os.kill(t.pid, sig)
The finally: clause executes on Windows, macOS, and Linux; however, on Windows it additionally spits out the error:
Error in atexit._run_exitfuncs:
Traceback (most recent call last):
  File "c:\Python38\lib\multiprocessing\util.py", line 357, in _exit_function
    p.join()
  File "c:\Python38\lib\multiprocessing\process.py", line 149, in join
    res = self._popen.wait(timeout)
  File "c:\Python38\lib\multiprocessing\popen_spawn_win32.py", line 108, in wait
    res = _winapi.WaitForSingleObject(int(self._handle), msecs)
KeyboardInterrupt
while on macOS and Linux it only prints the messages that are meant to be printed.

It seems CTRL_C_EVENT on Windows is also propagated from the child process to the parent process. See, for example, this related question.
I added some bookkeeping code and a try...except block to the code. It shows what happens, and that the KeyboardInterrupt needs to be caught in the parent process as well.
from multiprocessing import Process
from time import sleep
import os
import signal

def task(n):
    try:
        for i in range(n):
            sleep(1)
            print(f'task: i={i}')
    except KeyboardInterrupt:
        print("task: caught KeyboardInterrupt")
    finally:
        print('task: finally clause executed!')
    return i

if __name__ == '__main__':
    try:
        t = Process(target=task, args=(10,))
        print('main: starting task...')
        t.start()
        sleep(5)
        for i in ('CTRL_C_EVENT', 'SIGINT'):
            if hasattr(signal, i):
                sig = getattr(signal, i)
                break
        print('main: attempt to stop task...')
        os.kill(t.pid, sig)
    finally:
        try:
            print("main: finally in main process. Waiting for 3 seconds")
            sleep(3)
        except KeyboardInterrupt:
            print("main: caught KeyboardInterrupt in finally block")
It prevents the error and produces the following output:
main: starting task...
task: i=0
task: i=1
task: i=2
task: i=3
main: attempt to stop task...
main: finally in main process. Waiting for 3 seconds
task: caught KeyboardInterrupt
main: caught KeyboardInterrupt in finally block
task: finally clause executed!
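An alternative sketch, hedged since it is not part of the code above and Windows console semantics can vary between Python versions: because CTRL_C_EVENT is broadcast to every process attached to the console, the parent could temporarily ignore SIGINT around the os.kill() call instead of catching KeyboardInterrupt everywhere:

import os
import signal

# Sketch only: t is assumed to be the already-started Process from the
# example above, and the parent is assumed not to need Ctrl+C while the
# event is in flight.
old_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
try:
    os.kill(t.pid, signal.CTRL_C_EVENT)
    t.join()  # wait so the child's finally clause can run
finally:
    signal.signal(signal.SIGINT, old_handler)  # restore Ctrl+C handling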

Related

threadpool executor inside processpoolexecutor RuntimeError: There is no current event loop in thread

I have a ProcessPoolExecutor into which I submit multiple disk read/write calls.
I want to create a thread pool inside every process for performance benefits.
Below is my attempt to override and modify the _process_worker method of concurrent.futures' process.py for use with ProcessPoolExecutor. I am trying to run the submitted function in a ThreadPoolExecutor inside each worker process:
import functools
import os
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import process as process_futures
from multiprocessing import Process

class ProcessPoolExecutor(process_futures.ProcessPoolExecutor):
    """Override process creation to use our processes"""
    def _adjust_process_count(self):
        """This is copy-pasted from concurrent.futures to override the Process class"""
        for _ in range(len(self._processes), self._max_workers):
            p = Process(
                target=_process_worker,
                args=(self._call_queue, self._result_queue, None, None))
            p.start()
            self._processes[p.pid] = p

def _process_worker(call_queue, result_queue, a, b):  # a, b absorb the (None, None) passed above
    with ThreadPoolExecutor(max_workers=8) as executor:  # starting a thread pool
        while True:
            call_item = call_queue.get(block=True)
            if call_item is None:
                # Wake up queue management thread
                result_queue.put(os.getpid())
                return
            try:
                if 1:  # my changes, problem with this code
                    future = executor.submit(call_item.fn, *call_item.args, **call_item.kwargs)
                    # _return_result is a helper referenced by this attempt (not shown)
                    future.add_done_callback(
                        functools.partial(_return_result, call_item, result_queue))
                else:  # original code with only the process pool, as in futures' process.py
                    r = call_item.fn(*call_item.args, **call_item.kwargs)
            except BaseException as e:
                result_queue.put(process_futures._ResultItem(call_item.work_id,
                                                             exception=e))
            else:
                result_queue.put(process_futures._ResultItem(call_item.work_id,
                                                             result=r))
When I add a ThreadPoolExecutor inside the ProcessPoolExecutor, I get the error below:
RuntimeError: There is no current event loop in thread '<threadedprocess._ThreadPoolExecutor object at 0x000001C5897B1FA0>_0'.
I understand that event loops are not created on child threads, so it is complaining that there is no current event loop. But even if I add a new event loop -
def _process_worker(call_queue, result_queue, a, b):
    try:
        import asyncio
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    except Exception as e:
        logger.info("exception {} ".format(e))
    with ThreadPoolExecutor(max_workers=8) as executor:
        while True:
            call_item = call_queue.get(block=True)
            if call_item is None:
                # Wake up queue management thread
                result_queue.put(os.getpid())
                return
            try:
                if 1:  # my changes, problem with this code
                    job_func = functools.partial(call_item.fn, *call_item.args, **call_item.kwargs)
                    try:
                        loop.run_in_executor(executor, job_func)
                    except Exception as e:
                        logger.info("exception recvd {}".format(e))
                else:  # original code with only the process pool, as in futures' process.py
                    r = call_item.fn(*call_item.args, **call_item.kwargs)
            except BaseException as e:
                result_queue.put(process_futures._ResultItem(call_item.work_id,
                                                             exception=e))
            else:
                result_queue.put(process_futures._ResultItem(call_item.work_id,
                                                             result=r))
I get a new error -
concurrent.futures.process.BrokenProcessPool: A process in the process pool was terminated abruptly while the future was running or pending.
How can I change _process_worker to run the work in a thread pool?
Any suggestions, please.
You can use asyncio.run(your_async_function_here()), which creates a new event loop, runs the given coroutine to completion in it, and closes the loop afterwards.
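For illustration, a minimal sketch of that suggestion; blocking_read and the file names are hypothetical stand-ins for the submitted disk calls:

import asyncio

def blocking_read(path):
    # hypothetical stand-in for one blocking disk call
    with open(path, 'rb') as f:
        return f.read()

async def fan_out(paths):
    loop = asyncio.get_running_loop()
    # run_in_executor(None, ...) uses the loop's default thread pool
    jobs = [loop.run_in_executor(None, blocking_read, p) for p in paths]
    return await asyncio.gather(*jobs)

# asyncio.run() creates a fresh event loop, runs the coroutine to
# completion in it, and closes the loop afterwards
results = asyncio.run(fan_out(['a.bin', 'b.bin']))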

Python threads and linux ioctl wait

I have the following toy example using the Python threading module:
from __future__ import print_function
import threading
import time
import signal
import sys

class ThreadShutdown(Exception):
    # Custom exception to allow clean thread exit
    pass

def thread_shutdown(signum, frame):
    print(" o Signal {} caught and raising ThreadShutdown exception".format(signum))
    raise ThreadShutdown

def main():
    """
    Register the signal handlers needed to stop
    the child thread cleanly
    """
    signal.signal(signal.SIGTERM, thread_shutdown)
    signal.signal(signal.SIGINT, thread_shutdown)
    test_run_seconds = 120
    try:
        thread = ChildThread()
        thread.start()
        time.sleep(1)
        while test_run_seconds > 0:
            test_run_seconds -= 1
            print(" o [{}] remaining time is {} seconds".format(time.asctime(time.localtime(time.time())), test_run_seconds))
            time.sleep(1)
    except ThreadShutdown:
        thread.shutdown_flag.set()
        thread.join()
        print(" o ThreadShutdown procedure complete")
        return
    thread.shutdown_flag.set()
    thread.join()
    print(" o Test terminated")

class ChildThread(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self.shutdown_flag = threading.Event()

    def run(self):
        while not self.shutdown_flag.is_set():
            print(" o [{}] is the current time in child, sleep for 10s".format(time.asctime(time.localtime(time.time()))))
            time.sleep(10)
        return

if __name__ == "__main__":
    sys.exit(main())
which behaves as expected (the main thread counts every second while the spawned thread prints only every 10 seconds).
I was trying to understand the behaviour of the same code snippet in the presence of blocking waits in kernel mode in the spawned thread. For example, assume that the spawned thread now goes into a killable wait in an ioctl with a timeout of 10 seconds; I would still expect the main thread to count every second. For some reason, it instead counts every 10 seconds, as if it were blocked in the spawned thread's wait as well. What is the reason?

Terminate background python script nicely

I am running a python script in the background using the command python script.py &. The script might look like this.
import time

def loop():
    while True:
        time.sleep(1)

if __name__ == '__main__':
    try:
        loop()
    except KeyboardInterrupt:
        print("Terminated properly")
When it comes to terminating the script, I would like to do some cleanup before it is stopped (such as printing "Terminated properly"). If I run as a current process, this would be handled by the except statement after a keyboard interrupt.
Using the kill PID command means the cleanup is never executed. How can I stop a background process and execute some lines of code before it is terminated?
You can use the signal module to catch any signals sent to your script via kill.
You set up a signal handler to catch the signal in question and perform the cleanup there:
import signal
import time

running = 0

def loop():
    global running
    running = 1
    while running:
        try:
            time.sleep(0.25)
        except KeyboardInterrupt:
            break
    print("Ended nicely!")

def cleanup(signumber, stackframe):
    global running
    running = 0

signal.signal(signal.SIGABRT, cleanup)
signal.signal(signal.SIGTERM, cleanup)
signal.signal(signal.SIGQUIT, cleanup)

loop()
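With these handlers registered, a plain kill PID (which sends SIGTERM by default) clears the flag, the loop exits on its next pass, and "Ended nicely!" is printed before the process exits.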
Use a finally clause:

def loop():
    while True:
        time.sleep(1)

if __name__ == '__main__':
    try:
        loop()
    except KeyboardInterrupt:
        print("Terminated properly")
    finally:
        print('executes always')
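One caveat worth adding: finally on its own only runs for signals Python already turns into exceptions, such as Ctrl+C (SIGINT). A plain kill sends SIGTERM, which terminates the process without running finally blocks unless a handler converts it into an exception. A minimal sketch combining both answers, assuming the handler simply raises SystemExit:

import signal
import time

def handle_term(signum, frame):
    # turn SIGTERM into an ordinary exception so finally blocks run
    raise SystemExit("got SIGTERM")

signal.signal(signal.SIGTERM, handle_term)

try:
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    print("Terminated properly")
finally:
    print('executes always')  # runs on Ctrl+C and on kill PID alike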

How to let child thread deal with main process killed or key interrupt in Python?

In a multi-threaded design, I want to do some cleanup steps when the program exits abnormally. The running thread should clean up the current task and then quit, rather than be killed immediately and leave dirty data behind. I found that when using the threading module I could not catch the KeyboardInterrupt exception.
Here is my test code:
#!/usr/bin/env python3
from time import sleep

def do_sth():
    print("I'm doing something...")
    sleep(10)

if __name__ == "__main__":
    do_sth()
Python raises a KeyboardInterrupt exception when I press Ctrl+C:
$ python3 test.py
I'm doing something ...
^C
Traceback (most recent call last):
  File "test.py", line 10, in <module>
    do_sth()
  File "test.py", line 7, in do_sth
    sleep(10)
KeyboardInterrupt
So I can catch this exception.
def do_sth():
    try:
        print("I'm doing something ...")
        sleep(10)
    except (KeyboardInterrupt, SystemExit):
        print("I'm doing some clean steps and exit.")
But when I use the threading module, this exception is not raised at all.
#!/usr/bin/env python3
from time import sleep
import threading

def do_sth():
    print("I'm doing something...")
    sleep(10)

if __name__ == '__main__':
    t = threading.Thread(target=do_sth)
    t.start()
    t.join()
result:
$ python3 test.py
I'm doing something...
^C
The running thread has been killed directly and no exception is raised.
How do I deal with this?
One way is to handle KeyboardInterrupt exceptions.
Another thing to do in such scenarios is to manage the application's shutdown state across all threads.
One solution is to add support for signals in your code, which allows graceful handling of the shutdown of your process.
Here's one simple setup for that:
import logging
import signal
import threading

class SignalHandler:
    continue_running = True

    def __init__(self):
        signal.signal(signal.SIGUSR2, self.signal_handler)
        signal.signal(signal.SIGTERM, self.signal_handler)
        signal.signal(signal.SIGINT, self.signal_handler)
        logging.info("SignalHandler::Init")

    def signal_handler(self, num, stack):
        logging.warning('Received signal %d in %s' % (num, threading.currentThread()))
        SignalHandler.continue_running = False
        logging.warning("Time to SHUT DOWN ALL MODULES")
All threads would utilise the status of SignalHandler.continue_running so that they all know when to stop.
If somebody tried to kill this Python process, for example by calling kill -2 [PID], all threads would come to know about the need to shut down.
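For completeness, a minimal hedged sketch of a worker thread consuming that flag; the worker body and its sleep interval are illustrative assumptions, not part of the answer above:

import logging
import threading
import time

handler = SignalHandler()  # the class defined above; handlers must be registered in the main thread

def worker():
    # poll the shared flag between units of work
    while SignalHandler.continue_running:
        time.sleep(0.5)  # stand-in for one unit of real work
    logging.info("worker: shutdown flag seen, cleaning up")

t = threading.Thread(target=worker)
t.start()
while SignalHandler.continue_running:
    time.sleep(0.5)  # keep the main thread free to receive signals
t.join()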

threading ignores KeyboardInterrupt exception

I'm running this simple code:
import threading, time

class reqthread(threading.Thread):
    def run(self):
        for i in range(0, 10):
            time.sleep(1)
            print('.')

try:
    thread = reqthread()
    thread.start()
except (KeyboardInterrupt, SystemExit):
    print('\n! Received keyboard interrupt, quitting threads.\n')
But when I run it, it prints
$ python prova.py
.
.
^C.
.
.
.
.
.
.
.
Exception KeyboardInterrupt in <module 'threading' from '/usr/lib/python2.6/threading.pyc'> ignored
In fact, the Python thread ignores my Ctrl+C keyboard interrupt and never prints Received keyboard interrupt. Why? What is wrong with this code?
Try

try:
    thread = reqthread()
    thread.daemon = True
    thread.start()
    while True:
        time.sleep(100)
except (KeyboardInterrupt, SystemExit):
    print('\n! Received keyboard interrupt, quitting threads.\n')
Without the call to time.sleep, the main process jumps out of the try...except block too early, so the KeyboardInterrupt is not caught. My first thought was to use thread.join, but that seems to block the main process (ignoring KeyboardInterrupt) until the thread is finished.
thread.daemon = True causes the thread to terminate when the main process ends.
To summarize the changes recommended in the comments, the following works well for me:

import sys

try:
    thread = reqthread()
    thread.start()
    while thread.isAlive():
        thread.join(1)  # not sure if there is an appreciable cost to this
except (KeyboardInterrupt, SystemExit):
    print('\n! Received keyboard interrupt, quitting threads.\n')
    sys.exit()
A slight modification of ubuntu's solution: removing thread.daemon = True as suggested by Eric and replacing the sleeping loop with signal.pause():
import signal

try:
    thread = reqthread()
    thread.start()
    signal.pause()  # instead of: while True: time.sleep(100)
except (KeyboardInterrupt, SystemExit):
    print('\n! Received keyboard interrupt, quitting threads.\n')
My (hacky) solution is to monkey-patch Thread.join() like this:
def initThreadJoinHack():
    import threading, thread
    mainThread = threading.currentThread()
    assert isinstance(mainThread, threading._MainThread)
    mainThreadId = thread.get_ident()
    join_orig = threading.Thread.join

    def join_hacked(threadObj, timeout=None):
        """
        :type threadObj: threading.Thread
        :type timeout: float|None
        """
        if timeout is None and thread.get_ident() == mainThreadId:
            # This is a HACK for Thread.join() if we are in the main thread.
            # In that case, a Thread.join(timeout=None) would hang and not even respond to signals
            # because signals will get delivered to other threads and Python would forward
            # them for delayed handling to the main thread, which hangs.
            # See CPython signalmodule.c.
            # Currently the best solution I can think of:
            while threadObj.isAlive():
                join_orig(threadObj, timeout=0.1)
        else:
            # In all other cases, we can use the original.
            join_orig(threadObj, timeout=timeout)

    threading.Thread.join = join_hacked
Putting the try ... except in each thread, and also a signal.pause() in the real main(), works for me.
Watch out for the import lock, though. I am guessing this is why Python doesn't solve Ctrl+C by default.
