Python custom signal handling in processes pool

I am dealing with the following problem:
I've implemented a dummy 'Thing' class that sleeps for 10 seconds and logs a message ('foo'). This class is instantiated in a worker function for a process Pool, and its 'foo' method, which implements the logic above, is called.
What I want to achieve is custom signal handling: as long as the processes haven't terminated, if CTRL+C (SIGINT) is sent, each process should log the signal and terminate immediately.
Half of the logic works: if SIGINT arrives while the processes are sleeping, they are interrupted and the Pool is closed.
Problem: if ALL the processes end successfully and SIGINT is then sent, the message is logged but the Pool won't close.
Code:
import logging
import signal
import os
import time

from multiprocessing import Pool, current_process

logger = logging.getLogger('test')

SIGNAL_NAMES = dict((k, v) for v, k in reversed(sorted(signal.__dict__.items()))
                    if v.startswith('SIG') and not v.startswith('SIG_'))


class Thing(object):
    def __init__(self, my_id):
        self.my_id = my_id
        self.logger = logging.getLogger(str(my_id))

    def foo(self):
        time.sleep(10)
        self.logger.info('[%s] Foo after 10 secs!', self.my_id)


class Daemon(object):
    def __init__(self, no_processes, max_count):
        signal.signal(signal.SIGINT, self.stop)
        self.done = False
        self.count = 0
        self.max_count = max_count
        self.pool = Pool(no_processes, initializer=self.pool_initializer)

    def stop(self, signum, _):
        """ Stop function for Daemon """
        sig = SIGNAL_NAMES.get(signum) or signum
        logger.info('[Daemon] Stopping (received signal %s)', sig)
        self.done = True

    def _generate_ids(self):
        """ Generator function of the IDs for the Processes Pool """
        while not self.done:
            if self.count < self.max_count:
                my_id = "ID-{}".format(self.count)
                logger.info('[Daemon] Generated ID %s', my_id)
                time.sleep(3)
                yield my_id
                self.count += 1
            time.sleep(1)

    def run(self):
        """ Main daemon run function """
        pid = os.getpid()
        logger.info('[Daemon] Started running on PID %s', str(pid))
        my_ids = self._generate_ids()
        for res in self.pool.imap_unordered(run_thing, my_ids):
            logger.info("[Daemon] Finished %s", res or '')
        logger.info('[Daemon] Closing & waiting processes to terminate')
        self.pool.close()
        self.pool.join()

    def pool_initializer(self):
        """ Pool initializer function """
        signal.signal(signal.SIGINT, self.worker_signal_handler)

    @staticmethod
    def worker_signal_handler(signum, _):
        """ Signal handler for the Process worker """
        sig = SIGNAL_NAMES.get(signum) or signum
        cp = current_process()
        logger.info("[%s] Received in worker %s signal %s", WORKER_THING_ID or '', str(cp), sig)
        global WORKER_EXITING
        WORKER_EXITING = True


WORKER_EXITING = False
WORKER_THING_ID = None


def run_thing(arg):
    """ Worker function for processes """
    if WORKER_EXITING:
        return

    global WORKER_THING_ID
    WORKER_THING_ID = arg
    run_exception = None
    logger.info('[%s] START Thing foo-ing', arg)
    logging.getLogger('Thing-{}'.format(arg)).setLevel(logging.INFO)
    try:
        thing = Thing(arg)
        thing.foo()
    except Exception as e:
        run_exception = e
    finally:
        WORKER_THING_ID = None
        logger.info('[%s] STOP Thing foo-ing', arg)

    if run_exception:
        logger.error('[%s] EXCEPTION on Thing foo-ing: %s', arg, run_exception)
    return arg


if __name__ == '__main__':
    logging.basicConfig()
    logger.setLevel(logging.INFO)

    daemon = Daemon(4, 3)
    daemon.run()

Your problem is the logic in _generate_ids(). The generator never ends, so pool.imap_unordered() never finishes by itself and can only be interrupted by CTRL-C.
Change it to something like this:
def _generate_ids(self):
    """ Generator function of the IDs for the Processes Pool """
    for i in range(self.max_count):
        time.sleep(3)
        my_id = "ID-{}".format(self.count)
        logger.info('[Daemon] Generated ID %s', my_id)
        if self.done:
            break
        self.count += 1
        yield my_id
With this change, the processes end by themselves normally.


Raising exceptions in a thread

I tried to get example 1, raising exceptions in a Python thread using PyThreadState_SetAsyncExc(), from the geeksforgeeks article 'Different ways to kill a Thread' running.
But for some reason the thread does not terminate.
I use Python 3.6.
Here is the source code:
# Python program raising
# exceptions in a python
# thread

import threading
import ctypes
import time


class thread_with_exception(threading.Thread):
    def __init__(self, name):
        threading.Thread.__init__(self)
        self.name = name

    def run(self):
        # target function of the thread class
        try:
            while True:
                print('running ' + self.name)
        finally:
            print('ended')

    def get_id(self):
        # returns id of the respective thread
        if hasattr(self, '_thread_id'):
            return self._thread_id
        for id, thread in threading._active.items():
            if thread is self:
                return id

    def raise_exception(self):
        thread_id = self.get_id()
        res = ctypes.pythonapi.PyThreadState_SetAsyncExc(thread_id,
                                                         ctypes.py_object(SystemExit))
        if res > 1:
            ctypes.pythonapi.PyThreadState_SetAsyncExc(thread_id, 0)
            print('Exception raise failure')


t1 = thread_with_exception('Thread 1')
t1.start()
time.sleep(2)
t1.raise_exception()
t1.join()
Does anybody have an idea why the thread is not terminated by the raised exception?
Wrap thread_id in ctypes.c_long as:
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(thread_id),
                                                 ctypes.py_object(SystemExit))
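Applied to the raise_exception method above, that looks like the following (same code, only the thread id wrapped):
def raise_exception(self):
    thread_id = self.get_id()
    # PyThreadState_SetAsyncExc expects a C long, not a plain Python int,
    # so wrap the thread id before passing it in.
    res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(thread_id),
                                                     ctypes.py_object(SystemExit))
    if res > 1:
        ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(thread_id), 0)
        print('Exception raise failure')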

Python Multiprocessing - terminate / restart worker process

I have a bunch of long running processes that I would like to split up into multiple processes. That part I can do without a problem. The issue I run into is that sometimes these processes go into a hung state. To address this I would like to be able to set a time threshold for each task that a process is working on. When that time threshold is exceeded I would like to restart or terminate the task.
Originally my code was very simple, using a process pool, however with the pool I could not figure out how to retrieve the processes inside the pool, never mind how to restart / terminate a process in the pool.
I have resorted to using a queue and process objects as illustrated in this example (https://pymotw.com/2/multiprocessing/communication.html#passing-messages-to-processes) with some changes.
My attempts to figure this out are in the code below. In its current state the process does not actually get terminated. Further to that, I cannot figure out how to get the process to move onto the next task after the current task is terminated. Any suggestions / help appreciated; perhaps I'm going about this the wrong way.
Thanks
import multiprocess
import time


class Consumer(multiprocess.Process):

    def __init__(self, task_queue, result_queue, startTimes, name=None):
        multiprocess.Process.__init__(self)
        if name:
            self.name = name
        print 'created process: {0}'.format(self.name)
        self.task_queue = task_queue
        self.result_queue = result_queue
        self.startTimes = startTimes

    def stopProcess(self):
        elapseTime = time.time() - self.startTimes[self.name]
        print 'killing process {0} {1}'.format(self.name, elapseTime)
        self.task_queue.cancel_join_thread()
        self.terminate()
        # now want to get the process to start processing another job

    def run(self):
        '''
        The process subclass calls this on a separate process.
        '''
        proc_name = self.name
        print proc_name
        while True:
            # pulling the next task off the queue and starting it
            # on the current process.
            task = self.task_queue.get()
            self.task_queue.cancel_join_thread()
            if task is None:
                # Poison pill means shutdown
                #print '%s: Exiting' % proc_name
                self.task_queue.task_done()
                break
            self.startTimes[proc_name] = time.time()
            answer = task()
            self.task_queue.task_done()
            self.result_queue.put(answer)
        return


class Task(object):
    def __init__(self, a, b, startTimes):
        self.a = a
        self.b = b
        self.startTimes = startTimes
        self.taskName = 'taskName_{0}_{1}'.format(self.a, self.b)

    def __call__(self):
        import time
        import os
        print 'new job in process pid:', os.getpid(), self.taskName
        if self.a == 2:
            time.sleep(20000)  # simulate a hung process
        else:
            time.sleep(3)  # pretend to take some time to do the work
        return '%s * %s = %s' % (self.a, self.b, self.a * self.b)

    def __str__(self):
        return '%s * %s' % (self.a, self.b)


if __name__ == '__main__':
    # Establish communication queues
    # tasks = this is the work queue and results is for results or completed work
    tasks = multiprocess.JoinableQueue()
    results = multiprocess.Queue()
    #parentPipe, childPipe = multiprocess.Pipe(duplex=True)
    mgr = multiprocess.Manager()
    startTimes = mgr.dict()

    # Start consumers
    numberOfProcesses = 4
    processObjs = []
    for processNumber in range(numberOfProcesses):
        processObj = Consumer(tasks, results, startTimes)
        processObjs.append(processObj)

    for process in processObjs:
        process.start()

    # Enqueue jobs
    num_jobs = 30
    for i in range(num_jobs):
        tasks.put(Task(i, i + 1, startTimes))

    # Add a poison pill for each process object
    for i in range(numberOfProcesses):
        tasks.put(None)

    # process monitor loop
    killProcesses = {}
    executing = True
    while executing:
        allDead = True
        for process in processObjs:
            name = process.name
            #status = consumer.status.getStatusString()
            status = process.is_alive()
            pid = process.ident
            elapsedTime = 0
            if name in startTimes:
                elapsedTime = time.time() - startTimes[name]
            if elapsedTime > 10:
                process.stopProcess()
            print "{0} - {1} - {2} - {3}".format(name, status, pid, elapsedTime)
            if allDead and status:
                allDead = False
        if allDead:
            executing = False
        time.sleep(3)

    # Wait for all of the tasks to finish
    #tasks.join()

    # Start printing results
    while num_jobs:
        result = results.get()
        print 'Result:', result
        num_jobs -= 1
I generally recommend against subclassing multiprocessing.Process as it leads to code that is hard to read.
I'd rather encapsulate your logic in a function and run it in a separate process. This keeps the code much cleaner and more intuitive.
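For illustration, here is a minimal sketch of that pattern (the work function and its arguments are made up for the example):
import multiprocessing


def work(task_id, result_queue):
    # All worker logic lives in a plain top-level function instead of a
    # Process subclass, which keeps the parent/child split easy to follow.
    result_queue.put(task_id * 2)


if __name__ == '__main__':
    results = multiprocessing.Queue()
    worker = multiprocessing.Process(target=work, args=(21, results))
    worker.start()
    print(results.get())  # -> 42
    worker.join()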
Nevertheless, rather than reinventing the wheel, I'd recommend you use a library which already solves the issue for you, such as Pebble or billiard.
For example, the Pebble library makes it easy to set timeouts on processes running independently or within a Pool.
Running your function within a separate process with a timeout:
from pebble import concurrent
from concurrent.futures import TimeoutError

@concurrent.process(timeout=10)
def function(foo, bar=0):
    return foo + bar

future = function(1, bar=2)

try:
    result = future.result()  # blocks until results are ready
except TimeoutError as error:
    print("Function took longer than %d seconds" % error.args[1])
Same example but with a process Pool.
from pebble import ProcessPool

with ProcessPool(max_workers=5, max_tasks=10) as pool:
    future = pool.schedule(function, args=[1], timeout=10)

    try:
        result = future.result()  # blocks until results are ready
    except TimeoutError as error:
        print("Function took longer than %d seconds" % error.args[1])
In both cases, the timing out process will be automatically terminated for you.
A simpler solution than reimplementing the Pool is to keep using it and design a mechanism that times out the function you are running.
For instance:
from time import sleep
import signal


class TimeoutError(Exception):
    pass


def handler(signum, frame):
    raise TimeoutError()


def run_with_timeout(func, *args, timeout=10, **kwargs):
    signal.signal(signal.SIGALRM, handler)
    signal.alarm(timeout)
    try:
        res = func(*args, **kwargs)
    except TimeoutError as exc:
        print("Timeout")
        res = exc
    finally:
        signal.alarm(0)
    return res


def test():
    sleep(4)
    print("ok")


if __name__ == "__main__":
    import multiprocessing as mp
    p = mp.Pool()
    print(p.apply_async(run_with_timeout, args=(test,),
                        kwds={"timeout": 1}).get())
signal.alarm sets a timeout and, when that timeout expires, runs the handler, which stops the execution of your function.
EDIT: If you are using a Windows system, it is a bit more complicated because signal does not implement SIGALRM there. Another solution is to use the C-level Python API. This code has been adapted from this SO answer, with a bit of adaptation to work on 64-bit systems. I have only tested it on Linux, but it should work the same on Windows.
import threading
import ctypes
from time import sleep


class TimeoutError(Exception):
    pass


def run_with_timeout(func, *args, timeout=10, **kwargs):
    interupt_tid = int(threading.get_ident())

    def interupt_thread():
        # Call the low level C python api using ctypes. tid must be converted
        # to c_long to be valid.
        res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
            ctypes.c_long(interupt_tid), ctypes.py_object(TimeoutError))
        if res == 0:
            print(threading.enumerate())
            print(interupt_tid)
            raise ValueError("invalid thread id")
        elif res != 1:
            # "if it returns a number greater than one, you're in trouble,
            # and you should call it again with exc=NULL to revert the effect"
            ctypes.pythonapi.PyThreadState_SetAsyncExc(
                ctypes.c_long(interupt_tid), 0)
            raise SystemError("PyThreadState_SetAsyncExc failed")

    timer = threading.Timer(timeout, interupt_thread)
    try:
        timer.start()
        res = func(*args, **kwargs)
    except TimeoutError as exc:
        print("Timeout")
        res = exc
    else:
        timer.cancel()
    return res


def test():
    sleep(4)
    print("ok")


if __name__ == "__main__":
    import multiprocessing as mp
    p = mp.Pool()
    print(p.apply_async(run_with_timeout, args=(test,),
                        kwds={"timeout": 1}).get())
    print(p.apply_async(run_with_timeout, args=(test,),
                        kwds={"timeout": 5}).get())
For long running processes and/or long iterators, spawned workers might hang after some time. To prevent this, there are two built-in techniques:
Restart workers after they have delivered maxtasksperchild tasks from the queue.
Pass timeout to pool.imap.next(), catch the TimeoutError, and finish the rest of the work in another pool.
The following wrapper implements both, as a generator. This also works when replacing stdlib multiprocessing with multiprocess.
import multiprocessing as mp


def imap(
    func,
    iterable,
    *,
    processes=None,
    maxtasksperchild=42,
    timeout=42,
    initializer=None,
    initargs=(),
    context=mp.get_context("spawn")
):
    """Multiprocessing imap, restarting workers after maxtasksperchild tasks to avoid zombies.

    Example:
        >>> list(imap(str, range(5)))
        ['0', '1', '2', '3', '4']

    Raises:
        mp.TimeoutError: if the next result cannot be returned within timeout seconds.

    Yields:
        Ordered results as they come in.
    """
    with context.Pool(
        processes=processes,
        maxtasksperchild=maxtasksperchild,
        initializer=initializer,
        initargs=initargs,
    ) as pool:
        it = pool.imap(func, iterable)
        while True:
            try:
                yield it.next(timeout)
            except StopIteration:
                return
To catch the TimeoutError:
>>> import time
>>> iterable = list(range(10))
>>> results = []
>>> try:
...     for i, result in enumerate(imap(time.sleep, iterable, processes=2, timeout=2)):
...         results.append(result)
... except mp.TimeoutError:
...     print("Failed to process the following subset of iterable:", iterable[i:])
Failed to process the following subset of iterable: [2, 3, 4, 5, 6, 7, 8, 9]

Handle multiprocess in python

My code processes some parallel Perforce tasks while showing a progress bar and letting the user terminate the job whenever he wants. The problem is that when the user clicks the close button, the thread function is not killed, but the lock is released and the main UI thread is unblocked.
p4.run_sync() does not terminate when the Cancel button is clicked.
def P4SyncLibrary(args, que):
    syncType = args[0]
    view = args[1]
    p4 = P4CreateConnection(disable_tmp_cleanup=True)
    try:
        p4.run_sync(view)
    except P4Exception:
        for e in p4.errors:
            print "SyncError: - %s" % e
    p4.disconnect()
    que.put(None)


class CreateJob(QtGui.QDialog):
    def __init__(self, thread, args):
        QtGui.QDialog.__init__(self)
        self.ui = Ui_ProgressBar()
        self.ui.setupUi(self)
        self.ui.cancel.clicked.connect(self.closeEvent)
        self.ui.cancel.setIcon(QtGui.QIcon(QtGui.QPixmap("%s/delete.xpm" % resources)))
        self.threadControl = ThreadControl(thread=thread, args=args)
        self.connect(self.threadControl, QtCore.SIGNAL("__updateProgressBar(int)"), self.__updateProgressBar)
        self.threadControl.finished.connect(self.closeEvent)
        self.threadControl.start()

    @QtCore.pyqtSlot(int)
    def __updateProgressBar(self, val):
        self.ui.progressBar.setValue(val)
        self.setWindowTitle("Processing: {0}%".format(val))

    def closeEvent(self, QCloseEvent=None):
        if self.threadControl.isRunning():
            self.threadControl.stop()
            self.threadControl.wait()
        if QCloseEvent: QtGui.QDialog.closeEvent(self, QCloseEvent)
        else: self.close()

    def getResults(self):
        return self.threadControl.resultDict


class ThreadControl(QtCore.QThread):
    stopFlag = 0

    def __init__(self, thread=None, args=None):
        super(ThreadControl, self).__init__()
        self.args = args
        self.thread = thread
        self.resultDict = []

    def run(self):
        threads = {}
        queue = multiprocessing.Queue()
        for arg in self.args:
            process = multiprocessing.Process(target=self.thread, args=(arg, queue))
            process.start()
            threads[process] = 1  ## ACTIVE thread

        # WAIT TILL ALL PROCESSES COMPLETE
        completedThreads = 0
        total = len(threads.keys())
        while completedThreads != total:
            if self.stopFlag:
                for t in threads.keys():
                    if threads[t] == 1:
                        t.terminate()
                        t.join()
                        threads[t] = 0
                        completedThreads += 1
            else:
                for t in threads.keys():
                    if self.stopFlag: break  ## Process threads termination
                    elif threads[t] == 1 and not t.is_alive():
                        threads[t] = 0
                        completedThreads += 1
                        self.resultDict.append(queue.get())
                        self.emit(QtCore.SIGNAL('__updateProgressBar(int)'), (completedThreads * 100) / total)
            sleep(0.5)  ## Prevent CPU from overloading

    def stop(self):
        self.stopFlag = 1
A job is created using an instance of CreateJob:
CreateJob(thread=P4SyncLibrary, args=P4Libraries).exec_()
The only solution I could come up with is to pass the p4 object to the calling thread as an argument, so that the p4 server connection can be disconnected when the user wants to cancel the job.
def P4SyncLibrary(p4, args, que):
    syncType = args[0]
    view = args[1]
    try:
        p4.run_sync(view)
    except P4Exception:
        for e in p4.errors:
            print "SyncError: - %s" % e
    que.put(None)


class ThreadControl(QtCore.QThread):
    ...
    def run(self):
        threads = {}
        queue = multiprocessing.Queue()
        for arg in self.args:
            connection = P4CreateConnection(disable_tmp_cleanup=True)
            if connection.connected():
                process = multiprocessing.Process(target=self.thread, args=(connection, arg, queue))
                process.start()
                threads[process] = {
                    'isAlive': True,
                    'connection': connection
                }

        # WAIT TILL ALL PROCESSES COMPLETE
        completedThreads = 0
        total = len(threads.keys())
        while completedThreads != total:
            if self._stop:
                for t in threads.keys():
                    if threads[t]['isAlive']:
                        threads[t]['connection'].disconnect()
                        t.terminate()
                        t.join()
                        threads[t]['isAlive'] = False
                        completedThreads += 1
            else:
                for t in threads.keys():
                    if self._stop: break  ## Process threads termination
                    elif threads[t]['isAlive'] and not t.is_alive():
                        threads[t]['connection'].disconnect()
                        threads[t]['isAlive'] = False
                        completedThreads += 1
                        self.results.append(queue.get())
                        self.emit(QtCore.SIGNAL('__updateProgressBar(int)'), (completedThreads * 100) / total)
            sleep(0.5)  ## Prevent CPU from overloading

Python sys.excepthook working only on main process but not on subprocesses

I have an app with some subprocesses running, and I have successfully set the sys.excepthook exception handler for the main process. Now I want to set the same hook on the subprocesses. I expected it to be as simple as copying the exact lines of code I used on the main process, but it didn't work.
Here is my code:
class Consumer(multiprocessing.Process):

    def __init__(self, codec_status_queue, logger_queue):
        multiprocessing.Process.__init__(self)
        self.codec_status_queue = codec_status_queue
        self.logger_queue = logger_queue

    def run(self):
        # Set default unhandled exceptions handler
        uncaughtErrorHandler = UncaughtErrorHandler(self.logger_queue)
        sys.excepthook = uncaughtErrorHandler.error_handler
        1/0


class UncaughtErrorHandler(object):

    def __init__(self, logger_queue, child_processes=None):
        self.logger_queue = logger_queue
        self.child_processes = child_processes

    def error_handler(self, type, value, trace_back):
        trace_formatted = "".join(traceback.format_tb(trace_back))
        exeption_message = "Unhandled Exception:\n Type: %s\n Value: %s\n Line: %s\n Traceback:\n %s" % (type, value.message, trace_back.tb_lineno, trace_formatted)
        logger_queue.put((LoggerThread.CRITICAL, exeption_message))
        if self.child_processes:
            self.stop_children()
        # Stopping this process
        sys.exit()

    def stop_children(self):
        num_children = len(self.child_processes)
        logger_queue.put((LoggerThread.DEBUG, "Terminating child processes (%s)" % num_children))
        for process in self.child_processes:
            log_message = "Terminating %s with PID %s" % (process.name, process.pid)
            logger_queue.put((LoggerThread.DEBUG, log_message))
            process.terminate()


if __name__ == '__main__':
    ...
    # Create processes and communication queues
    codec_status_queue = multiprocessing.Queue()
    num_consumers = multiprocessing.cpu_count() * 2
    print 'Creating %d consumers' % num_consumers
    consumers = [ Consumer(codec_status_queue, logger_queue)
                  for i in xrange(num_consumers) ]

    # Set default unhandled exceptions handler
    uncaughtErrorHandler = UncaughtErrorHandler(logger_queue, consumers)
    sys.excepthook = uncaughtErrorHandler.error_handler

    # Start processes
    for consumer in consumers:
        consumer.daemon = True
        consumer.start()
If I put the 1/0 in the __main__ part, the UncaughtErrorHandler catches the exception, but when the 1/0 is placed as shown above, it doesn't.
Can someone tell me what I am doing wrong?
The following code was written for Python 3.x but can be adapted to work with Python 2.x instead. It provides an alternative to overriding sys.excepthook in child processes: a simple fix is to catch all exceptions and hand the data from sys.exc_info over to the exception handler. The main process could use a similar pattern for exceptions but retains the original design from your program. The example shown below should be a full working demonstration you can play around with and adapt to your needs.
#! /usr/bin/env python3
import logging
import multiprocessing
import queue
import sys
import threading
import time
import traceback


def main():
    """Demonstrate exception handling and logging in several processes."""
    logger_queue = multiprocessing.Queue()
    logger_thread = LoggerThread(logger_queue)
    logger_thread.start()
    try:
        # Create processes and communication queues
        codec_status_queue = multiprocessing.Queue()
        num_consumers = multiprocessing.cpu_count() * 2
        print('Creating {} consumers'.format(num_consumers))
        consumers = [Consumer(codec_status_queue, logger_queue)
                     for _ in range(num_consumers)]
        # Set default unhandled exceptions handler
        uncaught_error_handler = UncaughtErrorHandler(logger_queue, consumers)
        sys.excepthook = uncaught_error_handler.error_handler
        # Start processes
        for consumer in consumers:
            consumer.start()
        time.sleep(2)
    finally:
        logger_thread.shutdown()


def get_message(value):
    """Retrieve an exception's error message and return it."""
    if hasattr(value, 'message'):
        return value.message
    if hasattr(value, 'args') and value.args:
        return value.args[0]


class LoggerThread(threading.Thread):
    """Handle logging messages coming from various sources via a queue."""

    CRITICAL = logging.CRITICAL
    DEBUG = logging.DEBUG

    def __init__(self, logger_queue):
        """Initialize an instance of the LoggerThread class."""
        super().__init__()
        self.logger_queue = logger_queue
        self.mutex = threading.Lock()
        self.running = False

    def run(self):
        """Process messages coming through the queue until shutdown."""
        self.running = True
        while self.running:
            try:
                while True:
                    self.handle_message(*self.logger_queue.get(True, 0.1))
            except queue.Empty:
                pass

    def handle_message(self, level, message):
        """Show the message while ensuring a guaranteed order on screen."""
        with self.mutex:
            print('Level:', level)
            print('Message:', message)
            print('=' * 80, flush=True)

    def shutdown(self):
        """Signal the thread to exit once it runs out of messages."""
        self.running = False


class Consumer(multiprocessing.Process):
    """Simulate a consumer process that handles data from a queue."""

    def __init__(self, codec_status_queue, logger_queue):
        """Initialize an instance of the Consumer class."""
        super().__init__()
        self.codec_status_queue = codec_status_queue
        self.logger_queue = logger_queue
        self.daemon = True

    def run(self):
        """Begin working as a consumer while handling any exceptions."""
        # Set default unhandled exceptions handler
        uncaught_error_handler = UncaughtErrorHandler(self.logger_queue)
        try:
            self.do_consumer_work()
        except:
            uncaught_error_handler.error_handler(*sys.exc_info())

    def do_consumer_work(self):
        """Pretend to be doing the work of a consumer."""
        junk = 1 / 0
        print('Process', self.ident, 'calculated', junk)


class UncaughtErrorHandler:
    """Organize error handling to automatically terminate child processes."""

    def __init__(self, logger_queue, child_processes=None):
        """Initialize an instance of the UncaughtErrorHandler class."""
        self.logger_queue = logger_queue
        self.child_processes = child_processes

    def error_handler(self, kind, value, trace_back):
        """Record errors as they happen and terminate the process tree."""
        trace_formatted = ''.join(traceback.format_tb(trace_back))
        exception_message = ('Unhandled Exception:\n'
                             ' Type: {}\n'
                             ' Value: {}\n'
                             ' Line: {}\n'
                             ' Traceback:\n{}').format(
            kind, get_message(value), trace_back.tb_lineno, trace_formatted)
        self.logger_queue.put((LoggerThread.CRITICAL, exception_message))
        if self.child_processes:
            self.stop_children()
        # Stopping this process
        sys.exit()

    def stop_children(self):
        """Terminate all children associated with this error handler."""
        num_children = len(self.child_processes)
        log_message = 'Terminating child processes({})'.format(num_children)
        self.logger_queue.put((LoggerThread.DEBUG, log_message))
        for process in self.child_processes:
            log_message = 'Terminating {} with PID {}'.format(
                process.name, process.pid)
            self.logger_queue.put((LoggerThread.DEBUG, log_message))
            process.terminate()


if __name__ == '__main__':
    main()

Start python Process with output and timeout

I'm trying to find a way to start a new Process and get its output if it takes less than X seconds. If the process takes more time, I would like to ignore the Process result, kill the Process and carry on.
I basically need to add a timer to the code below. Not sure if there's a better way to do it; I'm open to a different and better solution.
from multiprocessing import Process, Queue


def f(q):
    # Ugly work
    q.put(['hello', 'world'])


if __name__ == '__main__':
    q = Queue()
    p = Process(target=f, args=(q,))
    p.start()
    print q.get()
    p.join()
Thanks!
You may find the following module useful in your case:
Module
#! /usr/bin/env python3
"""Allow functions to be wrapped in a timeout API.

Since code can take a long time to run and may need to terminate before
finishing, this module provides a set_timeout decorator to wrap functions."""

__author__ = 'Stephen "Zero" Chappell ' \
             '<stephen.paul.chappell@atlantis-zero.net>'
__date__ = '18 December 2017'
__version__ = 1, 0, 1
__all__ = [
    'set_timeout',
    'run_with_timeout'
]

import multiprocessing
import sys
import time

DEFAULT_TIMEOUT = 60


def set_timeout(limit=None):
    """Return a wrapper that provides a timeout API for callers."""
    if limit is None:
        limit = DEFAULT_TIMEOUT
    _Timeout.validate_limit(limit)

    def wrapper(entry_point):
        return _Timeout(entry_point, limit)
    return wrapper


def run_with_timeout(limit, polling_interval, entry_point, *args, **kwargs):
    """Execute a callable object and automatically poll for results."""
    engine = set_timeout(limit)(entry_point)
    engine(*args, **kwargs)
    while engine.ready is False:
        time.sleep(polling_interval)
    return engine.value


def _target(queue, entry_point, *args, **kwargs):
    """Help with multiprocessing calls by being a top-level module function."""
    # noinspection PyPep8,PyBroadException
    try:
        queue.put((True, entry_point(*args, **kwargs)))
    except:
        queue.put((False, sys.exc_info()[1]))


class _Timeout:
    """_Timeout(entry_point, limit) -> _Timeout instance"""

    def __init__(self, entry_point, limit):
        """Initialize the _Timeout instance with all needed attributes."""
        self.__entry_point = entry_point
        self.__limit = limit
        self.__queue = multiprocessing.Queue()
        self.__process = multiprocessing.Process()
        self.__timeout = time.monotonic()

    def __call__(self, *args, **kwargs):
        """Begin execution of the entry point in a separate process."""
        self.cancel()
        self.__queue = multiprocessing.Queue(1)
        self.__process = multiprocessing.Process(
            target=_target,
            args=(self.__queue, self.__entry_point) + args,
            kwargs=kwargs
        )
        self.__process.daemon = True
        self.__process.start()
        self.__timeout = time.monotonic() + self.__limit

    def cancel(self):
        """Terminate execution if possible."""
        if self.__process.is_alive():
            self.__process.terminate()

    @property
    def ready(self):
        """Property letting callers know if a returned value is available."""
        if self.__queue.full():
            return True
        elif not self.__queue.empty():
            return True
        elif self.__timeout < time.monotonic():
            self.cancel()
        else:
            return False

    @property
    def value(self):
        """Property that retrieves a returned value if available."""
        if self.ready is True:
            valid, value = self.__queue.get()
            if valid:
                return value
            raise value
        raise TimeoutError('execution timed out before terminating')

    @property
    def limit(self):
        """Property controlling what the timeout period is in seconds."""
        return self.__limit

    @limit.setter
    def limit(self, value):
        self.validate_limit(value)
        self.__limit = value

    @staticmethod
    def validate_limit(value):
        """Verify that the limit's value is not too low."""
        if value <= 0:
            raise ValueError('limit must be greater than zero')
See the following example, which demonstrates its usage:
Example
from time import sleep


def main():
    # set_timeout comes from the module above
    timeout_after_four_seconds = set_timeout(4)
    # create copies of a function that have a timeout
    a = timeout_after_four_seconds(do_something)
    b = timeout_after_four_seconds(do_something)
    c = timeout_after_four_seconds(do_something)
    # execute the functions in separate processes
    a('Hello', 1)
    b('World', 5)
    c('Jacob', 3)
    # poll the functions to find out what they returned
    results = [a, b, c]
    polling = set(results)
    while polling:
        for process, name in zip(results, 'abc'):
            if process in polling:
                ready = process.ready
                if ready is True:  # if the function returned
                    print(name, 'returned', process.value)
                    polling.remove(process)
                elif ready is None:  # if the function took too long
                    print(name, 'reached timeout')
                    polling.remove(process)
                else:  # if the function is running
                    assert ready is False, 'ready must be True, False, or None'
        sleep(0.1)
    print('Done.')


def do_something(data, work):
    sleep(work)
    print(data)
    return work


if __name__ == '__main__':
    main()
Does the process you are running involve a loop?
If so, you can record a timestamp prior to starting the loop and include an if statement within the loop that calls sys.exit() to terminate the script if the current timestamp differs from the recorded start timestamp by more than x seconds. A minimal sketch of that idea follows.
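The threshold and the work inside the loop below are placeholders:
import sys
import time

MAX_SECONDS = 10  # placeholder threshold

start = time.time()
while True:
    # ... do one unit of work here ...
    if time.time() - start > MAX_SECONDS:
        sys.exit("Timed out after %d seconds" % MAX_SECONDS)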
To adapt the queue example from the docs to your case, all you need to do is pass the timeout to the q.get() call and terminate the process on timeout:
from Queue import Empty

...
try:
    print q.get(timeout=timeout)
except Empty:  # no value, timeout occurred
    p.terminate()
    q = None  # the queue might be corrupted after the `terminate()` call
p.join()
Using a Pipe might be more lightweight; otherwise the code is the same (you could use .poll(timeout) to find out whether there is data to receive).
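A rough sketch of that Pipe variant, assuming the same f() worker as above but sending its result over the connection instead of a queue (the 5-second timeout is illustrative):
from multiprocessing import Process, Pipe

def f(conn):
    # Ugly work
    conn.send(['hello', 'world'])

if __name__ == '__main__':
    parent_conn, child_conn = Pipe(duplex=False)  # parent end only receives
    p = Process(target=f, args=(child_conn,))
    p.start()
    if parent_conn.poll(5):  # wait up to 5 seconds for data
        print(parent_conn.recv())
    else:
        p.terminate()  # no data in time, give up on the worker
    p.join()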
