Python multiprocessing callback - python

Using this post as inspiration, I am trying to add a callback. I am using GLib.timeout_add to poll for the result, as I want to use it in a Gtk app. However, main_quit() is not being called properly, and thus the following code hangs after finishing:
import multiprocessing
import queue
import collections

import gi

gi.require_version("Gtk", "3.0")
from gi.repository import GLib, Gtk

Msg = collections.namedtuple("Msg", ["event", "args"])


class BaseProcess(multiprocessing.Process):
    "A process backed by internal queues for simple messaging"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.requests = multiprocessing.Queue()
        self.responses = multiprocessing.Queue()

    def send(self, event, *args, finished_callback=None):
        "Puts the event and args as a `Msg` on the requests queue"
        msg = Msg(event, args)
        self.requests.put(msg)
        GLib.timeout_add(100, self._monitor_process, finished_callback)

    def run(self):
        while True:
            event, args = self.requests.get()
            if event == "quit":
                break
            handler = getattr(self, "do_%s" % event, None)
            if not handler:
                raise NotImplementedError("Process has no handler for [%s]" % event)
            msg = handler(*args)
            self.responses.put(msg)

    def _monitor_process(self, finished_callback):
        print(f"in _monitor_process {finished_callback}", flush=True)
        try:
            result = self.responses.get(False)
            if finished_callback is not None:
                finished_callback(result)
        except queue.Empty:
            return GLib.SOURCE_CONTINUE
        return GLib.SOURCE_REMOVE


class MyProcess(BaseProcess):
    "test process class"

    def do_sum(self, arg1, arg2):
        "test method"
        print(f"do_sum {arg1 + arg2}", flush=True)
        return arg1 + arg2


def finished_callback(result):
    print(f"result {result}", flush=True)
    Gtk.main_quit()


if __name__ == "__main__":
    process = MyProcess()
    process.start()
    process.send('sum', 1, 2, finished_callback=finished_callback)
    Gtk.main()
How can I prevent the code from hanging?
Edit: I see from this page that others have noted problems. How can I build a Gtk-based app to control long-running processes like scanners without blocking the main thread?

The threading docs for PyGObject led me to the answer, which is to set process.daemon = True before starting the process:
if __name__ == "__main__":
    process = MyProcess()
    process.daemon = True
    process.start()
    process.send('sum', 1, 2, finished_callback=finished_callback)
    Gtk.main()
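As an alternative to relying on the daemon flag, note that the run() loop in BaseProcess already breaks out when it receives a "quit" event, so the callback could ask the worker to exit cleanly before stopping the Gtk main loop. A rough, untested sketch, reusing Msg and the module-level process variable from the code above:

def finished_callback(result):
    print(f"result {result}", flush=True)
    # Ask the worker's run() loop to break, wait for the process to exit,
    # then stop the Gtk main loop.
    process.requests.put(Msg("quit", ()))
    process.join()
    Gtk.main_quit()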

Related

Send data to Python thread, then read a response using Queue

It's quite easy to send or receive data through threads using the queue module when doing one thing at a time, but I couldn't figure out how to send something to a thread and then properly wait for a return value.
In the example below, I was expecting to send something to the thread to be processed, then harvest the result, but the t.queue.get() in the main function receives what was just sent above instead of waiting for the thread to return. How can I get around this?
#!/usr/bin/env python3
from threading import Thread
from queue import Queue


class MyThread(Thread):
    queue: Queue

    def __init__(self, *args, **kwargs):
        super().__init__()
        self.queue = Queue()
        self.daemon = True

    # receives a name, then prints "Hello, name!"
    def run(self):
        while True:
            val = self.queue.get()
            if not val:
                break
            self.queue.put(f'Hello, {val}!')


def main():
    t = MyThread()
    t.start()
    # sends string to thread
    t.queue.put('Jurandir')
    # expects to receive "Hello, Jurandir!",
    # but "Jurandir" is immediately returned
    ret = t.queue.get()
    print(ret)


if __name__ == '__main__':
    main()
The thing is that you are getting the alleged result from the queue immediately, before the worker has added its result. You can split it into an "input queue" and a "results queue", and then wait in the main thread until there is some output in the results queue.
#!/usr/bin/env python3
from threading import Thread, Lock
from queue import Queue


class MyThread(Thread):
    def __init__(self, *args, **kwargs):
        super().__init__()
        self.input_queue = Queue()
        self.results_queue = Queue()
        self.daemon = True

    # receives a name, then prints "Hello, name!"
    def run(self):
        while True:
            val = self.input_queue.get()
            if not val:
                break
            self.results_queue.put(f'Hello, {val}!')


def main():
    t = MyThread()
    t.start()
    # sends string to thread
    t.input_queue.put('Jurandir')
    ret = t.results_queue.get()
    while ret is None:
        ret = t.results_queue.get()
    print(ret)


if __name__ == '__main__':
    main()
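Since queue.Queue.get() blocks until an item is available, the extra while ret is None loop is only needed if the worker could legitimately put None on the results queue. Assuming exactly one reply per request, a minimal main() could simply be:

def main():
    t = MyThread()
    t.start()
    t.input_queue.put('Jurandir')
    # get() blocks until the worker has put its reply on the results queue
    print(t.results_queue.get())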

Python multiprocessing Queue child class losing attributes in process

I am trying to implement a child class of the multiprocessing Queue in Python. The child class contains a simple Boolean flag "ready". When I send the queue to a new process, the ready attribute disappears. The following code demonstrates the problem:
import multiprocessing
import multiprocessing.queues


class ReadyQueue(multiprocessing.queues.Queue):
    def __init__(self, ctx, *args, **kwargs):
        super(ReadyQueue, self).__init__(ctx=ctx, *args, **kwargs)
        self.ready = False


def ready_queue(*args, **kwargs):
    return ReadyQueue(ctx=multiprocessing.get_context(), *args, **kwargs)


def foo(q):
    print(q.ready)


if __name__ == "__main__":
    my_queue = ready_queue()
    print(my_queue.ready)
    p = multiprocessing.Process(target=foo, args=(my_queue,))
    p.start()
    p.join()
With the output:
False
Process Process-1:
Traceback (most recent call last):
  File "C:\Users\acre018\Anaconda3\envs\EIT_Qt\lib\multiprocessing\process.py", line 315, in _bootstrap
    self.run()
  File "C:\Users\acre018\Anaconda3\envs\EIT_Qt\lib\multiprocessing\process.py", line 108, in run
    self._target(*self._args, **self._kwargs)
  File "C:\Users\acre018\github\EIT_Qt\Experiments\ready_queue_test.py", line 16, in foo
    print(q.ready)
AttributeError: 'ReadyQueue' object has no attribute 'ready'
I implemented this workaround:
import multiprocessing
from queue import Empty
import time
import ctypes


class ReadyQueue:
    def __init__(self, *args, **kwargs):
        self.queue = multiprocessing.Queue(*args, **kwargs)
        self._ready = multiprocessing.Value(ctypes.c_bool, False)

    def set_ready(self):
        self._ready.value = True

    def set_not_ready(self):
        self._ready.value = False
        self.clear()

    def is_ready(self):
        return self._ready.value

    def clear(self):
        try:
            while True:
                self.queue.get(block=False)
        except Empty:
            pass

    def get(self, block=True, timeout=None):
        return self.queue.get(block, timeout)

    def put(self, obj, block=True, timeout=None):
        return self.queue.put(obj, block, timeout)

    def full(self):
        return self.queue.full()

    def empty(self):
        return self.queue.empty()

    def qsize(self):
        return self.queue.qsize()


def foo(q):
    while q.is_ready():
        time.sleep(1)
        q.put("hello from foo")
    print("q no longer ready, foo loop finished")


if __name__ == "__main__":
    my_queue = ReadyQueue()
    my_queue.set_ready()
    p = multiprocessing.Process(target=foo, args=(my_queue,))
    p.start()
    for i in range(2):
        print(my_queue.get())
    time.sleep(2)
    print("my_queue._ready = %s, qsize: %d. Setting not ready.." % (str(my_queue.is_ready()), my_queue.qsize()))
    my_queue.set_not_ready()
    print("my_queue._ready = %s, qusize: %d" % (str(my_queue.is_ready()), my_queue.qsize()))
With the output:
C:\Users\acre018\Anaconda3\envs\test_pyqt\python.exe C:/Users/acre018/github/EIT_Qt/Experiments/ready_queue_test2.py
hello from foo
hello from foo
my_queue._ready = True, qsize: 2. Setting not ready..
my_queue._ready = False, qusize: 0
q no longer ready, foo loop finished
Process finished with exit code 0
The workaround is to have my ReadyQueue class not inherit from multiprocessing.queues.Queue but hold a queue as an attribute. For convenience I implemented the methods that I need from Queue as simple pass-throughs to the queue attribute, and I also implemented a clear method.
Note that in my first example I neglected to make self.ready a multiprocessing.Value, so I wouldn't have been able to edit it across processes, but I tested after fixing that and it was not the source of the issue.
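For reference, the attribute disappears in the first example because multiprocessing.queues.Queue defines __getstate__/__setstate__ and pickles only its own internals when the queue is sent to the child process, so extra attributes are silently dropped. A rough, untested sketch of how the inheritance approach could be made to survive pickling, using a shared Value so the flag is also writable across processes:

import ctypes
import multiprocessing
import multiprocessing.queues


class ReadyQueue(multiprocessing.queues.Queue):
    def __init__(self, ctx, *args, **kwargs):
        super().__init__(ctx=ctx, *args, **kwargs)
        self._ready = multiprocessing.Value(ctypes.c_bool, False)

    def __getstate__(self):
        # Carry our extra state alongside the base class internals.
        return (super().__getstate__(), self._ready)

    def __setstate__(self, state):
        base_state, self._ready = state
        super().__setstate__(base_state)

    def is_ready(self):
        return self._ready.value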

Worker process is not being called in simple example?

Here's my simple example code:
# -*- coding: utf-8 -*-
import multiprocessing
import time


def some_function():
    return "hello!"


def get_address_type(address):
    return 'tcp'


class PseudoThing():
    def __init__(self, address):
        self.address = address
        self.server = None

    def attach(self):
        if get_address_type(self.address) == 'tcp':
            ip, port = self.address.split(':', 1)
            self.server = some_function()
        else:
            self.server = some_function()

    def worker(self):
        print("in worker!")
        for i in range(10):
            print("I'm a worker doing worker things...")
            self.attach()
            if self.server:
                return True
            time.sleep(0.2)

    def parent(self):
        print("I'm a parent doing parent things!")

    def start_process(self):
        p = multiprocessing.Process(target=self.worker)
        print("starting process")
        p.start()
        return p


def main():
    nvr = PseudoThing("kitty")
    p = nvr.start_process()
    p.join()


print("__name__ = {}".format(__name__))

if __name__ == "__main__":
    print("doing main!")
    main()
However, nothing is getting printed out from worker, even though I would expect it to be. Instead, this is the output of the program:
__name__ = __main__
doing main!
starting process
I suspect this has something to do with the class structure (I recall having issues with this on Python 2), but I am not entirely sure. Where can I add more print statements to figure out what's going on? Where are things going wrong?

Python multiprocessing: Kill producer and consumer processes with KeyboardInterrupt

I want the consumer and producer processes in the following Python script to stop when CTRL+C is pressed. But the processes do not stop - the keyboard interrupt is not passed to them. Also, the except block of the main process is never entered.
import time
import multiprocessing as mp
from multiprocessing.managers import SyncManager
import signal


class Consumer(mp.Process):

    def __init__(self, **kwargs):
        mp.Process.__init__(self, **kwargs)

    def run(self):
        proc_name = self.name
        try:
            while True:
                print("{}".format(proc_name))
                time.sleep(3)
        except KeyboardInterrupt:
            print("{} stopped".format(proc_name))  # never printed
            return


class Producer(mp.Process):

    def __init__(self, **kwargs):
        mp.Process.__init__(self, **kwargs)

    def run(self):
        try:
            while True:
                time.sleep(3)
                print("Producer here.")
        except KeyboardInterrupt:
            print("Producer stopped.")  # never printed
            return


def main():

    def __init_worker():
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        print('init')  # not printed!!??

    # manager = SyncManager()  # does not change anything
    # manager.start(__init_worker)

    consumers = [Consumer(target=__init_worker) for i in xrange(3)]
    producer = Producer(target=__init_worker)

    producer.daemon = True  # does not change anything
    producer.start()
    for c in consumers:
        c.daemon = True
        c.start()

    try:
        producer.join()
        for c in consumers:
            c.join()
    except Exception as e:
        print('STOP')  # never printed
        raise e


if __name__ == '__main__':
    main()
There might also be a solution for my task using a multiprocessing.Pool for the consumers and letting the main process act as the producer, but I would like to know why my implementation is not working as intended and what I need to adjust.
I realised that __init_worker does not seem to be executed (it makes no difference whether it is located outside of main). Could that be the reason the KeyboardInterrupt is not passed to the consumer and producer processes?
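One detail worth noting: Consumer and Producer both override run() without ever calling the target passed to the constructor, so __init_worker can never execute there. If a per-process initializer is really wanted, it has to be invoked explicitly; a small, untested sketch with a hypothetical init_func hook:

import signal
import time
import multiprocessing as mp


def init_worker():
    # Runs inside the child process, before its work loop starts.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    print('init')


class Consumer(mp.Process):
    def __init__(self, init_func=None, **kwargs):
        mp.Process.__init__(self, **kwargs)
        self.init_func = init_func  # hypothetical explicit initializer hook

    def run(self):
        if self.init_func is not None:
            self.init_func()
        for _ in range(3):
            print(self.name)
            time.sleep(1)


if __name__ == '__main__':
    c = Consumer(init_func=init_worker)
    c.start()
    c.join()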
Based on eryksun's comments I improved my code and now use a multiprocessing.Event. The script now works as expected. I also removed some lines which I think are no longer necessary. Since I did not find any similar solution when searching the web, here is my code:
import time
import multiprocessing as mp


class Consumer(mp.Process):

    def __init__(self, quit_event, **kwargs):
        mp.Process.__init__(self, **kwargs)
        self.quit_event = quit_event

    def run(self):
        proc_name = self.name
        while not self.quit_event.is_set():
            print("{}".format(proc_name))
            time.sleep(3)
        print("{} stopped".format(proc_name))
        return


class Producer(mp.Process):

    def __init__(self, quit_event, **kwargs):
        mp.Process.__init__(self, **kwargs)
        self.quit_event = quit_event

    def run(self):
        while not self.quit_event.is_set():
            print("Producer here.")
            time.sleep(3)
        print("Producer stopped")
        return


def main():
    quit_event = mp.Event()

    consumers = [Consumer(quit_event) for i in xrange(3)]
    producer = Producer(quit_event)

    producer.start()
    for c in consumers:
        c.start()

    try:
        producer.join()
        for c in consumers:
            c.join()
    except KeyboardInterrupt as e:
        print('\nSTOP')
        quit_event.set()
    except Exception as e:
        quit_event.set()
        raise e
    finally:
        producer.terminate()
        producer.join()
        for c in consumers:
            c.terminate()
            c.join()


if __name__ == '__main__':
    main()
Hoping that it helps somebody.
Edit: Swapped the terminate and join statements.

Python sys.excepthook working only on main process but not on subprocesses

I have an app with some subprocesses running, and I have successfully set up sys.excepthook exception handling for the main process. Now I want to set the same hook on the subprocesses. I would expect it to be as simple as copying the exact lines of code I used on the main process, but it didn't work.
Here is my code:
class Consumer(multiprocessing.Process):

    def __init__(self, codec_status_queue, logger_queue):
        multiprocessing.Process.__init__(self)
        self.codec_status_queue = codec_status_queue
        self.logger_queue = logger_queue

    def run(self):
        # Set default unhandled exceptions handler
        uncaughtErrorHandler = UncaughtErrorHandler(self.logger_queue)
        sys.excepthook = uncaughtErrorHandler.error_handler
        1/0


class UncaughtErrorHandler(object):

    def __init__(self, logger_queue, child_processes=None):
        self.logger_queue = logger_queue
        self.child_processes = child_processes

    def error_handler(self, type, value, trace_back):
        trace_formatted = "".join(traceback.format_tb(trace_back))
        exeption_message = "Unhandled Exception:\n Type: %s\n Value: %s\n Line: %s\n Traceback:\n %s" % (type, value.message, trace_back.tb_lineno, trace_formatted)
        logger_queue.put((LoggerThread.CRITICAL, exeption_message))
        if self.child_processes:
            self.stop_children()
        # Stopping this process
        sys.exit()

    def stop_children(self):
        num_children = len(self.child_processes)
        logger_queue.put((LoggerThread.DEBUG, "Terminating child processes (%s)" % num_children))
        for process in self.child_processes:
            log_message = "Terminating %s with PID %s" % (process.name, process.pid)
            logger_queue.put((LoggerThread.DEBUG, log_message))
            process.terminate()


if __name__ == '__main__':
    ...
    # Create processes and communication queues
    codec_status_queue = multiprocessing.Queue()
    num_consumers = multiprocessing.cpu_count() * 2
    print 'Creating %d consumers' % num_consumers
    consumers = [ Consumer(codec_status_queue, logger_queue)
                  for i in xrange(num_consumers) ]

    # Set default unhandled exceptions handler
    uncaughtErrorHandler = UncaughtErrorHandler(logger_queue, consumers)
    sys.excepthook = uncaughtErrorHandler.error_handler

    # Start processes
    for consumer in consumers:
        consumer.daemon = True
        consumer.start()
If I put the 1/0 in the __main__ part, the UncaughtErrorHandler catches the exception, but when the 1/0 is placed as shown above, it doesn't.
Maybe someone can tell me what I am doing wrong?
The following code was written for Python 3.x but can be adapted to work with Python 2.x instead. It provides an alternative to overriding sys.excepthook in child processes: multiprocessing runs a child's run() method inside its own try/except, so an exception raised there never reaches the interpreter's unhandled-exception hook. A simple fix is to catch all exceptions in run() and hand the data from sys.exc_info over to the exception handler yourself. The main process could use a similar pattern for its exceptions but retains the original design from your program. The example shown below should be a full working demonstration you can play around with and adapt to your needs.
#! /usr/bin/env python3
import logging
import multiprocessing
import queue
import sys
import threading
import time
import traceback


def main():
    """Demonstrate exception handling and logging in several processes."""
    logger_queue = multiprocessing.Queue()
    logger_thread = LoggerThread(logger_queue)
    logger_thread.start()
    try:
        # Create processes and communication queues
        codec_status_queue = multiprocessing.Queue()
        num_consumers = multiprocessing.cpu_count() * 2
        print('Creating {} consumers'.format(num_consumers))
        consumers = [Consumer(codec_status_queue, logger_queue)
                     for _ in range(num_consumers)]
        # Set default unhandled exceptions handler
        uncaught_error_handler = UncaughtErrorHandler(logger_queue, consumers)
        sys.excepthook = uncaught_error_handler.error_handler
        # Start processes
        for consumer in consumers:
            consumer.start()
        time.sleep(2)
    finally:
        logger_thread.shutdown()


def get_message(value):
    """Retrieve an exception's error message and return it."""
    if hasattr(value, 'message'):
        return value.message
    if hasattr(value, 'args') and value.args:
        return value.args[0]


class LoggerThread(threading.Thread):
    """Handle logging messages coming from various sources via a queue."""

    CRITICAL = logging.CRITICAL
    DEBUG = logging.DEBUG

    def __init__(self, logger_queue):
        """Initialize an instance of the LoggerThread class."""
        super().__init__()
        self.logger_queue = logger_queue
        self.mutex = threading.Lock()
        self.running = False

    def run(self):
        """Process messages coming through the queue until shutdown."""
        self.running = True
        while self.running:
            try:
                while True:
                    self.handle_message(*self.logger_queue.get(True, 0.1))
            except queue.Empty:
                pass

    def handle_message(self, level, message):
        """Show the message while ensuring a guaranteed order on screen."""
        with self.mutex:
            print('Level:', level)
            print('Message:', message)
            print('=' * 80, flush=True)

    def shutdown(self):
        """Signal the thread to exit once it runs out of messages."""
        self.running = False


class Consumer(multiprocessing.Process):
    """Simulate a consumer process that handles data from a queue."""

    def __init__(self, codec_status_queue, logger_queue):
        """Initialize an instance of the Consumer class."""
        super().__init__()
        self.codec_status_queue = codec_status_queue
        self.logger_queue = logger_queue
        self.daemon = True

    def run(self):
        """Begin working as a consumer while handling any exceptions."""
        # Set default unhandled exceptions handler
        uncaught_error_handler = UncaughtErrorHandler(self.logger_queue)
        try:
            self.do_consumer_work()
        except:
            uncaught_error_handler.error_handler(*sys.exc_info())

    def do_consumer_work(self):
        """Pretend to be doing the work of a consumer."""
        junk = 1 / 0
        print('Process', self.ident, 'calculated', junk)


class UncaughtErrorHandler:
    """Organize error handling to automatically terminate child processes."""

    def __init__(self, logger_queue, child_processes=None):
        """Initialize an instance of the UncaughtErrorHandler class."""
        self.logger_queue = logger_queue
        self.child_processes = child_processes

    def error_handler(self, kind, value, trace_back):
        """Record errors as they happen and terminate the process tree."""
        trace_formatted = ''.join(traceback.format_tb(trace_back))
        exception_message = ('Unhandled Exception:\n'
                             ' Type: {}\n'
                             ' Value: {}\n'
                             ' Line: {}\n'
                             ' Traceback:\n{}').format(
            kind, get_message(value), trace_back.tb_lineno, trace_formatted)
        self.logger_queue.put((LoggerThread.CRITICAL, exception_message))
        if self.child_processes:
            self.stop_children()
        # Stopping this process
        sys.exit()

    def stop_children(self):
        """Terminate all children associated with this error handler."""
        num_children = len(self.child_processes)
        log_message = 'Terminating child processes({})'.format(num_children)
        self.logger_queue.put((LoggerThread.DEBUG, log_message))
        for process in self.child_processes:
            log_message = 'Terminating {} with PID {}'.format(
                process.name, process.pid)
            self.logger_queue.put((LoggerThread.DEBUG, log_message))
            process.terminate()


if __name__ == '__main__':
    main()
