How to stop multiprocessing.Pool on Exception - python

In the code below, I raise an exception during the first call, yet the exception seems to be absorbed and all the other processes still run. What's the problem? What I want is: whenever the first exception occurs, print it and stop the multiprocessing pool immediately.
def func(i):
    if i == 0:
        raise Exception()
    else:
        time.sleep(1)
        print(i)

num_workers = 4
pool = multiprocessing.Pool(num_workers)
try:
    for i in range(4):
        pool.apply_async(func, args=(i,))
except:
    print("err")
pool.close()
pool.join()
The following code, edited according to HTF's answer,
import multiprocessing
import time

if __name__ == '__main__':
    def func(i):
        if i == 0:
            raise Exception()
        else:
            time.sleep(1)
            print(i)

    num_workers = 4
    pool = multiprocessing.Pool(num_workers)
    results = [pool.apply_async(func, args=(i,)) for i in range(4)]
    try:
        for result in results:
            result.get()
    except:
        print("err")
    pool.close()
    pool.join()
gives output
err
1
2
3
whereas I expected only err.

You just scheduled the tasks but you need to wait for the results:
results = [pool.apply_async(func, args=(i,)) for i in range(4)]
try:
    for result in results:
        result.get()
except:
    print("err")
Update Wed 7 Apr 20:42:59 UTC 2021:
You can try something like this:
import time
from functools import partial
from multiprocessing import Pool

def func(i):
    if i == 0:
        raise Exception("something bad happened")
    else:
        time.sleep(1)
        print(i)

def quit(pool, err):
    print(f"ERROR: {err}")
    pool.terminate()

def main():
    pool = Pool()
    partial_quit = partial(quit, pool)
    for i in range(4):
        pool.apply_async(func, args=(i,), error_callback=partial_quit)
    pool.close()
    pool.join()

if __name__ == "__main__":
    main()
Test:
$ python test1.py
ERROR: something bad happened
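Note that the error_callback runs in the parent process, on the pool's result-handler thread, which is why it can call pool.terminate() there; per the multiprocessing docs, callbacks should complete immediately, or they block the thread that handles results.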
If you need the return value back, it may actually be easier to use bare processes and a queue:
import time
from multiprocessing import Process, Queue

PROCS = 4

def worker(q, i):
    if i == 10:
        print("error")
        q.put_nowait("ERROR")
    else:
        time.sleep(1)
        print(i)
        q.put_nowait(i)

def main():
    q = Queue()
    procs = []
    for i in range(PROCS):
        p = Process(target=worker, args=(q, i))
        p.start()
        procs.append(p)
    count = len(procs)
    while count:
        result = q.get()
        if result == "ERROR":
            for p in procs:
                p.terminate()
            break
        print(f"Result for: {result}")
        count -= 1

if __name__ == "__main__":
    main()
Test:
$ python test2.py
0
2
1
3
Result for: 0
Result for: 2
Result for: 1
Result for: 3
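A further option, not from the original answers: concurrent.futures.ProcessPoolExecutor offers the same pattern with a slightly friendlier API. A minimal sketch, with a worker mirroring func above:

import time
from concurrent.futures import ProcessPoolExecutor, as_completed

def func(i):
    if i == 0:
        raise Exception("something bad happened")
    time.sleep(1)
    return i

def main():
    with ProcessPoolExecutor(max_workers=4) as ex:
        futures = [ex.submit(func, i) for i in range(4)]
        for fut in as_completed(futures):
            try:
                print(fut.result())   # re-raises the worker's exception
            except Exception as err:
                print(f"ERROR: {err}")
                for f in futures:
                    f.cancel()        # cancels tasks not yet started
                break

if __name__ == "__main__":
    main()

Already-running tasks still finish before the with block exits; on Python 3.9+ you can instead call ex.shutdown(cancel_futures=True).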

Related

get results of terminated map_async of multiprocessing

I am running a script with multiprocessing's map_async. What I need is to get the incomplete result from the AsyncResult object (assuming it has already finished computing some of the given cases) after terminating the Pool with terminate(). Using get() would just hang the script; how can I do this?
I know this can be done with apply_async with some manipulation, but can it be done somehow with map_async?
A working example of the situation:
import multiprocessing
import time

def example_run(i):
    time.sleep(0.7)
    return i

if __name__ == '__main__':
    terminate = False
    pool = multiprocessing.Pool(10)
    result_async = pool.map_async(example_run, range(100))
    i = 0
    while True:
        time.sleep(1.0)
        if i == 70:
            terminate = True
        print(result_async.ready(), terminate)
        if result_async.ready():
            break
        elif terminate:
            pool.terminate()
            break
        i += 10
    result = result_async.get()  # The problem is here: it just waits
    print(result)
    pool.close()
    pool.join()
I found a solution to the problem. With some digging, AsyncResult._value seems to hold the values of the execution, with None for any entry that has not been evaluated yet:
import multiprocessing
import time

def example_run(i):
    time.sleep(0.7)
    return i

if __name__ == '__main__':
    terminate = False
    pool = multiprocessing.Pool(10)
    result_async = pool.map_async(example_run, range(100))
    i = 0
    while True:
        time.sleep(1.0)
        if i == 70:
            terminate = True
        print(result_async.ready(), terminate)
        if result_async.ready():
            break
        elif terminate:
            pool.terminate()
            break
        i += 10
    result = []
    for value in result_async._value:
        if value is not None:
            result.append(value)
        else:
            result.append("failed")
    print(result)
    pool.close()
    pool.join()
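Keep in mind that _value is a private attribute and may change between CPython versions. A sketch of the same idea without touching internals, using pool.imap, whose iterator accepts a per-item timeout (the 7-second cutoff here is an arbitrary choice for illustration):

import multiprocessing
import time

def example_run(i):
    time.sleep(0.7)
    return i

if __name__ == '__main__':
    pool = multiprocessing.Pool(10)
    it = pool.imap(example_run, range(100))
    results = []
    deadline = time.time() + 7.0   # arbitrary cutoff for illustration
    try:
        while time.time() < deadline:
            # next() accepts a timeout and raises multiprocessing.TimeoutError
            results.append(it.next(timeout=deadline - time.time()))
    except (StopIteration, multiprocessing.TimeoutError):
        pass
    if len(results) < 100:
        pool.terminate()   # give up on the remaining inputs
    else:
        pool.close()
    pool.join()
    print('collected %d of 100 results' % len(results))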

Why does "if keyboard.is_pressed('q'):" not execute in my code?

In my code, inside f(), kbd.is_pressed('q') doesn't work: the message isn't printed, and the loop doesn't end when 'q' is pressed.
from multiprocessing import Process, Pipe
import multiprocessing as mp
import keyboard as kbd

x = 0
frames = []

def f(conn):
    global x
    while x == 0:
        frames.append(1)
        conn.send([42, None, 'hello', frames])
        if kbd.is_pressed('q'):
            print("Q was pressed.")
            break
    conn.close()
    print("Pipe is closed!")

if __name__ == '__main__':
    parent_conn, child_conn = Pipe()
    p = Process(target=f, args=(child_conn,))
    p.start()
    print(mp.current_process)
    print(parent_conn.recv())  # prints "[42, None, 'hello', [1, ...]]"
    f(child_conn)
    print(str(frames))
    p.join()
    print("CPU's = " + str(mp.cpu_count()))
    p.terminate()
    print(mp.parent_process)

python multithreading with queue running in sequence not in parallel

Why is this code not running in parallel? It takes 20 seconds to run, which means it is running sequentially. Thank you for your help.
import time
from queue import Queue
from threading import Thread

start = time.time()

def f():
    time.sleep(0.5)
    print("yes")
    return 'yes'

def do_stuff(q):
    while True:
        output = q.get()
        q.task_done()

q = Queue(maxsize=100)
for message_nbr in range(40):
    q.put(f())

num_threads = 10
for i in range(num_threads):
    worker = Thread(target=do_stuff, args=(q,))
    worker.setDaemon(True)
    worker.start()

q.join()
print("time: ", time.time() - start)  # ~20 seconds
The answer lies here:

for message_nbr in range(40):
    q.put(f())

You're putting the return value of f() into your queue 40 times, because you call f() instead of passing in f (the function object). Since each call sleeps for half a second, this loop alone takes 20 seconds, and it runs sequentially in the main thread!
Changing this code

def do_stuff(q):
    while True:
        output = q.get()
        q.task_done()

to this

def do_stuff(q):
    while True:
        output = q.get()
        output()
        q.task_done()

is also necessary (you need to call the function!).
Final:
import time
from queue import Queue
from threading import Thread

start = time.time()

def f():
    time.sleep(0.5)
    print("yes")
    return 'yes'

def do_stuff(q):
    while True:
        output = q.get()
        output()
        q.task_done()

q = Queue(maxsize=100)
for message_nbr in range(40):
    q.put(f)

num_threads = 10
for i in range(num_threads):
    worker = Thread(target=do_stuff, args=(q,))
    worker.setDaemon(True)
    worker.start()

q.join()
print("time: ", time.time() - start)  # time: 2.183439254760742
This variant, which puts the data on the queue and lets the workers call f, also works:

start = time.time()

def f(m):
    time.sleep(0.5)
    print("yes")
    return 'yes'

def do_stuff(q):
    while True:
        output = q.get()
        final_result = f(output)
        q.task_done()

q = Queue(maxsize=0)
for message_nbr in range(10):
    # q.put(f())
    q.put(message_nbr)

num_threads = 10
for i in range(num_threads):
    worker = Thread(target=do_stuff, args=(q,))
    worker.setDaemon(True)
    worker.start()

q.join()
print("time: ", time.time() - start)

Why does terminate() of multiprocessing.pool.ThreadPool hang?

I want to stop asynchronous multiprocessing jobs with a KeyboardInterrupt, but sometimes a hang occurs when terminate() is called.
from multiprocessing.pool import ThreadPool
import multiprocessing
import time
import queue
import inspect

def worker(index):
    print('{}: start'.format(index))
    for i in range(5):
        time.sleep(1)
    print('{}: stop'.format(index))
    return index, True

def wrapper(index, stopEvent, qResult):
    if stopEvent.is_set() is True:
        return index, False
    try:
        result = worker(index)
    except:
        print('*' * 50)
        return index, False
    else:
        if result[1] == True:
            qResult.put(result)
        return result

def watcher(qResult, stopEvent):
    cntQ = 0
    while True:
        try:
            result = qResult.get(timeout=10)
            qResult.task_done()
        except queue.Empty:
            if stopEvent.is_set() is True:
                break
        except KeyboardInterrupt:
            stopEvent.set()
        else:
            cntQ += 1
            print(result)
    qResult.join()
    qResult.close()
    print('qResult count:', cntQ)

def main():
    stopEvent = multiprocessing.Event()
    qResult = multiprocessing.JoinableQueue()
    qResult.cancel_join_thread()
    watch = multiprocessing.Process(target=watcher, args=(qResult, stopEvent))
    watch.start()
    pool = ThreadPool()
    lsRet = []
    for i in range(100000):
        try:
            ret = pool.apply_async(wrapper, args=(i, stopEvent, qResult))
            lsRet.append(ret)
        except KeyboardInterrupt:
            stopEvent.set()
            time.sleep(1)
            break
        if i+1 % 10 == 0:
            time.sleep(2)
    cntTotal = len(lsRet)
    cntRet = 0
    for ret in lsRet:
        if stopEvent.is_set():
            break
        try:
            ret.get()
        except KeyboardInterrupt:
            stopEvent.set()
            time.sleep(1)
        else:
            cntRet += 1
    if stopEvent.is_set() is False:
        stopEvent.set()
    print(inspect.stack()[0][1:4])
    if watch.is_alive() is True:
        watch.join()
    print(inspect.stack()[0][1:4])
    pool.terminate()  # Why hang??????????
    print(inspect.stack()[0][1:4])
    pool.join()
    print(cntTotal, cntRet)

if __name__ == '__main__':
    main()
main() starts a watcher() process and schedules many wrapper() tasks asynchronously on a multiprocessing.pool.ThreadPool.
wrapper() calls worker() and puts its result on the queue.
watcher() watches that queue of results.
If Ctrl-C is pressed, stopEvent is set.
When stopEvent is set, wrapper() stops calling worker(), and watcher() sees queue.Empty with stopEvent set and exits its loop.
Finally, main() calls terminate() on the pool.
Sometimes the processes finish fine, but sometimes it hangs; it is different each time.
You should put the code in a try/except block and catch the built-in KeyboardInterrupt exception; see the example here: Capture keyboardinterrupt
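As a minimal sketch of that advice (a simplified structure, not the asker's full program): catch KeyboardInterrupt in the parent, terminate the pool there, and always join in a finally block:

from multiprocessing.pool import ThreadPool
import time

def worker(index):
    time.sleep(1)
    return index

def main():
    pool = ThreadPool(4)
    try:
        results = [pool.apply_async(worker, args=(i,)) for i in range(20)]
        for r in results:
            print(r.get())
        pool.close()
    except KeyboardInterrupt:
        # Ctrl-C lands in the main thread; stop the workers from here
        pool.terminate()
    finally:
        pool.join()  # join() is safe after either close() or terminate()

if __name__ == '__main__':
    main()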

Exit multiprocesses gracefully in python3

I would like to exit the program gracefully on Ctrl+C / SIGINT or on user input. If possible, the terminal should prompt something like: "Hit enter to terminate".
Code to be executed with Python 3.6:
import multiprocessing

def worker(process):
    i = 0
    while True:
        print('Process %d count %d' % (process, i))
        i += 1

def main():
    available_num_cores = multiprocessing.cpu_count()
    use_num_cores = available_num_cores - 1 if available_num_cores > 1 else 1
    print('Using %d cores' % use_num_cores)
    pool = multiprocessing.Pool(use_num_cores)
    for i in range(0, use_num_cores):
        pool.apply_async(worker, args=(i,))
    pool.close()
    pool.join()

if __name__ == '__main__':
    main()
The accepted answer for the question Catch Ctrl+C / SIGINT and exit multiprocesses gracefully in python isn't working; it fails with this error:
Process SpawnPoolWorker-1:
Process 0 count 1572
Process SpawnPoolWorker-2:
Process 1 count 1472
Process SpawnPoolWorker-3:
Traceback (most recent call last):
Any help would be appreciated. Thanks!
You need to make sure SIGINT is ignored by the child processes.
Then you just wait either for user input or for a Ctrl+C to be issued.
import signal
import multiprocessing

def initializer():
    """Ignore SIGINT in child workers."""
    signal.signal(signal.SIGINT, signal.SIG_IGN)

def main():
    # worker and use_num_cores as defined in the question
    try:
        pool = multiprocessing.Pool(use_num_cores, initializer=initializer)
        for i in range(0, use_num_cores):
            pool.apply_async(worker, args=(i,))
        pool.close()
        input("Hit enter to terminate")
    except KeyboardInterrupt:
        print("CTRL+C")
    finally:
        pool.terminate()
        pool.join()
        print("Bye have a great time!")
