Why does terminate() of multiprocessing.pool.ThreadPool hang? - python

I want to stop asynchronous multiprocessing jobs with a KeyboardInterrupt, but sometimes a hang occurs when terminate() is called.
from multiprocessing.pool import ThreadPool
import multiprocessing
import time
import queue
import inspect


def worker(index):
    print('{}: start'.format(index))
    for i in range(5):
        time.sleep(1)
    print('{}: stop'.format(index))
    return index, True


def wrapper(index, stopEvent, qResult):
    if stopEvent.is_set() is True:
        return index, False
    try:
        result = worker(index)
    except:
        print('*' * 50)
        return index, False
    else:
        if result[1] == True:
            qResult.put(result)
        return result


def watcher(qResult, stopEvent):
    cntQ = 0
    while True:
        try:
            result = qResult.get(timeout=10)
            qResult.task_done()
        except queue.Empty:
            if stopEvent.is_set() is True:
                break
        except KeyboardInterrupt:
            stopEvent.set()
        else:
            cntQ += 1
            print(result)
    qResult.join()
    qResult.close()
    print('qResult count:', cntQ)


def main():
    stopEvent = multiprocessing.Event()
    qResult = multiprocessing.JoinableQueue()
    qResult.cancel_join_thread()
    watch = multiprocessing.Process(target=watcher, args=(qResult, stopEvent))
    watch.start()
    pool = ThreadPool()
    lsRet = []
    for i in range(100000):
        try:
            ret = pool.apply_async(wrapper, args=(i, stopEvent, qResult))
            lsRet.append(ret)
        except KeyboardInterrupt:
            stopEvent.set()
            time.sleep(1)
            break
        if (i + 1) % 10 == 0:  # parentheses needed: % binds tighter than +
            time.sleep(2)
    cntTotal = len(lsRet)
    cntRet = 0
    for ret in lsRet:
        if stopEvent.is_set():
            break
        try:
            ret.get()
        except KeyboardInterrupt:
            stopEvent.set()
            time.sleep(1)
        else:
            cntRet += 1
    if stopEvent.is_set() is False:
        stopEvent.set()
    print(inspect.stack()[0][1:4])
    if watch.is_alive() is True:
        watch.join()
    print(inspect.stack()[0][1:4])
    pool.terminate()  # Why hang??????????
    print(inspect.stack()[0][1:4])
    pool.join()
    print(cntTotal, cntRet)


if __name__ == '__main__':
    main()
main() starts a watcher() process and schedules many wrapper() jobs asynchronously on a multiprocessing.pool.ThreadPool.
wrapper() calls worker() and puts its result on a queue.
watcher() watches that queue of results.
If Ctrl-C is pressed, stopEvent is set.
Once stopEvent is set, wrapper() stops calling worker(), and watcher() hits queue.Empty, sees stopEvent, and exits its loop.
Finally, main() calls terminate() on the pool.
Sometimes everything finishes cleanly, but sometimes it hangs; the behavior is different each time.

You should put the code in a try/except block and catch the built-in KeyboardInterrupt exception; see the example here: Capture keyboardinterrupt
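For illustration, a minimal sketch of that structure, using a hypothetical slow_task stand-in rather than the asker's worker(): the main thread catches KeyboardInterrupt while collecting results and only then tears the pool down. Note that Python cannot forcibly kill a running thread, so ThreadPool.terminate() still has to let already-executing tasks finish, which is related to why it can appear to hang.
from multiprocessing.pool import ThreadPool
import time


def slow_task(i):
    # hypothetical stand-in for the real worker
    time.sleep(1)
    return i


if __name__ == '__main__':
    pool = ThreadPool(4)
    try:
        results = [pool.apply_async(slow_task, args=(i,)) for i in range(20)]
        for r in results:
            print(r.get())
        pool.close()
    except KeyboardInterrupt:
        pool.terminate()  # discards pending tasks; running ones still complete
    finally:
        pool.join()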

Related

How to stop threads using Queue()

I have a program (Python 3.9.10) that has a read queue and a write queue. One thread reads and, once an item is read, sends it to the write queue; another thread writes.
All works fine unless there is an error. If there is, the threads do not stop.
In the following code I am simulating an error being detected in the read thread and trying to stop the threads from reading/writing so the program exits; however, the program/threads stay active and the program never finishes. If I remove the error-simulation code, the threads stop and the program finishes.
I wish to handle the errors WITHIN the threads and, if need be, stop the threads/program without throwing an error up.
What am I doing wrong? Thanks
Here is a working example of my issue:
import datetime
import traceback
from queue import Queue
from threading import Thread
import time

dlQueue = Queue()
writeQueue = Queue()
dlQDone = False
errorStop = False


def log(text):
    text = datetime.datetime.now().strftime("%Y/%m/%d, %H:%M:%S ") + text
    print(text)


def errorBreak():
    global dlQueue
    global writeQueue
    global errorStop
    global dlQDone
    dlQueue = Queue()
    writeQueue = Queue()
    errorStop = True
    dlQDone = True


def downloadTable(t, q):
    global dlQDone
    global errorStop
    while True:
        if errorStop:
            return
        nextQ = q.get()
        log("READING: " + nextQ)
        writeQueue.put("Writing " + nextQ)
        log("DONE READING: " + nextQ)
        ####simulating an error and need to exit threads###
        if nextQ == "Read 7":
            log("Breaking Read")
            errorBreak()
            return
        ###################################################
        q.task_done()
        if q.qsize() == 0:
            log("Download QUEUE finished")
            dlQDone = True
            return


def writeTable(t, q):
    global errorStop
    global dlQDone
    while True:
        if errorStop:
            log("Error Stop return")
            return
        nextQ = q.get()
        log("WRITING: " + nextQ)
        log("DONE WRITING: " + nextQ)
        q.task_done()
        if dlQDone:
            if q.qsize() == 0:
                log("Writing QUEUE finished")
                return


try:
    log("PROCESS STARTING!!")
    for i in range(10):
        dlQueue.put("Read " + str(i))
    startTime = time.time()
    log("Starting threaded pull....")
    dlWorker = Thread(target=downloadTable, args=("DL", dlQueue))
    dlWorker.start()
    writeWorker = Thread(target=writeTable, args=("Write", writeQueue))
    writeWorker.start()
    dlQueue.join()
    writeQueue.join()
    log(f"Finished thread in {str(time.time() - startTime)} seconds")  # CANNOT GET HERE
    log("Threads: " + str(dlWorker.is_alive()) + str(writeWorker.is_alive()))
except Exception as error:
    log(str(error))
    log(traceback.format_exc())
If I understood you correctly, you want to stop both threads in case there's some error that warrants it; you can do that with a threading.Event and by giving your queue reads a timeout.
import datetime
import queue
import threading

dlQueue = queue.Queue()
writeQueue = queue.Queue()
stop_event = threading.Event()


def log(text):
    text = datetime.datetime.now().strftime("%Y/%m/%d, %H:%M:%S ") + text
    print(text)


def downloadTable(t: str, q: queue.Queue):
    while not stop_event.is_set():
        try:
            nextQ = q.get(timeout=1)
        except queue.Empty:
            continue
        log("READING: " + nextQ)
        writeQueue.put("Writing " + nextQ)
        log("DONE READING: " + nextQ)
        if nextQ == "7":
            log("Breaking Read")
            stop_event.set()
            break
        q.task_done()
    log("Download thread exiting")


def writeTable(t, q):
    while not stop_event.is_set():
        try:
            nextQ = q.get(timeout=1)
        except queue.Empty:
            continue
        log("WRITING: " + nextQ)
        log("DONE WRITING: " + nextQ)
        q.task_done()
    log("Write thread exiting")


def main():
    log("PROCESS STARTING!!")
    for i in range(10):
        dlQueue.put(f"{i}")
    log("Starting threaded pull....")
    dlWorker = threading.Thread(target=downloadTable, args=("DL", dlQueue))
    dlWorker.start()
    writeWorker = threading.Thread(target=writeTable, args=("Write", writeQueue))
    writeWorker.start()
    dlWorker.join()
    writeWorker.join()


if __name__ == "__main__":
    main()
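The get(timeout=1) calls are the key design choice here: a plain q.get() blocks forever on an empty queue, so a thread parked there can never wake up to re-check stop_event, which is essentially why the original version never finished.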

How to stop multiprocessing.Pool on Exception

In the code below I raise an exception during the first call, yet the exception seems to be absorbed and all the other processes still execute. What's the problem? What I want is that whenever the first exception occurs, it is printed and the multiprocessing pool stops directly.
import multiprocessing
import time


def func(i):
    if i == 0:
        raise Exception()
    else:
        time.sleep(1)
        print(i)


num_workers = 4
pool = multiprocessing.Pool(num_workers)
try:
    for i in range(4):
        pool.apply_async(func, args=(i,))
except:
    print("err")
pool.close()
pool.join()
The following code, edited according to HTF's answer,
import multiprocessing
import time

if __name__ == '__main__':
    def func(i):
        if i == 0:
            raise Exception()
        else:
            time.sleep(1)
            print(i)

    num_workers = 4
    pool = multiprocessing.Pool(num_workers)
    results = [pool.apply_async(func, args=(i,)) for i in range(4)]
    try:
        for result in results:
            result.get()
    except:
        print("err")
    pool.close()
    pool.join()
gives this output:
err
1
2
3
whereas I expect only err.
You just scheduled the tasks; apply_async returns immediately, and a worker's exception is only re-raised in the parent when you call get() on its result, so you need to wait for the results:
results = [pool.apply_async(func, args=(i,)) for i in range(4)]
try:
    for result in results:
        result.get()
except:
    print("err")
Update Wed 7 Apr 20:42:59 UTC 2021:
You can try something like this:
import time
from functools import partial
from multiprocessing import Pool


def func(i):
    if i == 0:
        raise Exception("something bad happened")
    else:
        time.sleep(1)
        print(i)


def quit(pool, err):
    print(f"ERROR: {err}")
    pool.terminate()


def main():
    pool = Pool()
    partial_quit = partial(quit, pool)
    for i in range(4):
        pool.apply_async(func, args=(i,), error_callback=partial_quit)
    pool.close()
    pool.join()


if __name__ == "__main__":
    main()
Test:
$ python test1.py
ERROR: something bad happened
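The error_callback is invoked in the parent process with the exception instance as soon as a task fails, so the pool is terminated without first waiting on every result the way the get() loop above does.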
If you need the return values back, it may actually be easier to use bare processes and a queue:
import time
from multiprocessing import Process, Queue

PROCS = 4


def worker(q, i):
    if i == 10:
        print("error")
        q.put_nowait("ERROR")
    else:
        time.sleep(1)
        print(i)
        q.put_nowait(i)


def main():
    q = Queue()
    procs = []
    for i in range(PROCS):
        p = Process(target=worker, args=(q, i))
        p.start()
        procs.append(p)
    count = len(procs)
    while count:
        result = q.get()
        if result == "ERROR":
            for p in procs:
                p.terminate()
            break
        print(f"Result for: {result}")
        count -= 1


if __name__ == "__main__":
    main()
Test:
$ python test2.py
0
2
1
3
Result for: 0
Result for: 2
Result for: 1
Result for: 3

get results of terminated map_async of multiprocessing

I am running a script with multiprocessing map_async. What I need is to get the incomplete result from the AsyncResult object (assuming it has already finished calculating some of the given cases) after terminating the Pool with terminate(). Using get() would just hang the script; how can I do this?
I know this can be done with apply_async with some manipulation, but can it be done somehow with map_async?
working example of the situation:
import multiprocessing
import time


def example_run(i):
    time.sleep(0.7)
    return i


if __name__ == '__main__':
    terminate = False
    pool = multiprocessing.Pool(10)
    result_async = pool.map_async(example_run, range(100))
    i = 0
    while True:
        time.sleep(1.0)
        if i == 70:
            terminate = True
        print(result_async.ready(), terminate)
        if result_async.ready():
            break
        elif terminate:
            pool.terminate()
            break
        i += 10
    result = result_async.get()  # The problem is here, it will just wait
    print(result)
    pool.close()
    pool.join()
I found a solution to the problem: with some digging, AsyncResult._value turns out to hold the values of the execution, with None for any case that has not been evaluated yet.
import multiprocessing
import time


def example_run(i):
    time.sleep(0.7)
    return i


if __name__ == '__main__':
    terminate = False
    pool = multiprocessing.Pool(10)
    result_async = pool.map_async(example_run, range(100))
    i = 0
    while True:
        time.sleep(1.0)
        if i == 70:
            terminate = True
        print(result_async.ready(), terminate)
        if result_async.ready():
            break
        elif terminate:
            pool.terminate()
            break
        i += 10
    result = []
    for value in result_async._value:
        if value is not None:
            result.append(value)
        else:
            result.append("failed")
    print(result)
    pool.close()
    pool.join()
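Since _value is a private attribute of AsyncResult and may change between Python versions, another option is the apply_async manipulation the question alludes to: attach a callback that stores each result as it completes, so everything that finished before terminate() is already collected. A minimal sketch, reusing the same example_run worker:
import multiprocessing
import time


def example_run(i):
    time.sleep(0.7)
    return i


if __name__ == '__main__':
    pool = multiprocessing.Pool(10)
    completed = []  # the callback appends each result as it arrives

    for i in range(100):
        pool.apply_async(example_run, args=(i,), callback=completed.append)

    time.sleep(7)     # let part of the workload finish
    pool.terminate()  # drop whatever has not started yet
    pool.join()
    print(len(completed), 'results:', sorted(completed))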

Stopping python script using KeyboardInterrupt

Hi, why is my KeyboardInterrupt not stopping my program when I hit Ctrl-C or Ctrl-X? This is my current code.
I am using Python threading to run two functions, coinPulser and coinPulserDone.
import threading
import time

lock = threading.Lock()
counter = 0
input = 3


def coinPulser():
    global counter
    global input
    lock.acquire()
    try:
        while counter < input:
            counter += 1
            time.sleep(.1)
            if counter in [1, 3, 5]:
                print(counter)
                return counter
    finally:
        lock.release()


def coinPulserDone():
    while True:
        print(coinPulser())


try:
    coinpulser = threading.Thread(target=coinPulser)
    coinpulser.start()
    coinpulserdone = threading.Thread(target=coinPulserDone)
    coinpulserdone.start()
except KeyboardInterrupt:
    coinpulser.stop()  # note: threading.Thread has no stop() method
    coinpulserdone.stop()
    print('Thread Stops')
I suspect the problem is that your code exits your try/except block before you press Ctrl-C. You need to add some form of loop that holds it in that block. A simple loop such as
while True:
    time.sleep(1)
before your except line should do the trick.
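Putting that together, a minimal sketch of the fixed shape (with a trivial placeholder instead of the question's coinPulser): the main thread stays inside the try block, so Ctrl-C has somewhere to land. Since threading.Thread has no stop() method, the worker is made a daemon so it exits with the main thread.
import threading
import time


def worker():
    # placeholder for the question's coinPulser logic
    while True:
        time.sleep(.1)


t = threading.Thread(target=worker, daemon=True)
t.start()

try:
    while True:  # keeps the main thread inside the try block
        time.sleep(1)
except KeyboardInterrupt:
    print('Thread Stops')  # the daemon worker dies with the main thread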

Exit multiprocesses gracefully in python3

I would like to exit the program gracefully on Ctrl+C / SIGINT or on user input. If possible, the terminal should prompt something like "Hit enter to terminate".
Code to be executed with Python 3.6:
import multiprocessing


def worker(process):
    i = 0
    while True:
        print('Process %d count %d' % (process, i))
        i += 1


def main():
    available_num_cores = multiprocessing.cpu_count()
    use_num_cores = available_num_cores - 1 if available_num_cores > 1 else 1
    print('Using %d cores' % use_num_cores)
    pool = multiprocessing.Pool(use_num_cores)
    for i in range(0, use_num_cores):
        pool.apply_async(worker, args=(i,))
    pool.close()
    pool.join()


if __name__ == '__main__':
    main()
The accepted answer for this question, Catch Ctrl+C / SIGINT and exit multiprocesses gracefully in python, isn't working; it fails with this error:
Process SpawnPoolWorker-1:
Process 0 count 1572
Process SpawnPoolWorker-2:
Process 1 count 1472
Process SpawnPoolWorker-3:
Traceback (most recent call last):
Any help would be appreciated. Thanks!
You need to make sure SIGINT is ignored by the child processes.
Then you just wait either for user input or for a CTRL+C to be issued.
import multiprocessing
import signal

# worker() and use_num_cores are as defined in the question


def initializer():
    """Ignore SIGINT in child workers."""
    signal.signal(signal.SIGINT, signal.SIG_IGN)


def main():
    try:
        pool = multiprocessing.Pool(use_num_cores, initializer=initializer)
        for i in range(0, use_num_cores):
            pool.apply_async(worker, args=(i,))
        pool.close()
        input("Hit enter to terminate")
    except KeyboardInterrupt:
        print("CTRL+C")
    finally:
        pool.terminate()
        pool.join()
        print("Bye have a great time!")
