get results of terminated map_async of multiprocessing - python

I am running a script with multiprocessing map_async. What I need is to get the incomplete result from the AsyncResult object (assuming it has already finished calculating some of the given cases) after terminating the Pool with terminate(). Using get() would just hang the script; how can I do this?
I know this can be done with apply_async and some manipulation, but can it be done somehow with map_async?
A working example of the situation:
import multiprocessing
import time

def example_run(i):
    time.sleep(0.7)
    return i

if __name__ == '__main__':
    terminate = False
    pool = multiprocessing.Pool(10)
    result_async = pool.map_async(example_run, range(100))
    i = 0
    while True:
        time.sleep(1.0)
        if i == 70:
            terminate = True
        print(result_async.ready(), terminate)
        if result_async.ready():
            break
        elif terminate:
            pool.terminate()
            break
        i += 10
    result = result_async.get()  # The problem is here: it will just wait
    print(result)
    pool.close()
    pool.join()

I found a solution to the problem. With some digging, AsyncResult._value seems to hold the values of the execution, with None for any case that has not been evaluated yet:
import multiprocessing
import time

def example_run(i):
    time.sleep(0.7)
    return i

if __name__ == '__main__':
    terminate = False
    pool = multiprocessing.Pool(10)
    result_async = pool.map_async(example_run, range(100))
    i = 0
    while True:
        time.sleep(1.0)
        if i == 70:
            terminate = True
        print(result_async.ready(), terminate)
        if result_async.ready():
            break
        elif terminate:
            pool.terminate()
            break
        i += 10
    result = []
    for value in result_async._value:
        if value is not None:
            result.append(value)
        else:
            result.append("failed")
    print(result)
    pool.close()
    pool.join()
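If relying on the private _value attribute feels fragile, a similar effect is possible through public API with pool.imap, whose iterator accepts a per-item timeout. This is a minimal sketch, not from the original answer; the 7-second deadline is arbitrary and just for illustration:

import multiprocessing
import time

def example_run(i):
    time.sleep(0.7)
    return i

if __name__ == '__main__':
    pool = multiprocessing.Pool(10)
    it = pool.imap(example_run, range(100))  # results arrive in submission order
    result = []
    deadline = time.monotonic() + 7.0        # stop collecting after ~7 seconds
    try:
        while True:
            remaining = deadline - time.monotonic()
            if remaining <= 0:
                break
            # next() raises multiprocessing.TimeoutError if no result arrives in time
            result.append(it.next(timeout=remaining))
    except (multiprocessing.TimeoutError, StopIteration):
        pass
    pool.terminate()
    pool.join()
    print(result)  # whatever finished before the deadline

The trade-off is that imap yields results in order, so one slow early item can block later items from being collected even if they are already done.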

Related

How to stop multiprocessing.Pool on Exception

In the code below, I raise an exception during the first call, yet it seems the exception is swallowed and all the other tasks still execute. What's the problem? What I want is that whenever the first exception occurs, it is printed and the multiprocessing pool is stopped directly.
import multiprocessing
import time

def func(i):
    if i == 0:
        raise Exception()
    else:
        time.sleep(1)
        print(i)

num_workers = 4
pool = multiprocessing.Pool(num_workers)
try:
    for i in range(4):
        pool.apply_async(func, args=(i,))
except:
    print("err")
pool.close()
pool.join()
The following code, edited according to HTF's answer,
import multiprocessing
import time

def func(i):
    if i == 0:
        raise Exception()
    else:
        time.sleep(1)
        print(i)

if __name__ == '__main__':
    num_workers = 4
    pool = multiprocessing.Pool(num_workers)
    results = [pool.apply_async(func, args=(i,)) for i in range(4)]
    try:
        for result in results:
            result.get()
    except:
        print("err")
    pool.close()
    pool.join()
gives the output:
err
1
2
3
whereas I expected only err.
You just scheduled the tasks; you need to wait for the results. Note that get() re-raises the worker's exception in the parent, but tasks that were already submitted keep running, which is why 1, 2 and 3 still print:
results = [pool.apply_async(func, args=(i,)) for i in range(4)]
try:
    for result in results:
        result.get()
except:
    print("err")
Update Wed 7 Apr 20:42:59 UTC 2021:
You can try something like this:
import time
from functools import partial
from multiprocessing import Pool

def func(i):
    if i == 0:
        raise Exception("something bad happened")
    else:
        time.sleep(1)
        print(i)

def quit(pool, err):
    print(f"ERROR: {err}")
    pool.terminate()

def main():
    pool = Pool()
    partial_quit = partial(quit, pool)
    for i in range(4):
        pool.apply_async(func, args=(i,), error_callback=partial_quit)
    pool.close()
    pool.join()

if __name__ == "__main__":
    main()
Test:
$ python test1.py
ERROR: something bad happened
If you need the return values back, it may actually be easier to use bare processes and a queue:
import time
from multiprocessing import Process, Queue

PROCS = 4

def worker(q, i):
    if i == 10:  # never true with PROCS = 4, so the test below succeeds
        print("error")
        q.put_nowait("ERROR")
    else:
        time.sleep(1)
        print(i)
        q.put_nowait(i)

def main():
    q = Queue()
    procs = []
    for i in range(PROCS):
        p = Process(target=worker, args=(q, i))
        p.start()
        procs.append(p)
    count = len(procs)
    while count:
        result = q.get()
        if result == "ERROR":
            for p in procs:
                p.terminate()
            break
        print(f"Result for: {result}")
        count -= 1

if __name__ == "__main__":
    main()
Test:
$ python test2.py
0
2
1
3
Result for: 0
Result for: 2
Result for: 1
Result for: 3
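As an aside, not from the original answers: concurrent.futures gives a similar shape with less plumbing. A sketch that stops handing out work once the first error arrives (already-running tasks still finish; only not-yet-started ones are cancelled):

import time
from concurrent.futures import ProcessPoolExecutor, as_completed

def func(i):
    if i == 0:
        raise Exception("something bad happened")
    time.sleep(1)
    return i

if __name__ == "__main__":
    with ProcessPoolExecutor(max_workers=4) as ex:
        futures = [ex.submit(func, i) for i in range(4)]
        for fut in as_completed(futures):
            try:
                print(f"Result for: {fut.result()}")  # re-raises the worker's exception
            except Exception as err:
                print(f"ERROR: {err}")
                for f in futures:
                    f.cancel()  # drops pending tasks; running ones are unaffected
                break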

Attempting to print a successful/unsuccessful after a simulation

I'm a relatively new Python coder and I'm having problems printing successful_trades/unsuccessful_trades after a 10-second simulation. The only thing I can think of is making the line print("Successful ender pearl trade") an operator (which obviously won't work). Any help would be greatly appreciated; code is below. Thank you!
import multiprocessing
import time
import random

successful_trades = 0
unsuccessful_trade = 0

# Your foo function
def foo(n):
    for i in range(10000 * n):
        print("Tick")
        time.sleep(1)

if __name__ == '__main__':
    # Start foo as a process
    p = multiprocessing.Process(target=foo, name="Foo", args=(10,))
    p.start()
    # Wait 10 seconds for foo
    time.sleep(10)
    # Terminate foo
    p.terminate()
    # Cleanup
    p.join()
    user_input = input()
    if user_input == "o":
        while True:
            k = random.randint(1, 109)
            number = random.randint(1, 109)
            if str(number) == str(k):
                print("Successful ender pearl trade")
                # builds a throwaway string; the counter never changes
                (str(successful_trades) + str(1))
            if str(number) != str(k):
                print("Unsuccessful ender pearl trade")
                (str(unsuccessful_trade) + str(1))
It works for me
One theory is that your infinite while loop is locking up the screen output.
You can test this by changing your while True loop to:
for i in range(100):
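For the counting itself, a minimal sketch of what the asker seems to be after (an assumption, not from the original answer): increment the integers with +=, use a bounded loop so the script reaches the final print, and format the totals at the end.

import random

successful_trades = 0
unsuccessful_trades = 0

for _ in range(100):               # bounded, so the summary line is reached
    k = random.randint(1, 109)
    number = random.randint(1, 109)
    if number == k:
        print("Successful ender pearl trade")
        successful_trades += 1     # increment instead of building a throwaway string
    else:
        print("Unsuccessful ender pearl trade")
        unsuccessful_trades += 1

print(f"{successful_trades} successful / {unsuccessful_trades} unsuccessful")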

Why does terminate() of multiprocessing.pool.ThreadPool hang?

I want to stop asynchronous multiprocessing jobs with KeyboardInterrupt, but the call to terminate() sometimes hangs.
from multiprocessing.pool import ThreadPool
import multiprocessing
import time
import queue
import inspect

def worker(index):
    print('{}: start'.format(index))
    for i in range(5):
        time.sleep(1)
    print('{}: stop'.format(index))
    return index, True

def wrapper(index, stopEvent, qResult):
    if stopEvent.is_set() is True:
        return index, False
    try:
        result = worker(index)
    except:
        print('*' * 50)
        return index, False
    else:
        if result[1] == True:
            qResult.put(result)
        return result

def watcher(qResult, stopEvent):
    cntQ = 0
    while True:
        try:
            result = qResult.get(timeout=10)
            qResult.task_done()
        except queue.Empty:
            if stopEvent.is_set() is True:
                break
        except KeyboardInterrupt:
            stopEvent.set()
        else:
            cntQ += 1
            print(result)
    qResult.join()
    qResult.close()
    print('qResult count:', cntQ)

def main():
    stopEvent = multiprocessing.Event()
    qResult = multiprocessing.JoinableQueue()
    qResult.cancel_join_thread()
    watch = multiprocessing.Process(target=watcher, args=(qResult, stopEvent))
    watch.start()
    pool = ThreadPool()
    lsRet = []
    for i in range(100000):
        try:
            ret = pool.apply_async(wrapper, args=(i, stopEvent, qResult))
            lsRet.append(ret)
        except KeyboardInterrupt:
            stopEvent.set()
            time.sleep(1)
            break
        if i+1 % 10 == 0:  # precedence note: parses as (i + (1 % 10)) == 0, so this never triggers; (i + 1) % 10 == 0 was probably intended
            time.sleep(2)
    cntTotal = len(lsRet)
    cntRet = 0
    for ret in lsRet:
        if stopEvent.is_set():
            break
        try:
            ret.get()
        except KeyboardInterrupt:
            stopEvent.set()
            time.sleep(1)
        else:
            cntRet += 1
    if stopEvent.is_set() is False:
        stopEvent.set()
    print(inspect.stack()[0][1:4])
    if watch.is_alive() is True:
        watch.join()
    print(inspect.stack()[0][1:4])
    pool.terminate()  # Why hang??????????
    print(inspect.stack()[0][1:4])
    pool.join()
    print(cntTotal, cntRet)

if __name__ == '__main__':
    main()
main() starts a watcher() process and schedules many wrapper() calls asynchronously on a multiprocessing.pool.ThreadPool.
wrapper() calls worker() and puts its result on a queue.
watcher() watches that queue of results.
If Ctrl-C is pressed, stopEvent is set.
When stopEvent is set, wrapper() stops calling worker(), and watcher() hits queue.Empty, sees stopEvent, and exits its loop.
Finally main() calls terminate() on the pool.
Sometimes the processes finish cleanly, but sometimes it hangs; it's different each time.
You should put the code in a try/except block and catch the built-in KeyboardInterrupt exception; see the example here: Capture keyboardinterrupt
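A minimal sketch of that pattern, assuming the goal is just a clean shutdown on Ctrl-C (the worker here is a stand-in, not the asker's code):

import time
from multiprocessing.pool import ThreadPool

def worker(i):
    time.sleep(1)
    return i

if __name__ == '__main__':
    pool = ThreadPool(4)
    try:
        results = [pool.apply_async(worker, args=(i,)) for i in range(100)]
        for r in results:
            r.get()           # KeyboardInterrupt surfaces here, in the main thread
        pool.close()
    except KeyboardInterrupt:
        pool.terminate()      # stop the workers instead of waiting for them
    finally:
        pool.join()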

How to restart python multi-thread

I'm trying to design a control interface for my system, which sends and receives some data through a serial link. My searches related to GUI design led me to the "multi-threading" topic, and the code below shows the latest position I've arrived at.
It has parts (e.g. try, run) similar to the example GUIs I've seen. I plan to convert it to a GUI once I understand exactly how it works.
The problem is that after I start and then stop the code below, I can't restart it. As I understand it, a thread offers only one cycle: start, stop, and quit; it doesn't accept a start command after stop.
My question is: how can I make this code accept start after stopping?
Best wishes
import threading, random, time

class process(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)

    def run(self):
        self.leave = 0
        print("\n it's running ...\n\n")
        while self.leave != 1:
            print("Done!")
            time.sleep(1)

operate = process()

while True:
    inputt = input(" START : 1 \n STOP\t : 0 \n QUIT\t : 2 \n")
    try:
        if int(inputt) == 1:
            operate.start()
        elif int(inputt) == 0:
            operate.leave = 1
        elif int(inputt) == 2:
            break
    except:
        print(" Wrong input, try again...\n")
Create the process inside the while True loop:
if int(inputt) == 1:
    operate = process()
    operate.start()
It should work.
... but your code may need other changes to make it safer: you will have to check whether the process exists before you try to stop it. You could use operate = None to control that.
import threading
import random
import time

class Process(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)

    def run(self):
        self.leave = False
        print("\n it's running ...\n\n")
        while self.leave == False:
            print("Done!")
            time.sleep(1)

operate = None

while True:
    inputt = input(" START : 1 \n STOP\t : 0 \n QUIT\t : 2 \n")
    try:
        if int(inputt) == 1:
            if operate is None:
                operate = Process()
                operate.start()
        elif int(inputt) == 0:
            if operate is not None:
                operate.leave = True
                operate.join()  # wait for the thread to end
                operate = None
        elif int(inputt) == 2:
            if operate is not None:
                operate.leave = True
                operate.join()  # wait for the thread to end
            break
    except:
        print(" Wrong input, try again...\n")
Another method is not to leave run() when you stop it, but to keep the thread running; you would need two loops and two flags.
def run(self):
    self.leave = False
    self.stoped = False
    print("\n it's running ...\n\n")
    while self.leave == False:
        while self.stoped == False:
            print("Done!")
            time.sleep(1)
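A fuller sketch of that two-flag idea (the pause()/resume()/quit() helpers are hypothetical additions for illustration; setting the flags in __init__ avoids a race where they are toggled before run() begins):

import threading
import time

class Process(threading.Thread):
    def __init__(self):
        super().__init__()
        self.leave = False    # exit run() entirely
        self.stoped = False   # pause the inner work loop

    def run(self):
        while not self.leave:
            while not self.stoped and not self.leave:
                print("Done!")
                time.sleep(1)
            time.sleep(0.1)   # idle while paused, without burning CPU

    def pause(self):
        self.stoped = True

    def resume(self):
        self.stoped = False

    def quit(self):
        self.leave = True

worker = Process()
worker.start()
time.sleep(3)
worker.pause()
time.sleep(2)
worker.resume()
time.sleep(3)
worker.quit()
worker.join()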

Daemon thread not exiting despite main program finishing

I've already referred to this thread, but it seems to be outdated and there doesn't seem to be a clean explanation: Python daemon thread does not exit when parent thread exits.
I'm running Python 3.6 and trying to run the script from either IDLE or the Spyder IDE.
Here is my code:
import threading
import time

total = 4

def creates_items():
    global total
    for i in range(10):
        time.sleep(2)
        print('added item')
        total += 1
    print('creation is done')

def creates_items_2():
    global total
    for i in range(7):
        time.sleep(1)
        print('added item')
        total += 1
    print('creation is done')

def limits_items():
    #print('finished sleeping')
    global total
    while True:
        if total > 5:
            print('overload')
            total -= 3
            print('subtracted 3')
        else:
            time.sleep(1)
            print('waiting')

limitor = threading.Thread(target=limits_items, daemon=True)
creator1 = threading.Thread(target=creates_items)
creator2 = threading.Thread(target=creates_items_2)

print(limitor.isDaemon())

creator1.start()
creator2.start()
limitor.start()

creator1.join()
creator2.join()

print('our ending value of total is', total)
The limitor thread doesn't seem to end despite being a daemon thread.
Is there a way to get this working from IDLE or Spyder?
Thanks.
I had the same problem and solved it by using multiprocessing instead of threading:

from multiprocessing import Process
import multiprocessing
from time import sleep

def daemon_thread():
    for _ in range(10):
        sleep(1)
        print("Daemon")

if __name__ == '__main__':
    multiprocessing.freeze_support()
    sub_process = Process(target=daemon_thread, daemon=True)
    sub_process.start()
    print("Exiting Main")

I haven't yet really understood why I need the call to freeze_support(), but it makes the code work.
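For context, an observation rather than part of the original answer: daemon threads are killed when the interpreter process exits, and a plain `python script.py` run does exactly that once the non-daemon threads finish. IDLE and Spyder keep the host process alive after the script ends, so the daemon appears to live on. A minimal way to see the difference:

import threading
import time

def background():
    while True:
        time.sleep(0.5)
        print("still here")

t = threading.Thread(target=background, daemon=True)
t.start()

time.sleep(2)
print("main done")  # run as `python script.py`: the daemon dies here with the process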
