Here is my prime factorization program. I added a callback function in pool.apply_async(findK, args=(N, begin, end)) so that the message "prime factorization is over" is printed when factorization finishes; it works fine.
import math
import multiprocessing
def findK(N, begin, end):
    """Scan [begin, end) for a divisor of N; print the factor pair if found.

    Returns True as soon as a divisor k is found, False when no divisor
    exists in the range.
    """
    for k in range(begin, end):
        if N % k == 0:
            # Floor division: N / k produces a float, which loses precision
            # for large integers such as 684568031001583853.
            print(N, "=", k, "*", N // k)
            return True
    return False
def prompt(result):
    """apply_async callback: announce completion when a factor was found."""
    if not result:
        return
    print("prime factorization is over")
def mainFun(N, process_num):
    """Split the divisor search over process_num worker processes.

    The trial range [2, sqrt(N)] is divided into process_num contiguous
    segments; each worker runs findK on one segment, and prompt() fires
    as the callback when a worker finishes.
    """
    pool = multiprocessing.Pool(process_num)
    limit = math.sqrt(N)  # hoisted: loop-invariant
    for i in range(process_num):
        if i == 0:
            begin = 2
        else:
            begin = int(limit / process_num * i) + 1
        end = int(limit / process_num * (i + 1))
        # end + 1: range() excludes its stop value and the next segment
        # starts at end + 1, so without the +1 every segment-boundary
        # candidate divisor would be skipped (off-by-one).
        pool.apply_async(findK, args=(N, begin, end + 1), callback=prompt)
    pool.close()
    pool.join()
if __name__ == "__main__":
    # Large semiprime to factor, spread over 16 worker processes.
    number, workers = 684568031001583853, 16
    mainFun(number, workers)
Now I want to change the callback function in apply_async: turn prompt into a shutdown function that kills all the other processes.
def prompt(result):
if result:
pool.terminate()
The pool instance is not defined in prompt scope or passed into prompt.
so pool.terminate() cannot work inside the prompt function.
How can I pass the multiprocessing.Pool instance to apply_async's callback function?
(I have already done this in class form: adding a class method that calls self.pool.terminate kills all the other processes.
How can I do the same in plain-function form?)
If pool is not made a global variable, can it still be passed into the callback function?
Passing extra arguments to the callback function is not supported. Yet you have plenty of elegant ways to work around that.
You can encapsulate your pool logic into an object:
class Executor:
    """Wrap a multiprocessing.Pool so the completion callback can reach it."""

    def __init__(self, process_num):
        # The pool lives on the instance, so bound methods used as
        # callbacks can terminate it through self.
        self.pool = multiprocessing.Pool(process_num)

    def prompt(self, result):
        """apply_async callback: stop all workers once a factor is found."""
        if not result:
            return
        print("prime factorization is over")
        self.pool.terminate()

    def schedule(self, function, args):
        """Submit one task with self.prompt wired in as its callback."""
        self.pool.apply_async(function, args=args, callback=self.prompt)

    def wait(self):
        """Refuse new work and block until all workers have exited."""
        self.pool.close()
        self.pool.join()
def main(N, process_num):
    # Sketch: drive the pool through the Executor wrapper; the elided loop
    # body computes begin/end for each segment exactly as in mainFun.
    executor = Executor(process_num)
    for i in range(process_num):
        ...
        # NOTE(review): begin/end must be assigned by the elided code above.
        executor.schedule(findK, (N, begin, end))
    executor.wait()
Or you can use the concurrent.futures.Executor implementation which returns a Future object. You just append the pool to the Future object before setting the callback.
def prompt(future):
    """Done-callback for a Future: shut the pool down after a hit.

    Relies on the executor having been attached to the future as the
    ad-hoc attribute `pool_executor` before the callback was registered.
    """
    if not future.result():
        return
    print("prime factorization is over")
    future.pool_executor.shutdown(wait=False)
def main(N, process_num):
    # Sketch: same segmentation loop as before (elided); each future gets
    # the executor attached so prompt() can shut the pool down.
    executor = concurrent.futures.ProcessPoolExecutor(max_workers=process_num)
    for i in range(process_num):
        ...
        # NOTE(review): begin/end come from the elided segment computation.
        future = executor.submit(findK, N, begin, end)
        # Attach the executor itself: add_done_callback accepts no extra args,
        # so the callback retrieves it from the future.
        future.pool_executor = executor
        future.add_done_callback(prompt)
You can simply define a local close function as a callback:
import math
import multiprocessing
def findK(N, begin, end):
    """Scan [begin, end) for a divisor of N; print the factor pair if found.

    Returns True on the first divisor found, False otherwise.
    """
    for k in range(begin, end):
        if N % k == 0:
            # Floor division: N / k yields a float and loses precision for
            # integers larger than 2**53.
            print(N, "=", k, "*", N // k)
            return True
    return False
def mainFun(N, process_num):
    """Parallel trial division over [2, sqrt(N)] using process_num workers."""
    pool = multiprocessing.Pool(process_num)

    def close(result):
        # Callback runs in the parent process, so `pool` is reachable
        # through the closure; terminate all workers on the first hit.
        if result:
            print("prime factorization is over")
            pool.terminate()

    limit = math.sqrt(N)  # hoisted: loop-invariant
    for i in range(process_num):
        if i == 0:
            begin = 2
        else:
            begin = int(limit / process_num * i) + 1
        end = int(limit / process_num * (i + 1))
        # end + 1 so the segment boundary itself is tested: range() excludes
        # its stop and the next segment starts at end + 1 (off-by-one fix).
        pool.apply_async(findK, args=(N, begin, end + 1), callback=close)
    pool.close()
    pool.join()
if __name__ == "__main__":
    # Factor a large semiprime across 16 worker processes.
    target, workers = 684568031001583853, 16
    mainFun(target, workers)
You can also use a partial function from functool, with
import functools
def close_pool(pool, result):
    """Terminate the pool when a worker reports success.

    Intended to be bound to a concrete pool via functools.partial, since
    apply_async callbacks receive only the task result. (Original had a
    NameError: the parameter was named `results` but the body read `result`.)
    """
    if result:
        pool.terminate()
def mainFun(N, process_num):
    """Sketch: bind the pool into the callback with functools.partial.

    (Original had two defects: the typo `funtools` would raise NameError,
    and the trailing `....` is a syntax error -- `...` is the valid
    placeholder.)
    """
    pool = multiprocessing.Pool(process_num)
    close = functools.partial(close_pool, pool)
    ...
You need to have pool end up in prompt's environment. One possibility is to move pool into the global scope (though this isn't really best-practice). This appears to work:
import math
import multiprocessing
pool = None
def findK(N, begin, end):
    """Scan [begin, end) for a divisor of N; print the factor pair if found."""
    for k in range(begin, end):
        if N % k == 0:
            # Floor division keeps the cofactor exact; N / k is a float and
            # loses precision for very large N.
            print(N, "=", k, "*", N // k)
            return True
    return False
def prompt(result):
    """Callback: announce success and kill the remaining workers.

    Reads the module-global `pool` that mainFun assigns.
    """
    if not result:
        return
    print("prime factorization is over")
    pool.terminate()
def mainFun(N, process_num):
    """Parallel trial division; publishes the pool via the module global."""
    global pool
    pool = multiprocessing.Pool(process_num)
    limit = math.sqrt(N)  # hoisted: loop-invariant
    for i in range(process_num):
        if i == 0:
            begin = 2
        else:
            begin = int(limit / process_num * i) + 1
        end = int(limit / process_num * (i + 1))
        # end + 1: range() excludes its stop and the next segment starts at
        # end + 1, so the boundary value would otherwise never be tested.
        pool.apply_async(findK, args=(N, begin, end + 1), callback=prompt)
    pool.close()
    pool.join()
if __name__ == "__main__":
    # Factor the sample semiprime with 16 worker processes.
    mainFun(684568031001583853, 16)
Related
I am a bit confused by the multiprocessing philosophy in Python. To test my knowledge, I thought of a multiprocessed program that computes the prime decomposition of an integer.
It goes as follows. Put the integer in a queue. I then have a function that dequeues it and searches for a (prime) divisor of it. If one is found, the complementary integer is put back in the queue. How can I make this work? For the moment I have this:
import multiprocessing as mp
def f(queue, decomp):
    """Pop one integer off `queue`; emit its smallest divisor to `decomp`.

    If a divisor is found, the cofactor goes back on `queue` for further
    decomposition; otherwise the number is prime and is emitted as-is.
    """
    x = queue.get()
    for candidate in range(2, x):
        if x % candidate == 0:
            decomp.put(candidate)
            queue.put(x // candidate)
            return
    decomp.put(x)
class Num:
    # Holds the work queue (numbers still to factor) and the output queue
    # of prime factors found so far.
    def __init__(self, n):
        self.queue = mp.Queue()
        self.queue.put(n)
        self.decomposition = mp.Queue()
    def run(self):
        # NOTE(review): passing mp.Queue instances as apply_async arguments
        # raises "RuntimeError: Queue objects should only be shared between
        # processes through inheritance" -- this is the defect the question
        # is about; the answers below show the inheritance-based fix.
        with mp.Pool(4) as pool:
            pool.apply_async(f, (self.queue, self.decomposition))
It raises
RuntimeError: Queue objects should only be shared between processes through inheritance
What is the standard way to make this ? (I know there may be better way to give the prime decomposition)
In order to use multiprocessing.Queue, you need to pass it to each child process at the point they are created (so they get "inherited"), rather than passing them as parameters to apply_async. If you're on Linux, you can do this by declaring them in the global scope, instead of as instance variables on the Num class - they will get inherited via the forking process:
import multiprocessing as mp
# Created at module import time so forked worker processes inherit them
# (Linux/fork start method only).
queue = mp.Queue()
decomposition = mp.Queue()
def f():
    """Factor one number from the inherited global `queue`.

    The smallest divisor found goes to `decomposition` and the cofactor is
    re-queued; a number with no divisor is prime and is emitted directly.
    """
    x = queue.get()
    for d in range(2, x):
        if x % d == 0:
            decomposition.put(d)
            queue.put(x // d)
            return
    decomposition.put(x)
class Num:
    # The queues live at module scope; the constructor only seeds the work.
    def __init__(self, n):
        queue.put(n)
    def run(self):
        # Workers inherit the global queues via fork, so f takes no args.
        with mp.Pool(4) as pool:
            pool.apply(f)
On Windows, it is a bit more involved, since it does not have support for forking. Instead, you have to use the initializer and initargs keyword parameters on the Pool constructor to pass the queues to the child processes, and then declare them as global variables inside the initializer function you provide. This will put the queues in the global scope of your worker processes, allowing you to use them in the functions you pass to all Pool methods (map/map_async, apply/apply_async).
import multiprocessing as mp
def f():
    """Factor one number using the worker-global queues installed by init()."""
    x = queue.get()
    for d in range(2, x):
        if x % d == 0:
            decomp.put(d)
            queue.put(x // d)
            return
    decomp.put(x)
def init(q, d):
    """Pool initializer: expose the parent's queues as worker globals.

    Runs once in each worker process; afterwards f() can reach the queues
    through the module namespace.
    """
    global queue, decomp
    queue, decomp = q, d
class Num:
    # Queues are instance state here; they reach the workers through the
    # Pool initializer rather than fork inheritance (Windows-safe).
    def __init__(self, n):
        self.queue = mp.Queue()
        self.queue.put(n)
        self.decomposition = mp.Queue()
    def run(self):
        # init() stores the queues as globals inside each worker process.
        with mp.Pool(4, initializer=init, initargs=(self.queue, self.decomposition)) as pool:
            pool.apply(f)
I'm trying to use the multiprocessing library to parallelize some expensive calculations without blocking some others, much lighter. The both need to interact through some variables, although the may run with different paces.
To show this, I have created the following example, that works fine:
import multiprocessing
import time
import numpy as np
class SumClass:
    """Accumulates a sum, off-loading each addition to a worker process.

    The parent polls is_executing(); once the worker finishes, the shared
    return value is folded back into self.result.
    """

    def __init__(self):
        self.result = 0.0         # last completed sum
        self.p = None             # currently running worker, if any
        self.return_value = None  # shared float the worker writes into

    def expensive_function(self, new_number, return_value):
        # Execute expensive calculation
        #######
        # np.random.random_integers was deprecated and then removed from
        # NumPy; randint with an exclusive upper bound draws the same
        # 5..10 inclusive range.
        time.sleep(int(np.random.randint(5, 11)))
        return_value.value = self.result + new_number
        #######

    def execute_function(self, new_number):
        """Spawn a worker that adds new_number to the current result."""
        print(' New number received: %f' % new_number)
        self.return_value = multiprocessing.Value("f", 0.0, lock=True)
        self.p = multiprocessing.Process(target=self.expensive_function,
                                         args=(new_number, self.return_value))
        self.p.start()

    def is_executing(self):
        """Return True while the worker runs; harvest its result when done."""
        if self.p is None:
            return False
        if self.p.is_alive():
            return True
        # Worker finished: copy the shared value back into parent state.
        self.result = self.return_value.value
        self.p = None
        return False
if __name__ == '__main__':
    sum_obj = SumClass()
    current_value = 0
    # Infinite supervision loop: cheap work every second, occasionally
    # kicking off the expensive background addition.
    while True:
        if not sum_obj.is_executing():
            # Randomly determine whether the function must be executed or not
            if np.random.rand() < 0.25:
                print('Current sum value: %f' % sum_obj.result)
                new_number = np.random.rand(1)[0]
                sum_obj.execute_function(new_number)
        # Execute other (light) stuff
        #######
        print('Executing other stuff')
        current_value += sum_obj.result * 0.1
        print('Current value: %f' % current_value)
        time.sleep(1)
        #######
Basically, in the main loop some light function is executed, and depending on a random condition, some heavy work is sent to another process if it has already finished the previous one, carried out by an object which needs to store some data between executions. Although expensive_function needs some time, the light function keeps on executing without being blocked.
Although the above code gets the job done, I'm wondering: is it the best/most appropriate method to do this?
Besides, let us suppose the class SumClass has an instance of another object, which also needs to store data. For example:
import multiprocessing
import time
import numpy as np
class Operator:
    """Keeps a running product of every value passed to operate()."""

    def __init__(self):
        self.last_value = 1.0  # multiplicative identity as the seed

    def operate(self, value):
        """Multiply the accumulator by value and return the new product."""
        # The previous value is reported before it is updated.
        print(' Operation, last value: %f' % self.last_value)
        self.last_value *= value
        return self.last_value
class SumClass:
    """Like the first version, but the expensive step drives an Operator.

    NOTE(review): expensive_function runs in a child process, so the
    mutation of self.operator_obj.last_value happens in the child's copy
    and never reaches the parent -- this is exactly the behavior the
    question asks about.
    """

    def __init__(self):
        self.operator_obj = Operator()
        self.result = 0.0         # last completed sum
        self.p = None             # currently running worker, if any
        self.return_value = None  # shared float the worker writes into

    def expensive_function(self, new_number, return_value):
        # Execute expensive calculation
        #######
        # random_integers was removed from NumPy; randint(5, 11) draws the
        # same 5..10 inclusive range.
        time.sleep(int(np.random.randint(5, 11)))
        # Apply operation (mutates only the child's copy of operator_obj)
        number = self.operator_obj.operate(new_number)
        # Apply other operation
        return_value.value = self.result + number
        #######

    def execute_function(self, new_number):
        """Start a worker computing result + operate(new_number)."""
        print(' New number received: %f' % new_number)
        self.return_value = multiprocessing.Value("f", 0.0, lock=True)
        self.p = multiprocessing.Process(target=self.expensive_function,
                                         args=(new_number, self.return_value))
        self.p.start()

    def is_executing(self):
        """True while the worker runs; fold its result in once finished."""
        if self.p is None:
            return False
        if self.p.is_alive():
            return True
        self.result = self.return_value.value
        self.p = None
        return False
if __name__ == '__main__':
    sum_obj = SumClass()
    current_value = 0
    # Infinite supervision loop, identical to the first example: light work
    # every second, heavy work delegated to a child process now and then.
    while True:
        if not sum_obj.is_executing():
            # Randomly determine whether the function must be executed or not
            if np.random.rand() < 0.25:
                print('Current sum value: %f' % sum_obj.result)
                new_number = np.random.rand(1)[0]
                sum_obj.execute_function(new_number)
        # Execute other (light) stuff
        #######
        print('Executing other stuff')
        current_value += sum_obj.result * 0.1
        print('Current value: %f' % current_value)
        time.sleep(1)
        #######
Now, inside the expensive_function, a function member of the object Operator is used, which needs to store the number passed.
As expected, the member variable last_value does not change, i.e. it does not keep any value.
Is there any way of doing this properly?
I can imagine I could arrange everything so that I only need to use one class level, and it would work well. However, this is a toy example, in reality there are different levels of complex objects and it would be hard.
Thank you very much in advance!
from concurrent.futures import ThreadPoolExecutor
from numba import jit
import requests
import timeit
def timer(number, repeat):
    """Decorator factory: time the function with timeit and print the mean.

    NOTE(review): wrapper returns None, so the decorated name is rebound to
    None -- acceptable here because the benchmark runs at definition time.
    """
    def wrapper(func):
        measurements = timeit.repeat(func, number=number, repeat=repeat)
        average = sum(measurements) / len(measurements)
        print(average)
    return wrapper
# Endpoint returning a random UUID as JSON.
URL = "https://httpbin.org/uuid"
# NOTE(review): the next line looks like a "@jit" decorator mangled into a
# comment during formatting; as written it is inert, so numba is never
# actually applied to fetch().
#jit(nopython=True, nogil=True,cache=True)
def fetch(session, url):
    """GET url on the given session and print the 'uuid' field of the JSON body."""
    with session.get(url) as response:
        payload = response.json()
        print(payload['uuid'])
# NOTE(review): likely a mangled "@timer(1, 1)" decorator; inert as written.
#timer(1, 1)
def runner():
    # Fan 100 requests out over 25 threads, sharing one HTTP session.
    with ThreadPoolExecutor(max_workers=25) as executor:
        with requests.Session() as session:
            executor.map(fetch, [session] * 100, [URL] * 100)
        # NOTE(review): the enclosing with-block already shuts the pool down
        # on exit, so this explicit shutdown is redundant.
        executor.shutdown(wait=True)
        # NOTE(review): attribute accessed but never called, and it is a
        # private API -- this line has no effect.
        executor._adjust_thread_count
Maybe this might help.
I'm using ThreadPoolExecutor for multithreading. you can also use ProcessPoolExecutor.
For your compute-expensive operation you can use numba to generate cached compiled code for your function, for faster execution.
I'm having trouble writing a benchmark code in python using threading. I was able to get my threading to work, but I can't get my object to return a value. I want to take the values and add them to a list so I can calculate the flops.
create class to carry out threading
class myThread(threading.Thread):
    # Question code: a 30-second busy-loop benchmark per thread.
    def calculation(self):
        """Count increments for 30 wall-clock seconds; return elapsed time."""
        n=0
        start=time.time()
        ex_time=0
        while ex_time < 30:
            n+=1
            end=time.time()
            ex_time=end-start
        # NOTE(review): this return value is discarded -- thread targets
        # cannot return values to the caller, which is the question's bug.
        return ex_time
    def run(self):
        # NOTE(review): spawns a *second* thread from within run(); the
        # value returned by calculation() is lost.
        t = threading.Thread(target = self.calculation)
        t.start()
function to create threads
def make_threads(num):
    # Question code: start `num` benchmark threads and try to collect results.
    times=[]
    calcs=[]
    for i in range(num):
        print('start thread', i+1)
        thread1=myThread()
        # NOTE(review): Thread.start() always returns None, so `times` fills
        # with None -- the symptom described in the question.
        t=thread1.start()
        times.append(t)
        #calcs.append(n)
        #when trying to get a return value it comes back as none as seen
    print(times)
    #average out the times,add all the calculations to get the final numbers
    #to calculate flops
    time.sleep(32) #stop the menu from printing until calc finish
def main():
    # Simple menu loop: entering 0 exits, any other number runs the benchmark.
    answer=1
    while answer != 0:
        answer=int(input("Please indicate how many threads to use: (Enter 0 to exit)"))
        # NOTE(review): an entered 0 still falls through to make_threads(0)
        # once before the loop condition is re-tested.
        print("\n\nBenchmark test with ", answer, "threads")
        make_threads(answer)
main()
Two ways to do this:
1. Using static variables (hacky, but efficient and quick)
Define some global variable that you then manipulate in the thread. I.e.:
import threading
import time
class myThread(threading.Thread):
    # Answer version: results are reported through a shared dict instead of
    # a return value.
    def calculation(self):
        """Busy-loop for 30 s, then publish the elapsed time in the shared dict."""
        n=0
        start=time.time()
        ex_time=0
        print("Running....")
        while ex_time < 30:
            n+=1
            end=time.time()
            ex_time=end-start
        # Publish the result under this thread's id in the shared dict.
        self.myThreadValues[self.idValue] = ex_time
        print(self.myThreadValues)
        return ex_time
    def setup(self,myThreadValues=None,idValue=None):
        # Attach the shared results dict and this thread's key before start().
        self.myThreadValues = myThreadValues
        self.idValue = idValue
    def run(self):
        # Run the benchmark on *this* thread rather than spawning another,
        # so join() actually waits for the work to finish.
        self.calculation()
        #t = threading.Thread(target = self.calculation)
        #t.start()
def make_threads(num):
    # Start `num` benchmark threads, join them all, then report the shared
    # results dict (thread id -> elapsed time).
    threads=[]
    calcs=[]
    myThreadValues = {}
    for i in range(num):
        print('start thread', i+1)
        myThreadValues[i] = 0
        thread1=myThread()
        thread1.setup(myThreadValues,i)
        thread1.start()
        #times.append(t)
        threads.append(thread1)
    # Now we need to wait for all the threads to finish. There are a couple ways to do this, but the best is joining.
    print("joining all threads...")
    for thread in threads:
        thread.join()
    #calcs.append(n)
    #when trying to get a return value it comes back as none as seen
    print("Final thread values: " + str(myThreadValues))
    print("Done")
    #average out the times,add all the calculations to get the final numbers
    #to calculate flops
    #time.sleep(32) #stop the menu from printing until calc finish
def main():
    # Menu loop: entering 0 exits, any other number runs the benchmark.
    answer=1
    while answer != 0:
        answer=int(input("Please indicate how many threads to use: (Enter 0 to exit)"))
        print("\n\nBenchmark test with ", answer, "threads")
        make_threads(answer)
main()
2. The proper way to do this is with Processes
Processes are designed for passing information back and forth, versus threads which are commonly used for async work. See explanation here: https://docs.python.org/3/library/multiprocessing.html
See this answer: How can I recover the return value of a function passed to multiprocessing.Process?
import multiprocessing
from os import getpid
def worker(procnum):
    """Report which process handles procnum; return that process's pid."""
    # print() call -- the original used Python 2 print-statement syntax,
    # which is a SyntaxError on Python 3.
    print('I am number %d in process %d' % (procnum, getpid()))
    return getpid()
if __name__ == '__main__':
    pool = multiprocessing.Pool(processes = 3)
    # print() call instead of the original Python 2 print statement.
    print(pool.map(worker, range(5)))
How to exit from a function called by multiprocessing.Pool
Here is an example of the code I am using; when I add a condition to exit from the worker function and run this as a script in a terminal, it halts and does not exit.
def worker(n):
    """Double n; the sentinel value 4 triggers the problematic exit() call."""
    if n != 4:
        return n * 2
    # exit() raises SystemExit; inside a pool worker this kills only the
    # child process, which is why the parent appears to hang.
    exit("wrong number") # tried to use sys.exit(1) did not work
def caller(mylist, n=1):
    # Map worker over mylist on n cores (or all cores when n <= 1).
    n_cores = n if n > 1 else multiprocessing.cpu_count()
    print(n_cores)
    pool = multiprocessing.Pool(processes=n_cores)
    # NOTE(review): if a worker calls exit(), the resulting SystemExit is
    # captured by the pool machinery; pool.map re-raises it in the parent
    # only after all tasks finish, so the script seems to hang.
    result = pool.map(worker, mylist)
    pool.close()
    pool.join()
    return result
# Guard the entry point: without it, multiprocessing's spawn start method
# re-imports this module in every worker and would re-run the pool setup.
if __name__ == '__main__':
    l = [2, 3, 60, 4]
    myresult = caller(l, 4)
As I said, I don't think you can exit the process running the main script from a worker process.
You haven't explained exactly why you want to do this, so this answer is a guess, but perhaps raising a custom Exception and handling it in an explict except as shown below would be an acceptable way to workaround the limitation.
import multiprocessing
import sys
class WorkerStopException(Exception):
    """Raised by a worker to signal that all processing should stop."""
def worker(n):
    """Double n, raising WorkerStopException for the sentinel value 4."""
    if n != 4:
        return n * 2
    raise WorkerStopException()
def caller(mylist, n=1):
    # Run worker over mylist on n cores (all cores when n <= 1), converting
    # a WorkerStopException from any worker into a clean SystemExit.
    n_cores = n if n > 1 else multiprocessing.cpu_count()
    print(n_cores)
    pool = multiprocessing.Pool(processes=n_cores)
    try:
        result = pool.map(worker, mylist)
    except WorkerStopException:
        # pool.map re-raises the worker's exception in the parent once all
        # tasks have completed; exit with the message as the status.
        sys.exit("wrong number")
    pool.close()
    pool.join()
    return result
if __name__ == '__main__':
    # 4 triggers the WorkerStopException path inside the pool.
    myresult = caller([2, 3, 60, 4], 4)
Output displayed when run:
4
wrong number
(The 4 is the number of CPUs my system has.)
The thing with pool.map is, that it will raise exceptions from child-processes only after all tasks are finished. But your comments sound like you need immediate abortion of all processing as soon as a wrong value is detected in any process. This would be a job for pool.apply_async then.
pool.apply_async offers error_callbacks, which you can use to let the pool terminate. Workers will be fed item-wise instead of chunk-wise like with the pool.map variants, so you get the chance for early exit on each processed argument.
I'm basically reusing my answer from here:
from time import sleep
from multiprocessing import Pool
def f(x):
    """Sleep x seconds and return x doubled; x == 4 raises ValueError."""
    sleep(x)
    print(f"f({x})")
    if x != 4:
        return x * 2
    raise ValueError(f'wrong number: {x}')
def on_error(e):
    """error_callback for apply_async: terminate the pool on a ValueError."""
    # Deliberately an exact type check (not isinstance), matching the
    # original: only a plain ValueError triggers the shutdown.
    if type(e) is not ValueError:
        return
    # Record the abort for main() and kill the remaining workers;
    # `terminated` and `pool` are module globals set up by main().
    global terminated
    terminated = True
    pool.terminate()
    print(f"oops: {type(e).__name__}('{e}')")
def main():
    # Pool and flag are globals so on_error (the error_callback) can reach
    # them; apply_async feeds items one at a time, allowing early abort.
    global pool
    global terminated
    terminated = False
    pool = Pool(4)
    results = [pool.apply_async(f, (x,), error_callback=on_error)
               for x in range(10)]
    pool.close()
    pool.join()
    # Only read the results if no worker tripped the ValueError abort;
    # calling .get() after terminate() would fail.
    if not terminated:
        for r in results:
            print(r.get())
# Script entry point.
if __name__ == '__main__':
    main()
Output:
f(0)
f(1)
f(2)
f(3)
f(4)
oops: ValueError('wrong number: 4')
Process finished with exit code 0
I am trying my hand at Python multiprocessing. I want a couple of processes that are independent of each other to run in parallel, and, as they return, to check whether the process was successful or not using the ApplyAsync.successful() utility. However, when I call successful() in the callback to my subprocess, the script hangs.
import multiprocessing as mp
import time
result_map = {}
def foo_pool(x):
    """Simulate two seconds of work, then echo and return x."""
    time.sleep(2)
    # Python 3 print() call; the original used the Python 2 statement form,
    # which is a SyntaxError on Python 3.
    print(x)
    return x
result_list = []
def log_result(result):
    """apply_async callback; demonstrates the deadlock the question is about.

    (Print converted from Python 2 statement syntax to a Python 3 call;
    the hang itself is the question's subject and is intentionally kept.)
    """
    # NOTE(review): successful() is called before the result is marked
    # ready, so it blocks forever here -- see the quoted pool.py below:
    # the callback only ever fires for successful results anyway.
    print(result_map[result].successful()) #hangs
    result_list.append(result)
def apply_async_with_callback():
    # Launch ten jobs, remembering each AsyncResult in result_map so the
    # callback can look itself up by job index.
    pool = mp.Pool()
    for i in range(10):
        result_map[i] = pool.apply_async(foo_pool, args = (i, ), callback = log_result)
    pool.close()
    pool.join()
    print(result_list)
# Script entry point.
if __name__ == '__main__':
    apply_async_with_callback()
You don't need to check successful() because the callback is only called when the result was successful.
Following is the relevant code (multiprocessing/pool.py - AsyncResult)
def _set(self, i, obj):
    # CPython source (multiprocessing/pool.py, AsyncResult._set), quoted
    # for reference -- do not modify.
    self._success, self._value = obj
    # The user callback fires only on success, which is why checking
    # successful() inside the callback is redundant.
    if self._callback and self._success: # <-----
        self._callback(self._value) # <-----
    # Only after the callback returns is the result marked ready and
    # waiters notified -- hence a callback that waits on readiness hangs.
    self._cond.acquire()
    try:
        self._ready = True
        self._cond.notify()
    finally:
        self._cond.release()
    del self._cache[self._job]