To monitor the code I am working on, I am trying to create a single progress bar that tracks many tasks running in different worker processes.
I know the number of tasks (and workers) at the start of the run.
For demonstration, here is a toy example that does not work:
from multiprocessing import Pool
from tqdm import tqdm

def work(i, t):
    for _ in range(10**6):
        t.update()
    return i

def wrapped_work(params):
    work(*params)

def main(n=1):
    # another loop:
    with Pool(processes=8) as p:
        with tqdm(total=n * 10**6) as t:
            return sum(p.map(work, ((i, t) for i in range(1, n+1))))

if __name__ == "__main__":
    main(5)
I tried to apply the approach from this topic with a Pool, but without success.
I would greatly appreciate your help.
Based on this post:
from multiprocessing import Pool, Process, Value
from ctypes import c_bool, c_long

from tqdm.auto import tqdm


class TqdmMultiprocessing:
    max_processes = 64

    def __init__(self, static_func, processes=64):
        self.counter = Value(c_long, lock=False)
        self.pool = Pool(
            processes=min(processes, self.max_processes),
            initializer=self.worker_init,
            initargs=(static_func, self.counter)
        )

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.pool.close()

    def tqdm(self, static_func, iterable, **kwargs):
        done_value = Value(c_bool)
        proc = Process(target=self.listener, args=(self.counter, done_value, kwargs,))
        proc.start()

        result = self.pool.map(static_func, iterable)

        done_value.value = True
        proc.join()
        self.counter.value = 0
        return result

    @staticmethod
    def listener(counter: Value, is_done: Value, kwargs):
        with tqdm(**kwargs) as tqdm_bar:
            old_counter = 0
            while not is_done.value:
                new_counter = counter.value
                tqdm_bar.update(new_counter - old_counter)
                old_counter = new_counter
            tqdm_bar.update(tqdm_bar.total - old_counter)

    @staticmethod
    def worker_init(static_func, counter: Value):
        static_func.counter = counter


def work(i):
    for _ in range(10**6):
        work.counter.value += 1
    return i


def main(n=1):
    with TqdmMultiprocessing(work, processes=3) as p:
        p.tqdm(work, range(n), total=n * 10 ** 6)
        p.tqdm(work, range(n), total=n * 10 ** 6)


if __name__ == "__main__":
    main(5)
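In short: the pool's initializer (worker_init) attaches the shared Value to the worker function in every pool process, the workers increment that counter as they run, and a separate listener process polls the counter and updates a single tqdm bar until the done flag is set, so only one bar is ever drawn.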
Related
I want to send tasks to the pool from inside the shared class, based on some conditions. But I get some unexpected results, which are shown below.
• Why is len(self.map) 0, not 100?
• Do I have to restructure my code to achieve this goal?
from multiprocessing import Pool
from multiprocessing.managers import BaseManager

pool = None

def doSomething(obj, *args):
    obj.doSomething(*args)

class SharedClass:
    def __init__(self):
        global pool
        self.map = set()
        pool = Pool(4)

    def someCondition(self):
        # the condition relies on the instance; this is just an example
        return True

    def go(self, n):
        global pool
        for i in xrange(n):
            if self.someCondition():
                # pass the shared class to other process
                pool.apply_async(doSomething, (self, i))
        pool.close()
        pool.join()
        # got AssertionError here
        # why is the len of self.map 0?
        assert len(self.map) == 100

    def doSomething(self, n):
        # this should change the same SharedClass instance?
        self.map.add(n)

class MyManager(BaseManager):
    pass

MyManager.register("SharedClass", SharedClass)

def main():
    manager = MyManager()
    manager.start()
    obj = manager.SharedClass()
    obj.go(100)

if __name__ == "__main__":
    main()
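A note on what is going on (an observation, not part of the original thread): pool.apply_async(doSomething, (self, i)) pickles the SharedClass instance and sends a copy of it to each worker, so every worker mutates its own copy and the instance held by the manager is never touched, which is why len(self.map) stays 0. Below is a minimal sketch of one possible restructuring, assuming Python 3: drive the loop from the main process and hand the workers the manager proxy, so each doSomething() call executes on the single instance inside the manager process.

from multiprocessing import Pool
from multiprocessing.managers import BaseManager

class SharedClass:
    def __init__(self):
        self.map = set()

    def doSomething(self, n):
        self.map.add(n)

    def size(self):
        # exposed as a method because attribute access (obj.map) is not proxied
        return len(self.map)

class MyManager(BaseManager):
    pass

MyManager.register("SharedClass", SharedClass)

def worker(obj, i):
    obj.doSomething(i)  # runs inside the manager process via the proxy

if __name__ == "__main__":
    manager = MyManager()
    manager.start()
    obj = manager.SharedClass()  # a proxy; safe to pass to pool workers
    with Pool(4) as pool:
        pool.starmap(worker, ((obj, i) for i in range(100)))
    assert obj.size() == 100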
The following code works fine:
import time
from concurrent.futures import Future, as_completed
from concurrent.futures.process import ProcessPoolExecutor
import multiprocessing as mp
from multiprocessing.managers import BaseManager

class Progress(object):
    _target: int = 0
    _progress: int = 0

    def __init__(self):
        self._target = 0
        self._progress = 0

    def completed(self, n):
        self._progress += n

    def progress(self):
        return (self._progress / self._target) * 100

    def set_target(self, n):
        self._target = n

class ObjectManager(BaseManager):
    pass

def dummy_worker(progress: Progress, cancel: mp.Event = None):
    print("--> Worker started")
    for i in range(10):
        time.sleep(1)
        progress.completed(1)
    return 1

if __name__ == "__main__":
    ObjectManager.register('Progress', Progress)
    print('Starting manager')
    with ObjectManager() as manager:
        print('Manager started')
        progress = manager.Progress()
        progress.set_target(10)
        with ProcessPoolExecutor() as pool:
            f = pool.submit(dummy_worker, progress)
            futures = [f]
            for f in as_completed(futures):
                print(f.result())
        print(f'Progress: {progress.progress()}')
However, the following gives me "Condition objects should only be shared between processes through inheritance". I am not sure how to use the Event in dummy_worker. The main goal is to pass some shared object and also have a way to cancel the worker. Sorry for the long post; I wanted to give the full code for clarity.
import time
from concurrent.futures import Future, as_completed
from concurrent.futures.process import ProcessPoolExecutor
import multiprocessing as mp
from multiprocessing.managers import BaseManager

class Progress(object):
    _target: int = 0
    _progress: int = 0

    def __init__(self):
        self._target = 0
        self._progress = 0

    def completed(self, n):
        self._progress += n

    def progress(self):
        return (self._progress / self._target) * 100

    def set_target(self, n):
        self._target = n

class ObjectManager(BaseManager):
    pass

def dummy_worker(progress: Progress, cancel: mp.Event = None):
    print("--> Worker started")
    for i in range(10):
        time.sleep(1)
        progress.completed(1)
    return 1

if __name__ == "__main__":
    ObjectManager.register('Progress', Progress)
    cancel = mp.Event()
    print('Starting manager')
    with ObjectManager() as manager:
        print('Manager started')
        progress = manager.Progress()
        progress.set_target(10)
        with ProcessPoolExecutor() as pool:
            f = pool.submit(dummy_worker, progress, cancel)
            futures = [f]
            for f in as_completed(futures):
                print(f.result())
        print(f'Progress: {progress.progress()}')
Assume I am using Python 3.5+.
Try the following changes:
from multiprocessing.managers import SyncManager

class ObjectManager(SyncManager):
    pass

# use an Event() created by the ObjectManager instance: cancel = manager.Event()

if __name__ == "__main__":
    ObjectManager.register('Progress', Progress)
    #cancel = mp.Event() # not this
    print('Starting manager')
    with ObjectManager() as manager:
        print('Manager started')
        progress = manager.Progress()
        cancel = manager.Event() # but rather this
        progress.set_target(10)
        with ProcessPoolExecutor() as pool:
            f = pool.submit(dummy_worker, progress, cancel)
            futures = [f]
            for f in as_completed(futures):
                print(f.result())
        print(f'Progress: {progress.progress()}')
Prints:
Starting manager
Manager started
--> Worker started
1
Progress: 100.0
Also, get rid of (or change) the references to mp.Event.
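For example, dummy_worker might use the manager-created Event like this (a sketch, not code from the original answer; it reuses the imports from the snippet above, drops the mp.Event annotation, and assumes the worker should stop early once the event is set):

def dummy_worker(progress, cancel=None):
    print("--> Worker started")
    for i in range(10):
        # cancel is the proxy returned by manager.Event()
        if cancel is not None and cancel.is_set():
            return 0  # cancelled before finishing
        time.sleep(1)
        progress.completed(1)
    return 1

Calling cancel.set() from the main process then asks the worker to stop at its next iteration.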
Here is my prime factorization program. I added a callback function in pool.apply_async(findK, args=(N, begin, end)); a "prime factorization is over" message is printed when the factorization finishes, and it works fine.
import math
import multiprocessing

def findK(N, begin, end):
    for k in range(begin, end):
        if N % k == 0:
            print(N, "=", k, "*", N / k)
            return True
    return False

def prompt(result):
    if result:
        print("prime factorization is over")

def mainFun(N, process_num):
    pool = multiprocessing.Pool(process_num)
    for i in range(process_num):
        if i == 0:
            begin = 2
        else:
            begin = int(math.sqrt(N) / process_num * i) + 1
        end = int(math.sqrt(N) / process_num * (i + 1))
        pool.apply_async(findK, args=(N, begin, end), callback=prompt)
    pool.close()
    pool.join()

if __name__ == "__main__":
    N = 684568031001583853
    process_num = 16
    mainFun(N, process_num)
Now I want to change the callback function in apply_async, turning prompt into a shutdown function that kills all the other processes.

def prompt(result):
    if result:
        pool.terminate()

The pool instance is not defined in prompt's scope, nor is it passed in, so pool.terminate() cannot work inside the prompt function.
How can I pass the multiprocessing.Pool instance to apply_async's callback function?
(I have already made this work in class form: adding a class method and calling self.pool.terminate kills all the other processes. How can I do the same thing with plain functions?)
If pool is not made a global variable, can it still be passed into the callback function?
Passing extra arguments to the callback function is not supported. Yet you have plenty of elegant ways to work around that.
You can encapsulate your pool logic into an object:
class Executor:
    def __init__(self, process_num):
        self.pool = multiprocessing.Pool(process_num)

    def prompt(self, result):
        if result:
            print("prime factorization is over")
            self.pool.terminate()

    def schedule(self, function, args):
        self.pool.apply_async(function, args=args, callback=self.prompt)

    def wait(self):
        self.pool.close()
        self.pool.join()

def main(N, process_num):
    executor = Executor(process_num)
    for i in range(process_num):
        ...
        executor.schedule(findK, (N, begin, end))
    executor.wait()
Or you can use the concurrent.futures.Executor implementation, which returns Future objects. You just attach the pool to the Future object before setting the callback.
def prompt(future):
    if future.result():
        print("prime factorization is over")
        future.pool_executor.shutdown(wait=False)

def main(N, process_num):
    executor = concurrent.futures.ProcessPoolExecutor(max_workers=process_num)
    for i in range(process_num):
        ...
        future = executor.submit(findK, N, begin, end)
        future.pool_executor = executor
        future.add_done_callback(prompt)
You can simply define a local close function as a callback:
import math
import multiprocessing

def findK(N, begin, end):
    for k in range(begin, end):
        if N % k == 0:
            print(N, "=", k, "*", N / k)
            return True
    return False

def mainFun(N, process_num):
    pool = multiprocessing.Pool(process_num)

    def close(result):
        if result:
            print("prime factorization is over")
            pool.terminate()

    for i in range(process_num):
        if i == 0:
            begin = 2
        else:
            begin = int(math.sqrt(N) / process_num * i) + 1
        end = int(math.sqrt(N) / process_num * (i + 1))
        pool.apply_async(findK, args=(N, begin, end), callback=close)
    pool.close()
    pool.join()

if __name__ == "__main__":
    N = 684568031001583853
    process_num = 16
    mainFun(N, process_num)
You can also use a partial function from functools:

import functools

def close_pool(pool, result):
    if result:
        pool.terminate()

def mainFun(N, process_num):
    pool = multiprocessing.Pool(process_num)
    close = functools.partial(close_pool, pool)
    ...
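Because partial pre-binds pool as the first argument, close keeps the single-argument signature that apply_async expects from a callback. As a sketch (assuming the same findK and the begin/end loop from the answers above), the elided part of mainFun could then look like:

    for i in range(process_num):
        begin = 2 if i == 0 else int(math.sqrt(N) / process_num * i) + 1
        end = int(math.sqrt(N) / process_num * (i + 1))
        pool.apply_async(findK, args=(N, begin, end), callback=close)
    pool.close()
    pool.join()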
You need to have pool end up in prompt's environment. One possibility is to move pool into the global scope (though this isn't really best-practice). This appears to work:
import math
import multiprocessing

pool = None

def findK(N, begin, end):
    for k in range(begin, end):
        if N % k == 0:
            print(N, "=", k, "*", N / k)
            return True
    return False

def prompt(result):
    if result:
        print("prime factorization is over")
        pool.terminate()

def mainFun(N, process_num):
    global pool
    pool = multiprocessing.Pool(process_num)
    for i in range(process_num):
        if i == 0:
            begin = 2
        else:
            begin = int(math.sqrt(N) / process_num * i) + 1
        end = int(math.sqrt(N) / process_num * (i + 1))
        pool.apply_async(findK, args=(N, begin, end), callback=prompt)
    pool.close()
    pool.join()

if __name__ == "__main__":
    N = 684568031001583853
    process_num = 16
    mainFun(N, process_num)
I swear I saw the following in an example somewhere, but now I can't find that example and this isn't working. The __call__ class function never gets called.
EDIT: Code updated
pool.map appears to start the QueueWriter instance and the __call__ function is reached. However, the workers never seem to start or at least no results are pulled from the queue. Is my queue set up the right way? Why do the workers not fire off?
import multiprocessing as mp
import os
import random

class QueueWriter(object):
    def __init__(self, **kwargs):
        self.grid = kwargs.get("grid")
        self.path = kwargs.get("path")

    def __call__(self, q):
        print self.path
        log = open(self.path, "a", 1)
        log.write("QueueWriter called.\n")
        while 1:
            res = q.get()
            if res == 'kill':
                self.log.write("QueueWriter received 'kill' message. Closing Writer.\n")
                break
            else:
                self.log.write("This is where I'd write: {0} to grid file.\n".format(res))
        log.close()
        log = None

class Worker(object):
    def __init__(self, **kwargs):
        self.queue = kwargs.get("queue")
        self.grid = kwargs.get("grid")

    def __call__(self, idx):
        res = self.workhorse(self, idx)
        self.queue.put((idx, res))
        return res

    def workhorse(self, idx):
        # in reality a fairly complex operation
        return self.grid[idx] ** self.grid[idx]

if __name__ == '__main__':
    # log = open(os.path.expanduser('~/minimal.log'), 'w',1)
    path = os.path.expanduser('~/minimal.log')
    pool = mp.Pool(mp.cpu_count())
    manager = mp.Manager()
    q = manager.Queue()
    grid = [random.random() for _ in xrange(10000)]
    # in actuality grid is a shared resource, read by Workers and written
    # to by QueueWriter
    qWriter = QueueWriter(grid=grid, path=path)
    watcher = pool.map(qWriter, (q,), 1)
    wrkr = Worker(queue=q, grid=grid)
    result = pool.map(wrkr, range(10000), 1)
    result.get()
    q.put('kill')
    pool.close()
    pool.join()
So the log does indeed print the initialization message, but then the __call__ function is never called. Is this one of those pickling issues I've seen discussed so often? I've found answers about class member functions, but what about class instances?
At the gentle and patient prodding of martineau (thanks!) I think I've ironed out the problems. I have yet to apply it to my original code, but it is working in the example above and I'll start new questions for future implementation problems.
So in addition to changing where in the code the target file (the log, in this example) gets opened, I also started the QueueWriter instance as a single multiprocessing Process rather than using pool.map. As martineau pointed out, the map call blocks until qWriter.__call__() returns, and this prevented the workers from being called.
There were some other bugs in the code above, but those were incidental and fixed below:
import multiprocessing as mp
import os
import random

class QueueWriter(object):
    def __init__(self, **kwargs):
        self.grid = kwargs.get("grid")
        self.path = kwargs.get("path")

    def __call__(self, q):
        print self.path
        log = open(self.path, "a", 1)
        log.write("QueueWriter called.\n")
        while 1:
            res = q.get()
            if res == 'kill':
                log.write("QueueWriter received 'kill' message. Closing Writer.\n")
                break
            else:
                log.write("This is where I'd write: {0} to grid file.\n".format(res))
        log.close()
        log = None

class Worker(object):
    def __init__(self, **kwargs):
        self.queue = kwargs.get("queue")
        self.grid = kwargs.get("grid")

    def __call__(self, idx):
        res = self.workhorse(idx)
        self.queue.put((idx, res))
        return res

    def workhorse(self, idx):
        # in reality a fairly complex operation
        return self.grid[idx] ** self.grid[idx]

if __name__ == '__main__':
    # log = open(os.path.expanduser('~/minimal.log'), 'w',1)
    path = os.path.expanduser('~/minimal.log')
    pool = mp.Pool(mp.cpu_count())
    manager = mp.Manager()
    q = manager.Queue()
    grid = [random.random() for _ in xrange(10000)]
    # in actuality grid is a shared resource, read by Workers and written
    # to by QueueWriter
    qWriter = QueueWriter(grid=grid, path=path)
    # watcher = pool.map(qWriter, (q,),1)
    # Start the writer as a single process rather than a pool
    p = mp.Process(target=qWriter, args=(q,))
    p.start()
    wrkr = Worker(queue=q, grid=grid)
    result = pool.map(wrkr, range(10000), 1)
    # result.get()
    # not required for pool
    q.put('kill')
    pool.close()
    p.join()
    pool.join()
In order to speed up a certain list-processing logic, I wrote a decorator that would 1) intercept the incoming function call, 2) take its input list and break it into multiple pieces, 3) pass these pieces to the original function on separate threads, and 4) combine the outputs and return them.
I thought it was a pretty neat idea, until I coded it and saw that there was no change in speed! Even though I see multiple cores busy on htop, the multithreaded version is actually slower than the single-threaded version.
Does this have to do with the infamous CPython GIL?
Thanks!
from threading import Thread
import numpy as np
import time

# breaks a list into n lists
def split(a, n):
    k, m = len(a) / n, len(a) % n
    return (a[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in xrange(n))

THREAD_NUM = 8

def parallel_compute(fn):
    class Worker(Thread):
        def __init__(self, *args):
            Thread.__init__(self)
            self.result = None
            self.args = args

        def run(self):
            self.result = fn(*self.args)

    def new_compute(*args, **kwargs):
        threads = [Worker(args[0], args[1], args[2], x) for x in split(args[3], THREAD_NUM)]
        for x in threads: x.start()
        for x in threads: x.join()
        final_res = []
        for x in threads: final_res.extend(x.result)
        return final_res

    return new_compute

# some function that does a lot of computation
def f(x): return np.abs(np.tan(np.cos(np.sqrt(x**2))))

class Foo:
    @parallel_compute
    def compute(self, bla, blah, input_list):
        return map(f, input_list)

inp = [i for i in range(40*1000*100)]
#inp = [1,2,3,4,5,6,7]

if __name__ == "__main__":
    o = Foo()
    start = time.time()
    res = o.compute(None, None, inp)
    end = time.time()
    print 'parallel', end - start
Single thread version
import time, fast_one, numpy as np

class SlowFoo:
    def compute(self, bla, blah, input_list):
        return map(fast_one.f, input_list)

if __name__ == "__main__":
    o = SlowFoo()
    start = time.time()
    res = np.array(o.compute(None, None, fast_one.inp))
    end = time.time()
    print 'single', end - start
And here is the multiprocessing version that gives "PicklingError: Can't pickle <type 'function'>: attribute lookup __builtin__.function failed".
import pathos.multiprocessing as mp
import numpy as np, dill
import time

def split(a, n):
    k, m = len(a) / n, len(a) % n
    return (a[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in xrange(n))

def f(x): return np.abs(np.tan(np.cos(np.sqrt(x**2))))

def compute(input_list):
    return map(f, input_list)

D = 2; pool = mp.Pool(D)

def parallel_compute(fn):
    def new_compute(*args, **kwargs):
        inp = []
        for x in split(args[0], D): inp.append(x)
        outputs_async = pool.map_async(fn, inp)
        outputs = outputs_async.get()
        outputs = [y for x in outputs for y in x]
        return outputs
    return new_compute

compute = parallel_compute(compute)

inp = [i for i in range(40*1000)]

if __name__ == "__main__":
    start = time.time()
    res = compute(inp)
    end = time.time()
    print 'parallel', end - start
    print len(res)
Yes, when your threads are doing CPU-bound work implemented in Python (not by, say, C extensions which can release the GIL before and after marshalling/demarshalling data from Python structures), the GIL is a problem here.
I'd suggest using a multiprocessing model, a Python implementation that doesn't have it (IronPython, Jython, etc), or a different language altogether (if you're doing performance-sensitive work, there's no end of languages nearly as fluid as Python but with considerably better runtime performance).
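As an illustration of the multiprocessing route, here is a minimal sketch (not from the original answer; it assumes Python 3 and keeps the worker function at module level so the standard pickler can ship it to the workers, which sidesteps the PicklingError above):

import numpy as np
from multiprocessing import Pool

def f(x):
    return np.abs(np.tan(np.cos(np.sqrt(x ** 2))))

def compute_chunk(chunk):
    # top-level function: picklable, unlike a decorated bound method
    return [f(x) for x in chunk]

def split(a, n):
    # break list a into n roughly equal chunks
    k, m = divmod(len(a), n)
    return [a[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(n)]

if __name__ == "__main__":
    inp = list(range(40 * 1000))
    with Pool(processes=4) as pool:
        results = pool.map(compute_chunk, split(inp, 4))
    flat = [y for chunk in results for y in chunk]
    print(len(flat))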
Alternatively, you can redesign the code and run all of the parallel work in subprocesses.
You would need worker threads that each start a subprocess for the calculation.
Those subprocesses can run truly in parallel.