Multiprocessing over grid points of a 2D function - python

I have a two-dimensional function and I want to evaluate it on the grid points, but the two loops over rows and columns are very slow, so I want to use multiprocessing to speed up the code. I have written the following code for the two loops:
from multiprocessing import Pool
import numpy as np

# Grid points
ra = np.linspace(25.1446, 25.7329, 1000)
dec = np.linspace(-10.477, -9.889, 1000)

# The 2D function
def like2d(x, y):
    stuff = [RaDec, beta, rho_c_over_sigma_c, zhalo, rho_crit]
    m = 3e14
    c = 7.455
    param = [x, y, m, c]
    return reduced_shear(param, stuff, observed_g, g_err)

pool = Pool(processes=12)

def data_stream(a, b):
    for i, av in enumerate(a):
        for j, bv in enumerate(b):
            yield (i, j), (av, bv)

def myfunc(args):
    return args[0], like2d(*args[1])

counter, likelihood = pool.map(myfunc, data_stream(ra, dec))
But I got the following error message:
Process PoolWorker-1:
Traceback (most recent call last):
File "/user/anaconda/lib/python2.7/multiprocessing/process.py", line 258, in _bootstrap
self.run()
File "/user/anaconda/lib/python2.7/multiprocessing/process.py", line 114, in run
self._target(*self._args, **self._kwargs)
File "/user/anaconda/lib/python2.7/multiprocessing/pool.py", line 102, in worker
task = get()
File "/user/anaconda/lib/python2.7/multiprocessing/queues.py", line 376, in get
return recv()
AttributeError: 'module' object has no attribute 'myfunc'
(The same traceback is repeated for PoolWorker-2, PoolWorker-3, PoolWorker-4, and so on.)
Everything is defined, and I do not understand why this error is raised. Can anybody point out what might be wrong?
Another approach I tried, doing the loops with multiprocessing and saving the results in a 2D array:
import multiprocessing
import ctypes
import time
import numpy as np

# Grid points
ra = np.linspace(25.1446, 25.7329, 1000)
dec = np.linspace(-10.477, -9.889, 1000)

# The 2D function
def like2d(x, y):
    stuff = [RaDec, beta, rho_c_over_sigma_c, zhalo, rho_crit]
    m = 3e14
    c = 7.455
    param = [x, y, m, c]
    return reduced_shear(param, stuff, observed_g, g_err)

# Shared array holding the 2D results
shared_array_base = multiprocessing.Array(ctypes.c_double, ra.shape[0]*dec.shape[0])
shared_array = np.ctypeslib.as_array(shared_array_base.get_obj())
shared_array = shared_array.reshape(ra.shape[0], dec.shape[0])

# Parallel processing
def my_func(i, def_param=shared_array):
    shared_array[i, :] = np.array([float(like2d(ra[j], dec[i])) for j in range(ra.shape[0])])

print "processing to estimate likelihood in 2D grids......!!!"
start = time.time()
pool = multiprocessing.Pool(processes=12)
pool.map(my_func, range(dec.shape[0]))
print shared_array
end = time.time()
print end - start

You have to create the Pool after the worker function (myfunc) definition. Creating the Pool causes Python to fork your worker processes right at that point, and the only things that will be defined in the children are the functions defined above the Pool definition. Also, map will return a list of tuples (one for each object yielded by data_stream), not a single tuple. So you need this:
from multiprocessing import Pool
import numpy as np

# Grid points
ra = np.linspace(25.1446, 25.7329, 1000)
dec = np.linspace(-10.477, -9.889, 1000)

# The 2D function
def like2d(x, y):
    stuff = [RaDec, beta, rho_c_over_sigma_c, zhalo, rho_crit]
    m = 3e14
    c = 7.455
    param = [x, y, m, c]
    return reduced_shear(param, stuff, observed_g, g_err)

def data_stream(a, b):
    for i, av in enumerate(a):
        for j, bv in enumerate(b):
            yield (i, j), (av, bv)

def myfunc(args):
    return args[0], like2d(*args[1])

if __name__ == "__main__":
    pool = Pool(processes=12)
    results = pool.map(myfunc, data_stream(ra, dec))  # results is a list of tuples.
    for counter, likelihood in results:
        print("counter: {}, likelihood: {}".format(counter, likelihood))
I added the if __name__ == "__main__": guard, which isn't necessary on POSIX platforms, but would be necessary on Windows (which doesn't support os.fork()).
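If the goal is a full 2D likelihood surface, the (i, j) indices returned with each result can be used to fill a grid array afterwards. A minimal sketch of that post-processing step, assuming the results list produced by the code above and that like2d returns a scalar:
import numpy as np

# Sketch: assemble the ((i, j), value) pairs into a 2D grid
likelihood_grid = np.empty((len(ra), len(dec)))
for (i, j), value in results:
    likelihood_grid[i, j] = value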

Related

Multiprocessing BrokenPipeError

I have the following problem. I am running a parallel task and I am getting this error:
Traceback (most recent call last):
File "/usr/lib/python3.8/multiprocessing/process.py", line 315, in _bootstrap
self.run()
File "/usr/lib/python3.8/multiprocessing/process.py", line 108, in run
self._target(*self._args, **self._kwargs)
File "eclat_model.py", line 127, in do_work
function(*args, work_queue, valid_list)
File "eclat_model.py", line 115, in eclat_parallel_helper
valid_list.extend(next_vectors)
File "<string>", line 2, in extend
File "/usr/lib/python3.8/multiprocessing/managers.py", line 834, in _callmethod
conn.send((self._id, methodname, args, kwds))
File "/usr/lib/python3.8/multiprocessing/connection.py", line 206, in send
self._send_bytes(_ForkingPickler.dumps(obj))
File "/usr/lib/python3.8/multiprocessing/connection.py", line 404, in _send_bytes
self._send(header)
File "/usr/lib/python3.8/multiprocessing/connection.py", line 368, in _send
n = write(self._handle, buf)
BrokenPipeError: [Errno 32] Broken pipe
Relevant functions in eclat_model.py look like this:
def eclat_parallel_helper(index, bit_vectors, min_support, work_queue, valid_list):
    next_vectors = []
    for j in range(index + 1, len(bit_vectors)):
        item_vector = bit_vectors[index][0] | bit_vectors[j][0]
        transaction_vector = bit_vectors[index][1] & bit_vectors[j][1]
        support = get_vector_support(transaction_vector)
        if support >= min_support:
            next_vectors.append((item_vector, transaction_vector, support))
    if len(next_vectors) > 0:
        valid_list.extend(next_vectors)
        for i in range(len(next_vectors)):
            work_queue.put((eclat_parallel_helper, (i, next_vectors, min_support)))

def do_work(work_queue, valid_list, not_done):
    # work queue entries have the form (function, args)
    while not_done.value:
        try:
            function, args = work_queue.get_nowait()
        except QueueEmptyError:
            continue
        function(*args, work_queue, valid_list)
        work_queue.task_done()
    work_queue.close()
EDIT:
The multiprocessing part of the code is as follows. bit_vectors is a list of lists, where each entry is of the form [items, transactions, support]: items is a bit vector encoding which items appear in the itemset, transactions is a bit vector encoding which transactions the itemset appears in, and support is the number of transactions in which the itemset occurs.
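For concreteness, a hypothetical entry under that description, using plain integers as the bit vectors (the actual representation in eclat_model.py may differ):
# items 0 and 2 are in the itemset; the itemset occurs in transactions 1 and 2; support = 2
entry = [0b101, 0b110, 2]
bit_vectors = [entry]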
from multiprocessing import Process, JoinableQueue, Manager, Value, cpu_count

def eclat_parallel(bit_vectors, min_support):
    not_done = Value('i', 1)
    manager = Manager()
    valid_list = manager.list()
    work_queue = JoinableQueue()
    for i in range(len(bit_vectors)):
        work_queue.put((eclat_parallel_helper, (i, bit_vectors, min_support)))

    processes = []
    for i in range(cpu_count()):
        p = Process(target=do_work, args=(work_queue, valid_list, not_done), daemon=True)
        p.start()
        processes.append(p)

    work_queue.join()
    not_done.value = 0
    work_queue.close()

    valid_itemset_vectors = bit_vectors
    for element in valid_list:
        valid_itemset_vectors.append(element)

    for p in processes:
        p.join()

    return valid_itemset_vectors
What does this error mean, please? Am I appending too many elements into the next_vectors list?
I had the same issue; in my case, just adding a delay (time.sleep(0.01)) solved it.
The problem is that the individual processes are too fast on the queue, which causes the error.
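For illustration, a minimal sketch of where such a delay could go in eclat_parallel_helper (the exact placement is an assumption; the answer above does not specify it):
import time

def eclat_parallel_helper(index, bit_vectors, min_support, work_queue, valid_list):
    next_vectors = []
    # ... build next_vectors exactly as in the question ...
    if len(next_vectors) > 0:
        time.sleep(0.01)  # assumed placement: brief pause before touching the managed list/queue
        valid_list.extend(next_vectors)
        for i in range(len(next_vectors)):
            work_queue.put((eclat_parallel_helper, (i, next_vectors, min_support)))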

Can't use pool.map for a class method in ROS Python

I have a time-consuming function which is called many times with different input arguments. I am trying to parallelize it using the Pool from the multiprocessing module in Python 3. Please see the sample code below:
#! /usr/bin/env python3
import rospy
import actionlib
import multiprocessing as mp
from actionlib_tutorials.msg import FibonacciAction, FibonacciResult

def time_taking_task(n):
    # only for debugging
    a, b = 0, 1
    for _ in range(0, n):
        a, b = b, a + b
    return a

class FibonacciActionClass(object):
    def __init__(self, name):
        self._as = actionlib.SimpleActionServer(
            name,
            FibonacciAction,
            execute_cb=self.cb,
            auto_start=False,
        )
        self._as.start()

    def time_taking_task(self, n):
        # only for debugging
        a, b = 0, 1
        for _ in range(0, n):
            a, b = b, a + b
        return a

    def cb(self, goal):
        result = FibonacciResult()
        result.sequence.append(0)
        with mp.Pool(processes=4) as pool:
            nums = pool.map(self.time_taking_task, list(range(1, goal.order + 1)))  # doesn't work
            # nums = pool.map(time_taking_task, list(range(1, goal.order + 1)))  # works
        result.sequence.extend(nums)
        self._as.set_succeeded(result)

if __name__ == "__main__":
    rospy.init_node("fibonacci")
    server = FibonacciActionClass(rospy.get_name())
    rospy.spin()
The map method takes a function and an iterable object. Passing time_taking_task as the function works, but self.time_taking_task throws the following error:
[ERROR] [1643875827.675670]: Exception in your execute callback: cannot pickle '_thread.RLock' object
Traceback (most recent call last):
File "/opt/ros/noetic/lib/python3/dist-packages/actionlib/simple_action_server.py", line 289, in executeLoop
self.execute_callback(goal)
File "/home/user/ros_ws/src/actionlib_tutorials/scripts/fibonacci_server.py", line 34, in cb
nums = pool.map(self.time_taking_task, list(range(1, goal.order + 1)))
File "/usr/lib/python3.8/multiprocessing/pool.py", line 364, in map
return self._map_async(func, iterable, mapstar, chunksize).get()
File "/usr/lib/python3.8/multiprocessing/pool.py", line 771, in get
raise self._value
File "/usr/lib/python3.8/multiprocessing/pool.py", line 537, in _handle_tasks
put(task)
File "/usr/lib/python3.8/multiprocessing/connection.py", line 206, in send
self._send_bytes(_ForkingPickler.dumps(obj))
File "/usr/lib/python3.8/multiprocessing/reduction.py", line 51, in dumps
cls(buf, protocol).dump(obj)
TypeError: cannot pickle '_thread.RLock' object
I believe ROS isn't allowing the Pool somehow. Any workaround, please?
If you want to run a function in a Python multiprocessing subprocess, the function and all its arguments must be picklable. Since self is one of the arguments of the function you are passing to the pool, your class has to be picklable too, and it isn't: this seems to be due to actionlib.SimpleActionServer not being picklable because of its RLock object. See this website for what types are picklable by default.
You could either try using another library like pathos instead of multiprocessing, as it uses a different way of pickling objects, or you could move your function out of your class and pass the data from your class that the function needs as function arguments.
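A minimal sketch of the second suggestion, with the ROS/actionlib wiring omitted: the worker lives at module level so it is picklable, and the instance data it needs (here a hypothetical integer offset) is bound in as a plain argument rather than via self:
import multiprocessing as mp
from functools import partial

def fib_plus_offset(n, offset):
    # module-level worker: picklable, and it never drags `self` along
    a, b = 0, 1
    for _ in range(0, n):
        a, b = b, a + b
    return a + offset

class FibonacciActionClass(object):
    def __init__(self):
        self.offset = 0  # hypothetical instance data the worker needs

    def cb(self, order):
        # bind plain data from the instance, not the bound method itself
        worker = partial(fib_plus_offset, offset=self.offset)
        with mp.Pool(processes=4) as pool:
            return pool.map(worker, list(range(1, order + 1)))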

pytorch multiprocessing - TypeError: 'str' object is not callable

I want to configure a multi-GPU environment using torch.multiprocessing and torch.distributed. However, I received the following error message.
Traceback (most recent call last):
File "train_custom.py", line 398, in <module>
mp.spawn(init_process, args=(world_size, backend), nprocs=world_size, join=True)
File "/usr/local/lib/python3.8/dist-packages/torch/multiprocessing/spawn.py", line 230, in spawn
return start_processes(fn, args, nprocs, join, daemon, start_method='spawn')
File "/usr/local/lib/python3.8/dist-packages/torch/multiprocessing/spawn.py", line 188, in start_processes
while not context.join():
File "/usr/local/lib/python3.8/dist-packages/torch/multiprocessing/spawn.py", line 150, in join
raise ProcessRaisedException(msg, error_index, failed_process.pid)
torch.multiprocessing.spawn.ProcessRaisedException:
-- Process 0 terminated with the following error:
Traceback (most recent call last):
File "/usr/local/lib/python3.8/dist-packages/torch/multiprocessing/spawn.py", line 59, in _wrap
fn(i, *args)
File "/root/USRGAN_step2/train_custom.py", line 390, in init_process
fn(rank, size2)
TypeError: 'str' object is not callable
My code is as follows.
def run(rank, size2):
    ...

def init_process(rank, size2, fn, backend='gloo'):
    """ Initialize the distributed environment. """
    os.environ['MASTER_ADDR'] = '127.0.0.1'
    os.environ['MASTER_PORT'] = '29500'
    dist.init_process_group(backend, rank=rank, world_size=torch.cuda.device_count())
    fn(rank, size2)

###################################
if __name__ == "__main__":
    world_size = torch.cuda.device_count()
    backend = 'gloo'
    mp.spawn(init_process, args=(world_size, backend), nprocs=world_size, join=True)
    #mp.set_start_method('spawn', force=True)
    processes = []
    size2 = 4
    for rank in range(size2):
        p = Process(target=init_process, args=(rank, size2, run))
        p.start()
        processes.append(p)
    for p in processes:
        p.join()
I followed the tutorial (https://pytorch.org/tutorials/intermediate/dist_tuto.html) as it is, but this error occurred and I can't find the cause of the error.
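For reference, torch.multiprocessing.spawn invokes its target as fn(i, *args), so the values in args are mapped positionally onto the target's parameters after the rank. A minimal sketch of that convention (the print is only illustrative):
import torch.multiprocessing as mp

def init_process(rank, size2, fn, backend='gloo'):
    # With mp.spawn(init_process, args=(world_size, backend), ...), this runs as
    # init_process(rank, world_size, backend): size2 receives world_size and fn
    # receives the backend string, so fn(rank, size2) would try to call a str.
    print(rank, size2, fn, backend)

if __name__ == "__main__":
    mp.spawn(init_process, args=(4, 'gloo'), nprocs=2, join=True)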

How to execute a defined function with different arguments in parallel

I have a function with multiple arguments. What I need is a code block that will execute my function in parallel with 3 different sets of arguments and then wait until all the processes are done before running my other code block.
I tried this, but the result isn't the one I need.
import multiprocessing
from itertools import product

bs = []

def a(i):
    for x in range(i):
        print(i)
        b = x + 1
        bs.append(b)
    return bs

if __name__ == '__main__':
    i = range(4)
    with multiprocessing.Pool(processes=3) as pool:
        result = pool.starmap(a, product(i))
    print(result)
The result:
1
2
2
3
3
3
[[], [1], [1, 1, 2], [1, 1, 2, 1, 2, 3]]
I have read threads where people used Process from multiprocessing, which has .join(), but I don't understand them well enough to write code based on them.
Edit:
I also tried this and got an error:
from multiprocessing import Process

bs = []

def a(i):
    for x in range(i):
        print(i)
        b = x + 1
        bs.append(b)
    return bs

if __name__ == '__main__':
    p1 = Process(target=a(2))
    p1.start()
    p2 = Process(target=a(3))
    p2.start()
    p1.join()
    p2.join()
The result:
2
2
3
3
3
Process Process-1:
Traceback (most recent call last):
File "E:\Python38-32\lib\multiprocessing\process.py", line 315, in _bootstrap
self.run()
File "E:\Python38-32\lib\multiprocessing\process.py", line 108, in run
self._target(*self._args, **self._kwargs)
TypeError: 'list' object is not callable
Process Process-2:
Traceback (most recent call last):
File "E:\Python38-32\lib\multiprocessing\process.py", line 315, in _bootstrap
self.run()
File "E:\Python38-32\lib\multiprocessing\process.py", line 108, in run
self._target(*self._args, **self._kwargs)
TypeError: 'list' object is not callable
Since multiprocessing is a bit complicated, can you help me solve this?
Thank you.
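For context, the documented Process pattern passes the function object and its arguments separately, so the target is not called in the parent process; a minimal sketch of that pattern with three argument sets and a join barrier (the worker body is only illustrative):
from multiprocessing import Process

def a(i):
    # illustrative worker: just compute something with i
    print(i, sum(range(i)))

if __name__ == '__main__':
    # pass the function and its arguments separately; do not call a(...) here
    processes = [Process(target=a, args=(n,)) for n in (2, 3, 4)]
    for p in processes:
        p.start()
    for p in processes:
        p.join()  # wait here until all three have finished
    print("all done")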

Multiprocessing deadlocks during large computation using Pool().apply_async

I have an issue in Python 3.7.3 where my multiprocessing operation (using Queue, Pool, and apply_async) deadlocks when handling large computational tasks.
For small computations, this multiprocessing task works just fine. However, when dealing with larger processes, the multiprocessing task stops, or deadlocks, altogether without exiting the process! I read that this will happen if you "grow your queue without bounds, and you are joining up to a subprocess that is waiting for room in the queue [...] your main process is stalled waiting for that one to complete, and it never will." (Process.join() and queue don't work with large numbers)
I am having trouble converting this concept into code. I would greatly appreciate guidance on refactoring the code I have written below:
import multiprocessing as mp

def listener(q, d):  # task to queue information into a manager dictionary
    while True:
        item_to_write = q.get()
        if item_to_write == 'kill':
            break
        foo = d['region']
        foo.add(item_to_write)
        d['region'] = foo  # add items and set to manager dictionary

def main():
    manager = mp.Manager()
    q = manager.Queue()
    d = manager.dict()
    d['region'] = set()
    pool = mp.Pool(mp.cpu_count() + 2)
    watcher = pool.apply_async(listener, (q, d))
    jobs = []
    for i in range(24):
        job = pool.apply_async(execute_search, (q, d))  # task for multiprocessing
        jobs.append(job)
    for job in jobs:
        job.get()  # begin multiprocessing task
    q.put('kill')  # kill multiprocessing task (view listener function)
    pool.close()
    pool.join()
    print('process complete')

if __name__ == '__main__':
    main()
Ultimately, I would like to prevent deadlocking altogether to facilitate a multiprocessing task that could operate indefinitely until completion.
BELOW IS THE TRACEBACK WHEN EXITING DEADLOCK IN BASH
^CTraceback (most recent call last):
File "multithread_search_cl_gamma.py", line 260, in <module>
main(GEOTAG)
File "multithread_search_cl_gamma.py", line 248, in main
job.get()
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/pool.py", line 651, in get
Process ForkPoolWorker-28:
Process ForkPoolWorker-31:
Process ForkPoolWorker-30:
Process ForkPoolWorker-27:
Process ForkPoolWorker-29:
Process ForkPoolWorker-26:
self.wait(timeout)
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/pool.py", line 648, in wait
Traceback (most recent call last):
Traceback (most recent call last):
Traceback (most recent call last):
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/process.py", line 297, in _bootstrap
self.run()
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/process.py", line 297, in _bootstrap
self.run()
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/process.py", line 99, in run
self._target(*self._args, **self._kwargs)
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/process.py", line 99, in run
self._target(*self._args, **self._kwargs)
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/pool.py", line 110, in worker
task = get()
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/pool.py", line 110, in worker
task = get()
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/queues.py", line 351, in get
with self._rlock:
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/queues.py", line 351, in get
self._event.wait(timeout)
File "/Users/Ira/anaconda3/lib/python3.7/threading.py", line 552, in wait
Traceback (most recent call last):
Traceback (most recent call last):
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/process.py", line 297, in _bootstrap
self.run()
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/process.py", line 99, in run
self._target(*self._args, **self._kwargs)
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/pool.py", line 110, in worker
task = get()
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/queues.py", line 352, in get
res = self._reader.recv_bytes()
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/connection.py", line 216, in recv_bytes
buf = self._recv_bytes(maxlength)
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/connection.py", line 407, in _recv_bytes
buf = self._recv(4)
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/connection.py", line 379, in _recv
chunk = read(handle, remaining)
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/process.py", line 297, in _bootstrap
self.run()
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/process.py", line 99, in run
self._target(*self._args, **self._kwargs)
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/pool.py", line 110, in worker
task = get()
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/queues.py", line 351, in get
with self._rlock:
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/synchronize.py", line 95, in __enter__
return self._semlock.__enter__()
KeyboardInterrupt
KeyboardInterrupt
signaled = self._cond.wait(timeout)
File "/Users/Ira/anaconda3/lib/python3.7/threading.py", line 296, in wait
waiter.acquire()
KeyboardInterrupt
with self._rlock:
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/synchronize.py", line 95, in __enter__
return self._semlock.__enter__()
KeyboardInterrupt
Traceback (most recent call last):
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/process.py", line 297, in _bootstrap
self.run()
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/process.py", line 99, in run
self._target(*self._args, **self._kwargs)
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/pool.py", line 110, in worker
task = get()
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/queues.py", line 351, in get
with self._rlock:
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/synchronize.py", line 95, in __enter__
return self._semlock.__enter__()
KeyboardInterrupt
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/synchronize.py", line 95, in __enter__
return self._semlock.__enter__()
KeyboardInterrupt
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/process.py", line 297, in _bootstrap
self.run()
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/process.py", line 99, in run
self._target(*self._args, **self._kwargs)
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/pool.py", line 110, in worker
task = get()
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/queues.py", line 351, in get
with self._rlock:
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/synchronize.py", line 95, in __enter__
return self._semlock.__enter__()
KeyboardInterrupt
Below is the updated script:
import multiprocessing as mp
import queue
import time

def listener(q, d, stop_event):
    while not stop_event.is_set():
        try:
            while True:
                item_to_write = q.get(False)
                if item_to_write == 'kill':
                    break
                foo = d['region']
                foo.add(item_to_write)
                d['region'] = foo
        except queue.Empty:
            pass
        time.sleep(0.5)
        if not q.empty():
            continue

def main():
    manager = mp.Manager()
    stop_event = manager.Event()
    q = manager.Queue()
    d = manager.dict()
    d['region'] = set()
    pool = mp.get_context("spawn").Pool(mp.cpu_count() + 2)
    watcher = pool.apply_async(listener, (q, d, stop_event))
    stop_event.set()
    jobs = []
    for i in range(24):
        job = pool.apply_async(execute_search, (q, d))
        jobs.append(job)
    for job in jobs:
        job.get()
    q.put('kill')
    pool.close()
    pool.join()
    print('process complete')

if __name__ == '__main__':
    main()
UPDATE:
execute_command runs several processes necessary for the search, so I have included the code showing where q.put() is called.
On its own, the script takes > 72 hrs to finish. No single worker process completes the entire task; rather, they work individually and reference a manager.dict() to avoid repeating work. These tasks run until every tuple in the manager.dict() has been processed.
def area(self, tup, housing_dict, q):
    state, reg, sub_reg = tup[0], tup[1], tup[2]
    for cat in housing_dict:
        """
        computationally expensive, takes > 72 hours
        for a list of 512 tup(s)
        """
        result = self.search_geotag(
            state, reg, cat, area=sub_reg
        )
    q.put(tup)
The tup from q.put(tup) is ultimately picked up by the listener function, which adds it to the manager.dict().
Since listener and execute_search share the same queue object, there can be a race where execute_search gets the 'kill' item from the queue before listener does; listener is then stuck in a blocking get() forever, since no more items arrive.
For that case you can use an Event object to signal all processes to stop:
import multiprocessing as mp
import queue

def listener(q, d, stop_event):
    while not stop_event.is_set():
        try:
            item_to_write = q.get(timeout=0.1)
            foo = d['region']
            foo.add(item_to_write)
            d['region'] = foo
        except queue.Empty:
            pass
    print("Listener process stopped")

def main():
    manager = mp.Manager()
    stop_event = manager.Event()
    q = manager.Queue()
    d = manager.dict()
    d['region'] = set()
    pool = mp.get_context("spawn").Pool(mp.cpu_count() + 2)
    watcher = pool.apply_async(listener, (q, d, stop_event))
    stop_event.set()
    jobs = []
    for i in range(24):
        job = pool.apply_async(execute_search, (q, d))
        jobs.append(job)
    try:
        for job in jobs:
            job.get(300)  # get the result, or raise a TimeoutError after 300 seconds
    except mp.TimeoutError:
        pool.terminate()
    stop_event.set()  # stop listener process
    print('process complete')

if __name__ == '__main__':
    main()
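As a usage note, one way to sequence the shutdown with this Event approach is to set the event only after all worker results have been collected, so the listener keeps draining the queue while work is still in flight. A minimal sketch under that assumption, reusing the listener, execute_search, pool, q, d, and stop_event from above:
# Shutdown-ordering sketch (assumes the definitions from the answer above)
watcher = pool.apply_async(listener, (q, d, stop_event))               # start draining the queue
jobs = [pool.apply_async(execute_search, (q, d)) for _ in range(24)]
for job in jobs:
    job.get()            # collect every worker result first
stop_event.set()         # only now tell the listener to exit its loop
watcher.get()            # wait for "Listener process stopped"
pool.close()
pool.join()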
