Infinite loop append in multiprocessing.Manager list - python

Why isn't this code working:
from multiprocessing import Process, Manager
import pcap, string, sys

def f(a, p):
    try:
        while True:
            a.append(p.next())
    except KeyboardInterrupt:
        print 'stop'

def g(a):
    # print a
    while True:
        print a[len(a)-1]

if __name__ == '__main__':
    # num = Value('d', 0.0)
    manager = Manager()
    l = manager.list([])
    p = pcap.pcapObject()
    dev = sys.argv[1]
    p.open_live(dev, 65000, 0, 100)
    p.setfilter(string.join(sys.argv[2:], ' '), 0, 0)
    p = Process(target=f, args=(l, p))
    p.start()
    p.join()
    a = Process(target=g, args=(l,))
    a.start()
    a.join()
    # print l
    # print len(l)
while the code below works fine:
from multiprocessing import Process, Manager
import pcap, string, sys

def f(a, p):
    try:
        while len(a) < 100:
            a.append(p.next())
    except KeyboardInterrupt:
        print 'stop'

def g(a):
    # print a
    while True:
        print a[len(a)-1]

if __name__ == '__main__':
    # num = Value('d', 0.0)
    manager = Manager()
    l = manager.list([])
    p = pcap.pcapObject()
    dev = sys.argv[1]
    p.open_live(dev, 65000, 0, 100)
    p.setfilter(string.join(sys.argv[2:], ' '), 0, 0)
    p = Process(target=f, args=(l, p))
    p.start()
    p.join()
    a = Process(target=g, args=(l,))
    a.start()
    a.join()
    # print l
    # print len(l)
Other questions:
Is this the best and fastest/most optimized way to create shared memory between different processes?
Is the multiprocessing.Manager class only for finite-size data structures? If not, what am I doing wrong?
Any help/hints would be appreciated. Thanks in advance.
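For what it's worth, a minimal sketch of one likely explanation, not from the original post: in the first snippet p.join() never returns because f() loops forever, so g() is never started and nothing is printed. Starting both processes before joining lets the consumer run while the producer keeps appending (the pcap object is renamed to pc here only to avoid reusing the name p):

if __name__ == '__main__':
    manager = Manager()
    l = manager.list([])
    pc = pcap.pcapObject()
    dev = sys.argv[1]
    pc.open_live(dev, 65000, 0, 100)
    pc.setfilter(string.join(sys.argv[2:], ' '), 0, 0)
    producer = Process(target=f, args=(l, pc))
    consumer = Process(target=g, args=(l,))
    producer.start()
    consumer.start()  # start the reader before blocking on the writer
    producer.join()
    consumer.join()

Note that g() will still raise an IndexError if it reads before the first packet arrives; the original code has the same issue.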

Related

Kill threads after one thread successfully finishes the function [duplicate]

This question already has answers here: Is there any way to kill a Thread? (31 answers). Closed 12 months ago.
I'm searching for a way to transform this kind of code from multiprocessing into multithreading:
import multiprocessing
import random
import time

FIND = 50
MAX_COUNT = 100000
INTERVAL = range(10)

queue = multiprocessing.Queue(maxsize=1)

def find(process, initial):
    succ = False
    while succ == False:
        start = initial
        while start <= MAX_COUNT:
            if FIND == start:
                queue.put(f"Found: {process}, start: {initial}")
                break
            i = random.choice(INTERVAL)
            start = start + i
        print(process, start)

processes = []
manager = multiprocessing.Manager()
for i in range(5):
    process = multiprocessing.Process(target=find, args=(f'computer_{i}', i))
    processes.append(process)
    process.start()

ret = queue.get()
for i in range(5):
    process = processes[i]
    process.terminate()
    print(f'terminated {i}')
print(ret)
The way it works: it starts multiple processes, and after the first process finishes, the function find isn't needed anymore. I tried to transform it as follows, but unfortunately the terminate function is not available for threads:
import _thread as thread
import queue
import random
import time

FIND = 50
MAX_COUNT = 100000
INTERVAL = range(10)

qu = queue.Queue(maxsize=1)

def find(process, initial):
    succ = False
    while succ == False:
        start = initial
        while start <= MAX_COUNT:
            if FIND == start:
                qu.put(f"Found: {process}, start: {initial}")
                break
            i = random.choice(INTERVAL)
            start = start + i
        print(process, start)

threads = []
for i in range(5):
    th = thread.start_new_thread(find, (f'computer_{i}', i))
    threads.append(th)

ret = qu.get()
for i in range(5):
    th = threads[i]
    th.terminate()
    print(f'terminated {i}')
print(ret)
How can I terminate the threads?
Try:
import ctypes
import threading

for id, thread in threading._active.items():
    # asynchronously raise SystemExit in every running thread (relies on CPython internals)
    ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(id), ctypes.py_object(SystemExit))
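A gentler alternative (my own sketch, not part of the quoted answer): start the workers as daemon threads and let the main thread exit once the queue yields a result; daemon threads are discarded when the interpreter shuts down, so no explicit termination is needed:

import queue
import random
import threading

FIND = 50
MAX_COUNT = 100000
INTERVAL = range(10)
qu = queue.Queue(maxsize=1)

def find(process, initial):
    while True:  # keep retrying, as in the original
        start = initial
        while start <= MAX_COUNT:
            if FIND == start:
                qu.put(f"Found: {process}, start: {initial}")
                return
            start += random.choice(INTERVAL)

if __name__ == "__main__":
    for i in range(5):
        threading.Thread(target=find, args=(f'computer_{i}', i), daemon=True).start()
    print(qu.get())  # the daemon workers die with the main thread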

Python concurrent.futures ProcessPoolExecutor wrong output

I am trying to use ProcessPoolExecutor() to run some functions, but I can't figure out how to get the functions' return values out of the with block.
import time
from concurrent.futures import ProcessPoolExecutor, wait, ALL_COMPLETED

def threaded_upload(i):
    time.sleep(2)
    if i == 0:
        k = 10
    elif i == 2:
        k = i*i
    else:
        k = -99
    return [k]

def controller():
    if __name__ == "__main__":
        futures = []
        with ProcessPoolExecutor() as pool:
            for paso in range(4):
                futuro_i = pool.submit(threaded_upload, paso)
                wth = [futuro_i.result()]
                futures.append(futuro_i)
            wait(futures, return_when=ALL_COMPLETED)
            merged_list = []
            for future in futures:
                for valor in future.result():
                    merged_list.append(valor)
            Lista_Final = merged_list
            wait(futures, return_when=ALL_COMPLETED)
            return Lista_Final

print(controller())
The output of the code is:
None
[10, -99, 4, -99]
I am not sure why.
The "wait" doesn't seem to wait until all functions are executed either.
To be honest, I have been reading for a few days, but the descriptions of concurrent.futures and multiprocessing are more advanced than my current knowledge.
Any clarification will be appreciated.
Thanks in advance.
You submit a job and then immediately call its result(), which blocks until that job finishes, so your tasks run one at a time. Submit all the jobs first, and only then wait for the results. You can also return an integer instead of a list and skip the inner loop. (The stray None is most likely printed by the worker processes themselves: they re-import your module, and since __name__ is not "__main__" there, controller() falls through and returns None to the module-level print(controller()).)
test.py:
import random
import time
from concurrent.futures import ProcessPoolExecutor, wait

def worker(i):
    t = random.uniform(1, 5)
    print(f"START: {i} ({t:.3f}s)")
    time.sleep(t)
    if i == 0:
        k = 10
    elif i == 2:
        k = i * i
    else:
        k = -99
    print(f"END: {i}")
    return k

def main():
    futures = []
    with ProcessPoolExecutor() as pool:
        for i in range(4):
            future = pool.submit(worker, i)
            futures.append(future)
    results = []
    done, pending = wait(futures)  # ALL_COMPLETED is the default value
    for future in done:
        results.append(future.result())
    print(results)

if __name__ == "__main__":
    main()
Test:
$ python test.py
START: 0 (1.608s)
START: 1 (1.718s)
START: 2 (1.545s)
START: 3 (1.588s)
END: 2
END: 3
END: 0
END: 1
[10, -99, 4, -99]
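One small caveat about the answer above: wait() returns done as a set, so the result order happens to match the submission order here but is not guaranteed to. If the order matters, iterate over the futures list itself, for example:

results = [future.result() for future in futures]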

Subprocesses are not executed when using multiprocessing in Python

I'm testing the code below to calculate an array in parallel, but it seems that the long-running function is never executed. My Python version: 3.7.4, operating system: Windows 10.
from multiprocessing import Pool, Lock, Array
import os, time

def long_time_task(i, array, lock):
    print('Run task %s (%s)...' % (i, os.getpid()))
    start = time.time()
    total_count = 0
    for k in range(5*10**7): total_count += 1
    total_count += i
    lock.acquire()
    array[i] = total_count
    lock.release()
    end = time.time()
    print('Task %s runs %0.2f seconds.' % (i, (end - start)))

def mainFunc():
    print('Parent process %s.' % os.getpid())
    p = Pool()
    array = Array('f', 20)
    lock = Lock()
    for i in range(20): p.apply_async(long_time_task, args=(i, array, lock))
    print('Waiting for all subprocesses done...')
    p.close()
    p.join()
    print('All subprocesses done.')

if __name__ == '__main__':
    mainFunc()
There are a few issues with your code:
The apply_async method returns a result object that you need to wait on (for example by calling its get() method); otherwise errors raised in the workers are silently swallowed.
You can't pass plain multiprocessing Array or Lock objects to Pool methods, because they can't be pickled through the pool's task queue. You can use manager objects instead.
Try this:
from multiprocessing import Pool, Lock, Array, Manager
import os, time

def long_time_task(i, array, lock):
    print('Run task %s (%s)...' % (i, os.getpid()))
    start = time.time()
    total_count = 0
    for k in range(5*10**7): total_count += 1
    total_count += i
    lock.acquire()
    array[i] = total_count
    lock.release()
    end = time.time()
    print('Task %s runs %0.2f seconds.' % (i, (end - start)))

def mainFunc():
    print('Parent process %s.' % os.getpid())
    p = Pool()
    m = Manager()
    array = m.Array('f', [0] * 20)
    lock = m.Lock()
    results = [p.apply_async(long_time_task, args=(i, array, lock)) for i in range(20)]
    [result.get() for result in results]
    print('Waiting for all subprocesses done...')
    p.close()
    p.join()
    print('All subprocesses done.')

if __name__ == '__main__':
    mainFunc()
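Another option worth sketching (my own addition, not from the original answer): keep the plain Array and Lock, but hand them to the workers through the Pool's initializer so they are inherited instead of being pickled with each task. The init_worker function and the shared dict below are illustrative names, not part of the question's code:

from multiprocessing import Pool, Lock, Array
import os, time

shared = {}

def init_worker(array, lock):
    # runs once per worker process; stores the inherited shared objects
    shared['array'] = array
    shared['lock'] = lock

def long_time_task(i):
    print('Run task %s (%s)...' % (i, os.getpid()))
    total_count = 0
    for k in range(5*10**7): total_count += 1
    total_count += i
    with shared['lock']:
        shared['array'][i] = total_count

def mainFunc():
    array = Array('f', 20)
    lock = Lock()
    with Pool(initializer=init_worker, initargs=(array, lock)) as p:
        results = [p.apply_async(long_time_task, args=(i,)) for i in range(20)]
        [r.get() for r in results]  # surface any worker errors
    print(array[:])

if __name__ == '__main__':
    mainFunc()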
You could also simplify it like this, but I'm not sure if this will work in your case:
import array
import os
import time
from multiprocessing import Pool

def long_time_task(i):
    print(f'Run task {i} ({os.getpid()})...')
    start = time.time()
    total_count = 0
    for k in range(5 * 10 ** 7):
        total_count += 1
    total_count += i
    end = time.time()
    print(f'Task {i} runs {end - start:.2f} seconds.')
    return total_count

def main():
    print('Parent process %s.' % os.getpid())
    a = array.array('d', range(20))
    r = range(20)
    with Pool() as pool:
        for idx, result in zip(r, pool.map(long_time_task, r)):
            a[idx] = result
    print(a)
    print(f'All subprocesses done.')

if __name__ == '__main__':
    main()

How to stop multiprocessing.Pool.map on exception

When I raise an exception inside my thread_function, it doesn't stop the rest of the map processing. I'd like to stop it.
from multiprocessing import Pool

def thread_function(n):
    if n == 10:
        raise Exception('Stop everything!')

pool = Pool(processes=4)
pool.map(thread_function, range(1, 1000), chunksize=1)
I'd expect no more processing after one thread reached n == 10.
I don't know of a way to do this directly with map, but you can monitor a map_async result like this...
from multiprocessing import Pool
import time

def thread_function(n):
    if n == 10:
        print('Raising Exception')
        raise Exception('Stop everything!')
    print(n)
    time.sleep(0.1)

pool = Pool(processes=4)
result = pool.map_async(thread_function, range(1, 1000), chunksize=1)
while not result.ready():
    if not result._success:
        print('Exiting for failure')
        pool.terminate()
        pool.join()
        break
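If you prefer to avoid the private _success attribute, a variation I would sketch (my own addition, assuming Python 3) is to consume pool.imap() and terminate the pool as soon as the iterator re-raises the worker's exception; note that tasks already dispatched may still run briefly before terminate() takes effect:

from multiprocessing import Pool
import time

def thread_function(n):
    if n == 10:
        raise Exception('Stop everything!')
    print(n)
    time.sleep(0.1)

if __name__ == '__main__':
    with Pool(processes=4) as pool:
        try:
            # results arrive in submission order; the failing task re-raises here
            for _ in pool.imap(thread_function, range(1, 1000), chunksize=1):
                pass
        except Exception as exc:
            print('Exiting for failure:', exc)
            pool.terminate()  # stop workers that are still processing later items
    # leaving the with-block also terminates the pool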

How can I replace multithreading with multiprocessing in this code (Python)?

I wrote a program which implements a Caesar cipher with threads and queues in Python. I would like to replace all the threading work with multiprocessing in my code, and I have no idea how to do it. I would appreciate it if you could explain where and how to start the implementation. Here is the code:
import threading
import Queue
import sys
import string

lock = threading.Lock()

def do_work(in_queue, out_queue, shift):
    while True:
        lock.acquire()
        item = in_queue.get()
        result = caesar(item, shift)
        out_queue.put(result)
        in_queue.task_done()
        lock.release()

def caesar(plaintext, shift):
    plaintext = plaintext.upper()
    alphabet = string.ascii_uppercase
    shifted_alphabet = alphabet[shift:] + alphabet[:shift]
    table = string.maketrans(alphabet, shifted_alphabet)
    return plaintext.translate(table)

if __name__ == "__main__":
    if len(sys.argv) != 4:
        print("Duzgun giriniz: '<filename>.py s n l'")  # Turkish: "Enter properly: '<filename>.py s n l'"
        sys.exit(0)
    else:
        s = int(sys.argv[1])
        n = int(sys.argv[2])
        l = int(sys.argv[3])

    work = Queue.Queue()
    results = Queue.Queue()

    myfile = open('metin.txt', 'r')
    text_data = myfile.read()  # <=== here load file

    index = 0
    for i in xrange(n):
        t = threading.Thread(target=do_work, args=(work, results, s))
        t.daemon = True
        t.start()

    for i in range(0, len(text_data), l):
        work.put(text_data[index:index + l])
        index += l

    work.join()

    index = 0
    output_file = open("crypted" + "_" + str(s) + "_" + str(n) + "_" + str(l) + ".txt", "w")
    for i in range(0, len(text_data), l):
        output_file.write(results.get())
        index += l
    sys.exit()
You can save yourself some code and move to the standard multiprocessing.Pool implementation.
import multiprocessing
import sys
import string
import itertools

# for non-forking systems like Windows
def worker(args):
    # args: (text, shift)
    return caesar(*args)

# for forking systems like linux
def forking_worker(args):
    # args: ((start_index, end_index), shift)
    return caesar(text_data[args[0][0]:args[0][1]], args[1])

def caesar(plaintext, shift):
    plaintext = plaintext.upper()
    alphabet = string.ascii_uppercase
    shifted_alphabet = alphabet[shift:] + alphabet[:shift]
    table = string.maketrans(alphabet, shifted_alphabet)
    return plaintext.translate(table)

if __name__ == "__main__":
    if len(sys.argv) != 4:
        print("Duzgun giriniz: '<filename>.py s n l'")  # Turkish: "Enter properly: '<filename>.py s n l'"
        sys.exit(0)
    else:
        s = int(sys.argv[1])
        n = int(sys.argv[2])
        l = int(sys.argv[3])

    with open('metin.txt') as myfile:
        text_data = myfile.read()  # <=== load the file *before* creating the pool so forked workers inherit text_data

    pool = multiprocessing.Pool()  # todo: change number of cpus...

    # on a forking system, only pass indices, not the text, to the children
    result = pool.map(forking_worker,
                      zip(((index, index + l)
                           for index in range(0, len(text_data), l)),
                          itertools.cycle([s])))

    with open("crypted" + "_" + str(s) + "_" + str(n) + "_" + str(l) + ".txt", "w") as output_file:
        output_file.writelines(result)
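The worker() function above is defined for non-forking systems but never called; a minimal sketch of how that variant might look (my own illustration, not part of the original answer: it passes the text chunks themselves so nothing depends on a forked global):

# drop-in replacement for the pool.map(forking_worker, ...) call above,
# to be placed inside the __main__ block after text_data has been read
chunks = [text_data[index:index + l] for index in range(0, len(text_data), l)]
result = pool.map(worker, zip(chunks, itertools.cycle([s])))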
