An error while using locks in multiprocessing in Python - python

I'm a beginner learning Python and ran into some issues while using locks with multiprocessing.
I get exit code 0 and the right answer, but I still get some sort of error message which I don't fully understand. Here's the code I've written:
import time
import multiprocessing

def deposit(balance):
    for i in range(100):
        time.sleep(0.01)
        lck.acquire()
        balance.value += 1
        lck.release()

def withdraw(balance):
    for i in range(100):
        time.sleep(0.01)
        lck.acquire()
        balance.value -= 1
        lck.release()

if __name__ == '__main__':
    balance = multiprocessing.Value('i', 200)
    lck = multiprocessing.Lock()
    d = multiprocessing.Process(target=deposit, args=(balance,))
    w = multiprocessing.Process(target=withdraw, args=(balance,))
    d.start()
    w.start()
    d.join()
    w.join()
    print(balance.value)
and here's the error I get
Process Process-1:
Traceback (most recent call last):
  File "C:\Users\rahul\AppData\Local\Programs\Python\Python39\lib\multiprocessing\process.py", line 315, in _bootstrap
    self.run()
  File "C:\Users\rahul\AppData\Local\Programs\Python\Python39\lib\multiprocessing\process.py", line 108, in run
    self._target(*self._args, **self._kwargs)
  File "C:\Users\rahul\PycharmProjects\pythonProject\LearningPython.py", line 10, in deposit
    lck.acquire()
NameError: name 'lck' is not defined
Process Process-2:
Traceback (most recent call last):
  File "C:\Users\rahul\AppData\Local\Programs\Python\Python39\lib\multiprocessing\process.py", line 315, in _bootstrap
    self.run()
  File "C:\Users\rahul\AppData\Local\Programs\Python\Python39\lib\multiprocessing\process.py", line 108, in run
    self._target(*self._args, **self._kwargs)
  File "C:\Users\rahul\PycharmProjects\pythonProject\LearningPython.py", line 17, in withdraw
    lck.acquire()
NameError: name 'lck' is not defined
200

Process finished with exit code 0

The problem here is that lck is out of scope of your child processes. Global variables aren't shared across processes. Try passing the lock into the processes.
Alternatively use threads as suggested in kahn's answer. They are much friendlier and still work fine in this case.
import time
import multiprocessing

def deposit(balance, lck):
    for i in range(100):
        time.sleep(0.01)
        lck.acquire()
        balance.value += 1
        lck.release()

def withdraw(balance, lck):
    for i in range(100):
        time.sleep(0.01)
        lck.acquire()
        balance.value -= 1
        lck.release()

if __name__ == '__main__':
    balance = multiprocessing.Value('i', 200)
    lck = multiprocessing.Lock()
    d = multiprocessing.Process(target=deposit, args=(balance, lck))
    w = multiprocessing.Process(target=withdraw, args=(balance, lck))
    d.start()
    w.start()
    d.join()
    w.join()
    print(balance.value)
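As a side note (not from the original answer), the acquire()/release() pair can also be written with a with statement, which releases the lock even if an exception is raised inside the block. A minimal sketch for the deposit function only:

def deposit(balance, lck):
    for i in range(100):
        time.sleep(0.01)
        with lck:               # acquired here, released automatically at the end of the block
            balance.value += 1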

Because deposit and withdraw run in other processes. From their point of view, the process is not __main__, so the if block is not executed and lck is never defined.
Try running:
import os
import multiprocessing

def deposit(balance):
    print(os.getpid(), __name__)

def withdraw(balance):
    print(os.getpid(), __name__)

if __name__ == '__main__':
    print(os.getpid(), __name__)
    balance = multiprocessing.Value('i', 200)
    lck = multiprocessing.Lock()
    d = multiprocessing.Process(target=deposit, args=(balance,))
    w = multiprocessing.Process(target=withdraw, args=(balance,))
    d.start()
    w.start()
    d.join()
    w.join()
In my case, it shows:
19604 __main__
33320 __mp_main__
45584 __mp_main__
Your code will run if you put lck = multiprocessing.Lock() outside the if block, but I'm sure that's not what you want.
You should use threading instead of multiprocessing in this case, and have a look at the difference between multi-threading and multi-processing.
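For reference, here is a minimal sketch of what the threaded version could look like (not from the original answers; variable names are illustrative). Threads share the same process memory, so a plain integer and a threading.Lock are enough:

import threading
import time

balance = 200                     # shared between threads (same process memory)
lck = threading.Lock()

def deposit():
    global balance
    for _ in range(100):
        time.sleep(0.01)
        with lck:                 # acquires and releases the lock automatically
            balance += 1

def withdraw():
    global balance
    for _ in range(100):
        time.sleep(0.01)
        with lck:
            balance -= 1

if __name__ == '__main__':
    d = threading.Thread(target=deposit)
    w = threading.Thread(target=withdraw)
    d.start()
    w.start()
    d.join()
    w.join()
    print(balance)                # 200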

Related

Join timeout in multiprocessing

I have a dummy example where I want to apply multiprocessing. Consider a scenario where you have a stream of numbers (which I call frames) coming in one by one, and I want to assign each one to whichever process is currently available. So I am creating 4 processes that run a while loop, check if there is any element in the queue, and then apply a function to it.
The problem is that when I join the processes, the code gets stuck in the while loop, even though I clear the flag that should end the loop before joining. Somehow it still gets stuck inside it.
Code:
# step 1, 4 processes
import multiprocessing as mp
import os
import time
class MpListOperations:
def __init__(self):
self.results_queue = mp.Manager().Queue()
self.frames_queue = mp.Manager().Queue()
self.flag = mp.Manager().Value(typecode='b',value=True)
self.list_nums = list(range(0,5000))
def process_list(self):
print(f"Process id {os.getpid()} started")
while self.flag.value:
# print(self.flag.value)
if self.frames_queue.qsize():
self.results_queue.put(self.frames_queue.get()**2)
def create_processes(self, no_of_processes = mp.cpu_count()):
print("Creating Processes")
self.processes = [mp.Process(target=self.process_list) for _ in range(no_of_processes)]
def start_processes(self):
print(f"starting processes")
for process in self.processes:
process.start()
def join_process(self):
print("Joining Processes")
while True:
if not self.frames_queue.qsize():
self.flag.value=False
print("JOININNG HERE")
for process in self.processes:
exit_code = process.join()
print(exit_code)
print("BREAKING DONE")
break
def stream_frames(self):
print("Streaming Frames")
for frame in self.list_nums:
self.frames_queue.put(frame)
if __name__=="__main__":
start = time.time()
mp_ops = MpListOperations()
mp_ops.create_processes()
mp_ops.start_processes()
mp_ops.stream_frames()
mp_ops.join_process()
print(time.time()-start)
Now if I add a timeout parameter to join, even 0, i.e. exit_code = process.join(0), it works. I want to understand: if this code is correct, what should the value of the timeout be? Why does it work with a timeout and not without one? What is the proper way to implement multiprocessing here?
If you look at the documentation for a managed queue, you will see that the qsize method only returns an approximate size, so I would not use it to test when all the items have been taken off the frames queue. Presumably you want to let the processes run until all frames have been processed. The simplest way I know is to put N sentinel items on the frames queue after the actual frames, where N is the number of processes getting from the queue. A sentinel item is a special value that cannot be mistaken for an actual frame and signals to the process that there are no more items for it to get from the queue (i.e. a quasi end-of-file item). In this case we can use None as the sentinel. Each process then just keeps doing get operations on the queue until it sees a sentinel item and then terminates. There is therefore no need for the self.flag attribute.
Here is the updated and simplified code. I have made some other minor changes that have been commented:
import multiprocessing as mp
import os
import time

class MpListOperations:
    def __init__(self):
        # Only create one manager process:
        manager = mp.Manager()
        self.results_queue = manager.Queue()
        self.frames_queue = manager.Queue()
        # No need to convert range to a list:
        self.list_nums = range(0, 5000)

    def process_list(self):
        print(f"Process id {os.getpid()} started")
        while True:
            frame = self.frames_queue.get()
            if frame is None:  # Sentinel?
                # Yes, we are done:
                break
            self.results_queue.put(frame ** 2)

    def create_processes(self, no_of_processes=mp.cpu_count()):
        print("Creating Processes")
        self.no_of_processes = no_of_processes
        self.processes = [mp.Process(target=self.process_list) for _ in range(no_of_processes)]

    def start_processes(self):
        print("Starting Processes")
        for process in self.processes:
            process.start()

    def join_processes(self):
        print("Joining Processes")
        for process in self.processes:
            # join returns None:
            process.join()

    def stream_frames(self):
        print("Streaming Frames")
        for frame in self.list_nums:
            self.frames_queue.put(frame)
        # Put sentinels:
        for _ in range(self.no_of_processes):
            self.frames_queue.put(None)

if __name__ == "__main__":
    start = time.time()
    mp_ops = MpListOperations()
    mp_ops.create_processes()
    mp_ops.start_processes()
    mp_ops.stream_frames()
    mp_ops.join_processes()
    print(time.time() - start)
Prints:
Creating Processes
Starting Processes
Process id 28 started
Process id 29 started
Streaming Frames
Process id 33 started
Process id 31 started
Process id 38 started
Process id 44 started
Process id 42 started
Process id 45 started
Joining Processes
2.3660173416137695
Note for Windows
I have modified method start_processes to temporarily set attribute self.processes to None:
def start_processes(self):
    print("Starting Processes")
    processes = self.processes
    # Don't try to pickle list of processes:
    self.processes = None
    for process in processes:
        process.start()
    # Restore attribute:
    self.processes = processes
Otherwise under Windows we get a pickle error trying to serialize/deserialize a list of processes containing two or more multiprocessing.Process instances. The error is "TypeError: cannot pickle 'weakref' object." This can be demonstrated with the following code where we first try to pickle a list of 1 process and then a list of 2 processes:
import multiprocessing as mp
import os

class Foo:
    def __init__(self, number_of_processes):
        self.processes = [mp.Process(target=self.worker) for _ in range(number_of_processes)]
        self.start_processes()
        self.join_processes()

    def start_processes(self):
        processes = self.processes
        for process in self.processes:
            process.start()

    def join_processes(self):
        for process in self.processes:
            process.join()

    def worker(self):
        print(f"Process id {os.getpid()} started")
        print(f"Process id {os.getpid()} ended")

if __name__ == "__main__":
    foo = Foo(1)
    foo = Foo(2)
Prints:
Process id 7540 started
Process id 7540 ended
Traceback (most recent call last):
  File "C:\Booboo\test\test.py", line 26, in <module>
    foo = Foo(2)
  File "C:\Booboo\test\test.py", line 7, in __init__
    self.start_processes()
  File "C:\Booboo\test\test.py", line 13, in start_processes
    process.start()
  File "C:\Program Files\Python38\lib\multiprocessing\process.py", line 121, in start
    self._popen = self._Popen(self)
  File "C:\Program Files\Python38\lib\multiprocessing\context.py", line 224, in _Popen
    return _default_context.get_context().Process._Popen(process_obj)
  File "C:\Program Files\Python38\lib\multiprocessing\context.py", line 327, in _Popen
    return Popen(process_obj)
  File "C:\Program Files\Python38\lib\multiprocessing\popen_spawn_win32.py", line 93, in __init__
    reduction.dump(process_obj, to_child)
  File "C:\Program Files\Python38\lib\multiprocessing\reduction.py", line 60, in dump
    ForkingPickler(file, protocol).dump(obj)
TypeError: cannot pickle 'weakref' object
Process id 18152 started
Process id 18152 ended
Traceback (most recent call last):
  File "<string>", line 1, in <module>
  File "C:\Program Files\Python38\lib\multiprocessing\spawn.py", line 116, in spawn_main
    exitcode = _main(fd, parent_sentinel)
  File "C:\Program Files\Python38\lib\multiprocessing\spawn.py", line 126, in _main
    self = reduction.pickle.load(from_parent)
EOFError: Ran out of input
The target processes are stuck in the queue's get() call. Multiple processes can see that the queue isn't empty, but only one of them gets the last item; the remaining processes then block waiting for the next item to become available.
You could add a Lock around reading the size of the Queue and getting an item from it.
Alternatively, you can avoid reading the size of the queue altogether by using queue.get() with a timeout, which lets each worker check the flag regularly:
import queue

TIMEOUT = 1  # seconds

class MpListOperations:
    #[...]
    def process_list(self):
        print(f"Process id {os.getpid()} started")
        previous = self.flag.value
        while self.flag.value:
            try:
                got = self.frames_queue.get(timeout=TIMEOUT)
            except queue.Empty:
                pass
            else:
                print(f"Gotten {got}")
                self.results_queue.put(got**2)
            _next = self.flag.value
            if previous != _next:
                print(f"Flag change: {_next}")
$ python ./test_mp.py
Creating Processes
starting processes
Process id 36566 started
Streaming Frames
Process id 36565 started
Process id 36564 started
Process id 36570 started
Process id 36567 started
Gotten 0
Process id 36572 started
Gotten 1
Gotten 2
Gotten 3
Process id 36579 started
Gotten 4
Gotten 5
Gotten 6
Process id 36583 started
Gotten 7
# [...]
Gotten 4997
Joining Processes
Gotten 4998
Gotten 4999
JOININNG HERE
Flag change: False
Flag change: False
Flag change: False
Flag change: False
Flag change: False
Flag change: False
Flag change: False
Flag change: False
Exit code : None
Exit code : None
Exit code : None
Exit code : None
Exit code : None
Exit code : None
Exit code : None
Exit code : None
BREAKING DONE
1.4375360012054443
Alternatively, using a multiprocessing.Pool object:
import multiprocessing as mp
import time

def my_func(arg):
    time.sleep(0.002)
    return arg**2

def get_input():
    for i in range(5000):
        yield i
        time.sleep(0.001)

if __name__ == "__main__":
    start = time.time()
    mp_pool = mp.Pool()
    result = mp_pool.map(my_func, get_input())
    mp_pool.close()
    mp_pool.join()
    print(len(result))
    print(f"Duration: {time.time()-start}")
Giving:
$ python ./test_mp.py
5000
Duration: 6.847279787063599

python multiprocessing on Windows using shared values

I am trying to gain some insight into using multiprocessing with Python. I have an example of using shared values that works on Unix, but I cannot get a simple educational example to work on Windows 10.
The code below runs OK on Windows, but only with the line that updates the shared value commented out in foo().
What is my problem, please?
import multiprocessing as mp

def foo(q):
    #global shared_num
    q.put('hello')
    #shared_num.value = 777

if __name__ == '__main__':
    global shared_num
    mp.set_start_method('spawn')
    shared_num = mp.Value('d', 0)
    lock = mp.Lock()
    q = mp.Queue()
    p = mp.Process(target=foo, args=(q,))
    p.start()
    p.join()
    print(q.get(), " ", shared_num.value)
    #print(q.get(), " ")
If I run the code with the line in foo() that sets the shared value uncommented, I get:
Traceback (most recent call last):
  File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.8_3.8.2032.0_x64__qbz5n2kfra8p0\lib\multiprocessing\process.py", line 315, in _bootstrap
    self.run()
  File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.8_3.8.2032.0_x64__qbz5n2kfra8p0\lib\multiprocessing\process.py", line 108, in run
    self._target(*self._args, **self._kwargs)
  File "C:\Users\ken38\OneDrive\Projects\Python\GH_Pi\python\ms_mp.py", line 6, in foo
    shared_num.value = 777
NameError: name 'shared_num' is not defined
hello 0.0
Michael Butscher actually answered this in his comment, but as I could not flag a comment as the answer, I thought I would show the corrected code as an answer.
This does highlight a difference you run into when testing simple examples on Windows: Linux-based examples may not work on Windows.
This code worked on both Windows and Debian (RPi).
import multiprocessing as mp

def foo(q, shared_num, lock):
    #global shared_num
    q.put('hello')
    with lock:
        shared_num.value = 777

if __name__ == '__main__':
    global shared_num
    mp.set_start_method('spawn')
    shared_num = mp.Value('d', 0)
    lock = mp.Lock()
    q = mp.Queue()
    p = mp.Process(target=foo, args=(q, shared_num, lock,))
    p.start()
    p.join()
    print(q.get(), " ", shared_num.value)
    #print(q.get(), " ")

python multiprocessing process pool fails to find asynced function

Pretty simple multiprocessing example. Goals:
Create a pool of process workers using mp.Pool
Do some sort of transformation (here a simple string operation on line)
Push the transformed line to mp.Queue
Further process data from that mp.Queue in the main program afterwards
So let's do this:
import multiprocessing as mp
Init async processes with an mp.Queue:
def process_pool_init_per_process(q):
    global mp_queue
    mp_queue = q
Really init the mp_pool
no_of_processes = 4
q = mp.Queue()
mp_pool = mp.Pool(no_of_processes, process_pool_init_per_process, (q,))
This is getting called for every line to be processed async:
def process_async_main(line):
    print(line)
    q.put(line + '_asynced')
And now let's start it using apply_async:
line = "Hi, this is a test to test mp_queues with mp process pools"
handler = mp_pool.apply_async(process_async_main, (line))
mp_resp = handler.get()
And read from the queue
while not q.empty():
    print(q.get()) # This should be the initial line
Fails with:
python3 mp_process_example.py
Process ForkPoolWorker-1:
Traceback (most recent call last):
  File "/usr/lib/python3.6/multiprocessing/process.py", line 258, in _bootstrap
    self.run()
  File "/usr/lib/python3.6/multiprocessing/process.py", line 93, in run
    self._target(*self._args, **self._kwargs)
  File "/usr/lib/python3.6/multiprocessing/pool.py", line 108, in worker
    task = get()
  File "/usr/lib/python3.6/multiprocessing/queues.py", line 337, in get
    return _ForkingPickler.loads(res)
AttributeError: Can't get attribute 'process_async_main' on <module '__main__' from 'mp_process_example.py'>
The question is: why can't multiprocessing find the function defined in the main module?
Complete code to reproduce:
import multiprocessing as mp

##### Init async processes
def process_pool_init_per_process(q):
    global mp_queue
    mp_queue = q

# Really init the mp_pool
no_of_processes = 4
q = mp.Queue()
mp_pool = mp.Pool(no_of_processes, process_pool_init_per_process, (q,))

# This is getting called for every line to be processed async
def process_async_main(line):
    print(line)
    q.put(line + '_asynced')

line = "Hi, this is a test to test mp_queues with mp process pools"
handler = mp_pool.apply_async(process_async_main, (line))
mp_resp = handler.get()

while not q.empty():
    print(q.get()) # This should be the initial line
Ok... I've got it... For some strange reason, multiprocessing is not able to have the function to be asynced in the same file as the synchronized code.
Writing the code like this:
asynced.py
##### Init async processes
def process_pool_init_per_process(q):
    global mp_queue
    mp_queue = q

##### Function to be asynced
def process_async_main(line):
    print(line)
    mp_queue.put(line + '_asynced')
And then mp_process_example.py:
import multiprocessing as mp
from asynced import process_async_main, process_pool_init_per_process

# Really init the mp_pool
no_of_processes = 4
q = mp.Queue()
mp_pool = mp.Pool(no_of_processes, process_pool_init_per_process, (q,))

line = "Hi, this is a test to test mp_queues with mp process pools"
handler = mp_pool.apply_async(process_async_main, (line,))
mp_resp = handler.get()

while not q.empty():
    print(q.get()) # This should be the initial line + "_asynced"
Works as expected:
$ python3 mp_process_example.py
Hi, this is a test to test mp_queues with mp process pools
Hi, this is a test to test mp_queues with mp process pools_asynced
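As a side note (my own addition, not part of the answer above), a single-file layout can also work if the worker function is defined before the pool is created and the pool setup is guarded by if __name__ == '__main__'. A minimal sketch, assuming the same queue-based setup as above:

import multiprocessing as mp

def process_pool_init_per_process(q):
    # Runs once in each worker process; stores the queue in a module-level global.
    global mp_queue
    mp_queue = q

def process_async_main(line):
    # Defined before the pool is created, so worker processes can find it.
    print(line)
    mp_queue.put(line + '_asynced')

if __name__ == '__main__':
    q = mp.Queue()
    mp_pool = mp.Pool(4, process_pool_init_per_process, (q,))
    line = "Hi, this is a test to test mp_queues with mp process pools"
    handler = mp_pool.apply_async(process_async_main, (line,))
    handler.get()        # wait for the worker to finish
    mp_pool.close()
    mp_pool.join()
    while not q.empty():
        print(q.get())   # the initial line + "_asynced"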

multiprocessing queue storing list of lists

I'm trying to enqueue a list of lists in a process (in the function named proc below), and then have the process terminate itself after I call event.set(). My function proc always finishes, judging by the printout, but the process itself keeps running. I can get this to work if I lower the number of lists enqueued per put call (the batchperq variable) or make each nested list smaller.
import multiprocessing as mp
import queue
import numpy as np
import time

def main():
    trainbatch_q = mp.Queue(10)
    batchperq = 50
    event = mp.Event()
    tl1 = mp.Process(target=proc,
                     args=(trainbatch_q, 20, batchperq, event))
    tl1.start()
    time.sleep(3)
    event.set()
    tl1.join()
    print("Never printed..")

def proc(batch_q, batch_size, batchperentry, the_event):
    nrow = 100000
    i0 = 0
    to_q = []
    while i0 < nrow:
        rowend = min(i0 + batch_size, nrow)
        somerows = np.random.randint(0, 5, (rowend-i0, 2))
        to_q.append(somerows.tolist())
        if len(to_q) == batchperentry:
            print("adding..", i0, len(to_q))
            while not the_event.is_set():
                try:
                    batch_q.put(to_q, block=False)
                    to_q = []
                    break
                except queue.Full:
                    time.sleep(1)
        i0 += batch_size
    print("proc finishes")
When I do a keyboard interrupt, I get the trace below... what could the "lock" it's trying to acquire be? Something to do with the queue?
Traceback (most recent call last):
  File "/software/Anaconda3-5.0.0.1-el7-x86_64/lib/python3.6/multiprocessing/process.py", line 252, in _bootstrap
    util._exit_function()
  File "/software/Anaconda3-5.0.0.1-el7-x86_64/lib/python3.6/multiprocessing/util.py", line 322, in _exit_function
    _run_finalizers()
  File "/software/Anaconda3-5.0.0.1-el7-x86_64/lib/python3.6/multiprocessing/util.py", line 262, in _run_finalizers
    finalizer()
  File "/software/Anaconda3-5.0.0.1-el7-x86_64/lib/python3.6/multiprocessing/util.py", line 186, in __call__
    res = self._callback(*self._args, **self._kwargs)
  File "/software/Anaconda3-5.0.0.1-el7-x86_64/lib/python3.6/multiprocessing/queues.py", line 198, in _finalize_join
    thread.join()
  File "/software/Anaconda3-5.0.0.1-el7-x86_64/lib/python3.6/threading.py", line 1056, in join
    self._wait_for_tstate_lock()
  File "/software/Anaconda3-5.0.0.1-el7-x86_64/lib/python3.6/threading.py", line 1072, in _wait_for_tstate_lock
    elif lock.acquire(block, timeout):
KeyboardInterrupt
The reason your process is never exiting is that you're never telling it to exit. I added a return at the end of your function, and your process appears to exit correctly now.
def proc(batch_q, batch_size, batchperentry, the_event):
    nrow = 100000
    i0 = 0
    to_q = []
    while i0 < nrow:
        rowend = min(i0 + batch_size, nrow)
        somerows = np.random.randint(0, 5, (rowend-i0, 2))
        to_q.append(somerows.tolist())
        if len(to_q) == batchperentry:
            print("adding..", i0, len(to_q))
            while not the_event.is_set():
                try:
                    batch_q.put(to_q, block=False)
                    to_q = []
                    break
                except queue.Full:
                    time.sleep(1)
        i0 += batch_size
    print("proc finishes")
    return  # Added this line. You can have it return whatever is most relevant to you.
Here is the full program I ran including my changes to make it exit successfully.
import multiprocessing as mp
import queue
import numpy as np
import random
import time

def main():
    trainbatch_q = mp.Queue(10)
    batchperq = 50
    event = mp.Event()
    tl1 = mp.Process(target=proc,
                     args=(trainbatch_q, 20, batchperq, event))
    print("Starting")
    tl1.start()
    time.sleep(3)
    event.set()
    tl1.join()
    print("Never printed..")

def proc(batch_q, batch_size, batchperentry, the_event):
    nrow = 100000
    i0 = 0
    to_q = []
    while i0 < nrow:
        rowend = min(i0 + batch_size, nrow)
        somerows = np.random.randint(0, 5, (rowend-i0, 2))
        to_q.append(somerows.tolist())
        if len(to_q) == batchperentry:
            print("adding..", i0, len(to_q))
            while not the_event.is_set():
                try:
                    batch_q.put(to_q, block=False)
                    to_q = []
                    break
                except queue.Full:
                    time.sleep(1)
        i0 += batch_size
    print("proc finishes")
    return  # Added this line. You can have it return whatever is most relevant to you.

if __name__ == "__main__":
    main()
Hope this helps.

Python multiprocessing issue: process not starting

I wrote this code. I want a main process that starts multiple subprocesses, each of which spawns a listener thread to wait for a kill message. subprocess works, but testprocess does not run. There are no errors. Any ideas?
from multiprocessing import Process, Pipe
from threading import Thread
import time

Alive = True

def listener_thread(conn):  # listens for kill from main
    global Alive
    while True:
        data = conn.recv()
        if data == "kill":
            Alive = False  # value for kill
            break

def subprocess(conn):
    t = Thread(target=listener_thread, args=(conn,))
    count = 0
    t.start()
    while Alive:
        print "Run number = %d" % count
        count = count + 1

def testprocess(conn):
    t = Thread(target=listner_thread, args=(conn,))
    count = 0
    t.start()
    while Alive:
        print "This is a different thread run = %d" % count
        count = count + 1

parent_conn, child_conn = Pipe()
p = Process(target=subprocess, args=(child_conn,))
p2 = Process(target=testprocess, args=(child_conn,))
runNum = int(raw_input("Enter a number: "))
p.start()
p2.start()
time.sleep(runNum)
parent_conn.send("kill")  # sends kill to the listener threads to tell them when to stop
p.join()
p2.join()
A typo in testprocess makes the function quit early.
listner_thread should be listener_thread.
If you comment out the subprocess-related code and run it, you will see the following error:
Process Process-1:
Traceback (most recent call last):
  File "/usr/lib/python2.7/multiprocessing/process.py", line 258, in _bootstrap
    self.run()
  File "/usr/lib/python2.7/multiprocessing/process.py", line 114, in run
    self._target(*self._args, **self._kwargs)
  File "t.py", line 25, in testprocess
    t = Thread(target=listner_thread, args=(conn,))
NameError: global name 'listner_thread' is not defined
