dequeue function in python

I am using two threads: one as a producer thread and the other as a consumer thread. Both operate on the same queue (q1): the producer enqueues and the consumer dequeues. The producer function seems to be working fine, but the consumer thread (the dequeue function) is giving me an error. Below is my code:
def PacketProducer(Threadnum, numTicks, onProb, offProb, q):
    l = 0
    onState = True
    packetId = 0
    for i in range(numTicks):
        x = generator(onProb, offProb, 'OnOff', onState)
        if(x==True):
            onState = True
            pkt = packet
            pkt.srcID = Threadnum
            pkt.pktId = packetId
            q.put(pkt)
            print(pkt.pktId, ' ', pkt.srcID)
            l = l+1
            packetId = packetId + 1
        else:
            onState = False
        time.sleep(1)
        #print(x)
    print(l/numTicks, ' threadNum', Threadnum)
    return

def PacketConsumer(q): #dequeues and enqueues and returns the packet
    while True:
        if not q.empty():
            pkt = q.get()
            print('dequeuing Packet', ' ', pkt.pktId, pkt.srcID)
        time.sleep(1)
    return

if __name__ == '__main__':
    numTicks = 40
    q1 = Queue()
    pkt = packet
    x = threading.Thread(target=PacketProducer, args=(1, numTicks, 0.7, 0.7, q1))
    #y = threading.Thread(target=PacketProducer, args=(2, numTicks, 0.6, 0.4, q1))
    t = threading.Thread(target=PacketConsumer, args=(q1))
    t.start()
    x.start()
    #y.start()
    x.join()
    # y.join()
    t.join()
It gives me the following error:
Exception in thread Thread-2:
Traceback (most recent call last):
File "C:\Users\Syed\AppData\Local\Programs\Python\Python37-32\lib\threading.py", line 917, in _bootstrap_inner
self.run()
File "C:\Users\Syed\AppData\Local\Programs\Python\Python37-32\lib\threading.py", line 865, in run
self._target(*self._args, **self._kwargs)
TypeError: PacketConsumer() argument after * must be an iterable, not Queue

A single-item tuple must have a trailing comma to distinguish it as a tuple; otherwise the parentheses are just grouping parentheses.
Change this line:
t = threading.Thread(target=PacketConsumer, args=(q1,))
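For illustration, a minimal standalone sketch (with a hypothetical consumer function) of why the trailing comma matters: (q1) is just q1 wrapped in grouping parentheses, while (q1,) is a one-element tuple, which is the iterable that Thread unpacks into the target's arguments.
import threading
from queue import Queue

q1 = Queue()
print(type((q1)))   # <class 'queue.Queue'> -- the parentheses are only grouping
print(type((q1,)))  # <class 'tuple'> -- the trailing comma makes a tuple

def consumer(q):
    print('got a queue holding', q.qsize(), 'items')

# Thread calls target(*args), so args must be an iterable such as a tuple.
t = threading.Thread(target=consumer, args=(q1,))
t.start()
t.join()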

Related

multiprocessing queue storing list of lists

I'm trying to enqueue a list of lists in a process (in the function named proc below), and then have the process terminate itself after I call event.set(). My function proc always finishes, judging by the printout, but the process itself keeps running. I can get this to work if I lower the number of lists enqueued per put call (the batchperq variable) or make each nested list smaller.
import multiprocessing as mp
import queue
import numpy as np
import time
def main():
    trainbatch_q = mp.Queue(10)
    batchperq = 50
    event = mp.Event()
    tl1 = mp.Process(target=proc,
                     args=(trainbatch_q, 20, batchperq, event))
    tl1.start()
    time.sleep(3)
    event.set()
    tl1.join()
    print("Never printed..")

def proc(batch_q, batch_size, batchperentry, the_event):
    nrow = 100000
    i0 = 0
    to_q = []
    while i0 < nrow:
        rowend = min(i0 + batch_size, nrow)
        somerows = np.random.randint(0, 5, (rowend-i0, 2))
        to_q.append(somerows.tolist())
        if len(to_q) == batchperentry:
            print("adding..", i0, len(to_q))
            while not the_event.is_set():
                try:
                    batch_q.put(to_q, block=False)
                    to_q = []
                    break
                except queue.Full:
                    time.sleep(1)
        i0 += batch_size
    print("proc finishes")
When I do a keyboard interrupt, I get the trace below... what could the "lock" it's trying to acquire be? Something to do with the queue?
Traceback (most recent call last):
File "/software/Anaconda3-5.0.0.1-el7-x86_64/lib/python3.6/multiprocessing/process.py", line 252, in _bootstrap
util._exit_function()
File "/software/Anaconda3-5.0.0.1-el7-x86_64/lib/python3.6/multiprocessing/util.py", line 322, in _exit_function
_run_finalizers()
File "/software/Anaconda3-5.0.0.1-el7-x86_64/lib/python3.6/multiprocessing/util.py", line 262, in _run_finalizers
finalizer()
File "/software/Anaconda3-5.0.0.1-el7-x86_64/lib/python3.6/multiprocessing/util.py", line 186, in __call__
res = self._callback(*self._args, **self._kwargs)
File "/software/Anaconda3-5.0.0.1-el7-x86_64/lib/python3.6/multiprocessing/queues.py", line 198, in _finalize_join
thread.join()
File "/software/Anaconda3-5.0.0.1-el7-x86_64/lib/python3.6/threading.py", line 1056, in join
self._wait_for_tstate_lock()
File "/software/Anaconda3-5.0.0.1-el7-x86_64/lib/python3.6/threading.py", line 1072, in _wait_for_tstate_lock
elif lock.acquire(block, timeout):
KeyboardInterrupt
The reason your process is never exiting is that you're never telling it to exit. I added a return to the end of your function, and your process appears to exit correctly now.
def proc(batch_q, batch_size, batchperentry, the_event):
    nrow = 100000
    i0 = 0
    to_q = []
    while i0 < nrow:
        rowend = min(i0 + batch_size, nrow)
        somerows = np.random.randint(0, 5, (rowend-i0, 2))
        to_q.append(somerows.tolist())
        if len(to_q) == batchperentry:
            print("adding..", i0, len(to_q))
            while not the_event.is_set():
                try:
                    batch_q.put(to_q, block=False)
                    to_q = []
                    break
                except queue.Full:
                    time.sleep(1)
        i0 += batch_size
    print("proc finishes")
    return # Added this line, You can have it return whatever is most relevant to you.
Here is the full program I ran including my changes to make it exit successfully.
import multiprocessing as mp
import queue
import numpy as np
import random
import time
def main():
    trainbatch_q = mp.Queue(10)
    batchperq = 50
    event = mp.Event()
    tl1 = mp.Process(target=proc,
                     args=(trainbatch_q, 20, batchperq, event))
    print("Starting")
    tl1.start()
    time.sleep(3)
    event.set()
    tl1.join()
    print("Never printed..")

def proc(batch_q, batch_size, batchperentry, the_event):
    nrow = 100000
    i0 = 0
    to_q = []
    while i0 < nrow:
        rowend = min(i0 + batch_size, nrow)
        somerows = np.random.randint(0, 5, (rowend-i0, 2))
        to_q.append(somerows.tolist())
        if len(to_q) == batchperentry:
            print("adding..", i0, len(to_q))
            while not the_event.is_set():
                try:
                    batch_q.put(to_q, block=False)
                    to_q = []
                    break
                except queue.Full:
                    time.sleep(1)
        i0 += batch_size
    print("proc finishes")
    return # Added this line, You can have it return whatever is most relevant to you.

if __name__ == "__main__":
    main()
Hope this helps.
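As for the question about the lock in the KeyboardInterrupt traceback: it belongs to the queue's feeder thread. A multiprocessing.Queue writes items to the underlying pipe from a background feeder thread, and by default a process joins that thread at exit (the Queue._finalize_join frame in the traceback), which blocks until the buffered data has been flushed into the pipe. Below is a minimal standalone sketch (with a hypothetical child function) of that behaviour, using Queue.cancel_join_thread() to skip the join at the cost of possibly losing the buffered items.
import multiprocessing as mp
import time

def child(q):
    # Put far more data than the pipe buffer can hold; nothing ever reads it.
    for i in range(100000):
        q.put("x" * 100)
    # Without the next line the child would hang at exit in Queue._finalize_join,
    # waiting for its feeder thread to push everything into the full pipe.
    q.cancel_join_thread()

if __name__ == "__main__":
    q = mp.Queue()
    p = mp.Process(target=child, args=(q,))
    p.start()
    time.sleep(1)
    p.join()  # returns, because the feeder-thread join was cancelled in the child
    print("joined, exitcode =", p.exitcode)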

Processes not joining even after flushing their queues

I wrote a program using the multiprocessing module which globally executes as follows:
both a simulation and a ui process are started.
the simulation process feeds a queue with new simulation states. If the queue is full, the simulation loop isn't blocked, so it can handle possible incoming messages.
the ui process consumes the simulation queue.
after around 1 second of execution time, the ui process sends a quit event to the main process, then exits the loop. Upon exiting, it sends a stopped event to the main process through the _create_process()'s inner wrapper() function.
the main process receives both events, in whichever order. The quit event results in the main process sending stop signals to all the child processes, while the stopped event increments a counter in the main loop, which will cause it to exit after having received as many stopped events as there are processes.
the simulation process receives the stop event and exits the loop, sending in turn a stopped event to the main process.
the main process has now received 2 stopped events in total and concludes that all child processes are stopped, or on their way to be. As a result, the main loop is exited.
the run() function flushes the queues that have been written to by the child processes.
the child processes are joined.
The problem is that quite often (but not always) the program will hang upon trying to join the simulation process, as per the log below.
[...]
[INFO/ui] process exiting with exitcode 0
[DEBUG/MainProcess] starting thread to feed data to pipe
[DEBUG/MainProcess] ... done self._thread.start()
[DEBUG/simulation] Queue._start_thread()
[DEBUG/simulation] doing self._thread.start()
[DEBUG/simulation] starting thread to feed data to pipe
[DEBUG/simulation] ... done self._thread.start()
[DEBUG/simulation] telling queue thread to quit
[DEBUG/MainProcess] all child processes (2) should have been stopped!
[INFO/simulation] process shutting down
[DEBUG/simulation] running all "atexit" finalizers with priority >= 0
[DEBUG/simulation] telling queue thread to quit
[DEBUG/simulation] running the remaining "atexit" finalizers
[DEBUG/simulation] joining queue thread
[DEBUG/MainProcess] joining process <Process(simulation, started)>
[DEBUG/simulation] feeder thread got sentinel -- exiting
[DEBUG/simulation] ... queue thread joined
[DEBUG/simulation] joining queue thread
Stopping the execution through a Ctrl + C in the shell results in these mangled tracebacks:
Process simulation:
Traceback (most recent call last):
Traceback (most recent call last):
File "./debug.py", line 224, in <module>
run()
File "/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/multiprocessing/process.py", line 257, in _bootstrap
util._exit_function()
File "./debug.py", line 92, in run
process.join() #< This doesn't work.
File "/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/multiprocessing/util.py", line 312, in _exit_function
_run_finalizers()
File "/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/multiprocessing/process.py", line 121, in join
File "/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/multiprocessing/util.py", line 252, in _run_finalizers
finalizer()
File "/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/multiprocessing/util.py", line 185, in __call__
res = self._callback(*self._args, **self._kwargs)
res = self._popen.wait(timeout)
File "/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/multiprocessing/popen_fork.py", line 54, in wait
File "/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/multiprocessing/queues.py", line 196, in _finalize_join
thread.join()
return self.poll(os.WNOHANG if timeout == 0.0 else 0)
File "/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/multiprocessing/popen_fork.py", line 30, in poll
pid, sts = os.waitpid(self.pid, flag)
KeyboardInterrupt
File "/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/threading.py", line 1060, in join
self._wait_for_tstate_lock()
File "/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/threading.py", line 1076, in _wait_for_tstate_lock
elif lock.acquire(block, timeout):
KeyboardInterrupt
As for the code, here is a stripped-down version of it (which is why it may seem incomplete):
#!/usr/bin/env python3
import logging
import multiprocessing
import pickle
import queue
import time
from collections import namedtuple
_LOGGER = multiprocessing.log_to_stderr()
_LOGGER.setLevel(logging.DEBUG)
_BUFFER_SIZE = 4
_DATA_LENGTH = 2 ** 12
_STATUS_SUCCESS = 0
_STATUS_FAILURE = 1
_EVENT_ERROR = 0
_EVENT_QUIT = 1
_EVENT_STOPPED = 2
_MESSAGE_STOP = 0
_MESSAGE_EVENT = 1
_MESSAGE_SIMULATION_UPDATE = 2
_Message = namedtuple('_Message', ('type', 'value',))
_StopMessage = namedtuple('_StopMessage', ())
_EventMessage = namedtuple('_EventMessage', ('type', 'value',))
_SimulationUpdateMessage = namedtuple('_SimulationUpdateMessage', ('state',))
_MESSAGE_STRUCTS = {
    _MESSAGE_STOP: _StopMessage,
    _MESSAGE_EVENT: _EventMessage,
    _MESSAGE_SIMULATION_UPDATE: _SimulationUpdateMessage
}
def run():
    # Messages from the main process to the child ones.
    downward_queue = multiprocessing.Queue()
    # Messages from the child processes to the main one.
    upward_queue = multiprocessing.Queue()
    # Messages from the simulation process to the UI one.
    simulation_to_ui_queue = multiprocessing.Queue(maxsize=_BUFFER_SIZE)
    # Regroup all the queues that can be written by child processes.
    child_process_queues = (upward_queue, simulation_to_ui_queue,)

    processes = (
        _create_process(
            _simulation,
            upward_queue,
            name='simulation',
            args=(
                simulation_to_ui_queue,
                downward_queue
            )
        ),
        _create_process(
            _ui,
            upward_queue,
            name='ui',
            args=(
                upward_queue,
                simulation_to_ui_queue,
                downward_queue
            )
        )
    )

    try:
        for process in processes:
            process.start()
        _main(downward_queue, upward_queue, len(processes))
    finally:
        # while True:
        #     alive_processes = tuple(process for process in processes
        #                             if process.is_alive())
        #     if not alive_processes:
        #         break
        #     _LOGGER.debug("processes still alive: %s" % (alive_processes,))
        for q in child_process_queues:
            _flush_queue(q)
        for process in processes:
            _LOGGER.debug("joining process %s" % process)
            # process.terminate() #< This works!
            process.join() #< This doesn't work.
def _main(downward_queue, upward_queue, process_count):
    try:
        stopped_count = 0
        while True:
            message = _receive_message(upward_queue, False)
            if message is not None and message.type == _MESSAGE_EVENT:
                event_type = message.value.type
                if event_type in (_EVENT_QUIT, _EVENT_ERROR):
                    break
                elif event_type == _EVENT_STOPPED:
                    stopped_count += 1
                    if stopped_count >= process_count:
                        break
    finally:
        # Whatever happens, make sure that all child processes have stopped.
        if stopped_count >= process_count:
            return

        # Send a 'stop' signal to all the child processes.
        for _ in range(process_count):
            _send_message(downward_queue, True, _MESSAGE_STOP)

        while True:
            message = _receive_message(upward_queue, False)
            if (message is not None
                    and message.type == _MESSAGE_EVENT
                    and message.value.type == _EVENT_STOPPED):
                stopped_count += 1
                if stopped_count >= process_count:
                    _LOGGER.debug(
                        "all child processes (%d) should have been stopped!"
                        % stopped_count
                    )
                    break
def _simulation(simulation_to_ui_queue, downward_queue):
    simulation_state = [i * 0.123 for i in range(_DATA_LENGTH)]

    # When the queue is full (possibly from reaching _BUFFER_SIZE), the next
    # solve is computed and kept around until the queue is being consumed.
    next_solve_message = None

    while True:
        message = _receive_message(downward_queue, False)
        if message is not None and message.type == _MESSAGE_STOP:
            break

        if next_solve_message is None:
            # _step(simulation_state)

            # Somehow the copy (pickle) seems to increase the chances for
            # the issue to happen.
            next_solve_message = _SimulationUpdateMessage(
                state=pickle.dumps(simulation_state)
            )

        status = _send_message(simulation_to_ui_queue, False,
                               _MESSAGE_SIMULATION_UPDATE,
                               **next_solve_message._asdict())
        if status == _STATUS_SUCCESS:
            next_solve_message = None
def _ui(upward_queue, simulation_to_ui_queue, downward_queue):
    time_start = -1.0
    previous_time = 0.0

    while True:
        message = _receive_message(downward_queue, False)
        if message is not None and message.type == _MESSAGE_STOP:
            break

        if time_start < 0:
            current_time = 0.0
            time_start = time.perf_counter()
        else:
            current_time = time.perf_counter() - time_start

        message = _receive_message(simulation_to_ui_queue, False)

        if current_time > 1.0:
            _LOGGER.debug("asking to quit")
            _send_message(upward_queue, True, _MESSAGE_EVENT,
                          type=_EVENT_QUIT, value=None)
            break

        previous_time = current_time
def _create_process(target, upward_queue, name='', args=None):
    def wrapper(function, upward_queue, *args, **kwargs):
        try:
            function(*args, **kwargs)
        except Exception:
            _send_message(upward_queue, True, _MESSAGE_EVENT,
                          type=_EVENT_ERROR, value=None)
        finally:
            _send_message(upward_queue, True, _MESSAGE_EVENT,
                          type=_EVENT_STOPPED, value=None)
            upward_queue.close()

    process = multiprocessing.Process(
        target=wrapper,
        name=name,
        args=(target, upward_queue) + args,
        kwargs={}
    )
    return process
def _receive_message(q, block):
    try:
        message = q.get(block=block)
    except queue.Empty:
        return None
    return message
def _send_message(q, block, message_type, **kwargs):
    message_value = _MESSAGE_STRUCTS[message_type](**kwargs)
    try:
        q.put(_Message(type=message_type, value=message_value), block=block)
    except queue.Full:
        return _STATUS_FAILURE
    return _STATUS_SUCCESS
def _flush_queue(q):
    try:
        while True:
            q.get(block=False)
    except queue.Empty:
        pass

if __name__ == '__main__':
    run()
Related questions on StackOverflow and hints in Python's docs basically boil down to needing to flush the queues before joining the processes, which I believe I've been trying to do here. I realize that the simulation queue could still be trying to push the (potentially large) buffered data onto the pipe by the time the program tries to flush it upon exiting, thus ending up with still non-empty queues. This is why I tried to ensure that all the child processes were stopped before reaching this point. Now, looking at the log above and at the additional log output after uncommenting the while True loop checking for alive processes, it appears that the simulation process simply doesn't want to completely shut down even though its target function definitely exited. Could this be the reason for my problem?
If so, how am I supposed to deal with it cleanly? Otherwise, what am I missing here?
Tested with Python 3.4 on Mac OS X 10.9.5.
PS: I'm wondering if this couldn't be related to this bug?
Sounds like the issue was indeed due to some delay in pushing the data through the queue, causing the flushes to be ineffective because they fired too early.
A simple while process.is_alive(): flush_the_queues() seems to do the trick!
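In other words, keep draining the child-writable queues for as long as the child is alive, and only then join it. A minimal standalone sketch of that pattern (with a hypothetical worker, not the simulation code itself):
import multiprocessing
import queue

def _flush_queue(q):
    try:
        while True:
            q.get(block=False)
    except queue.Empty:
        pass

def _worker(q):
    # Write far more data than the pipe can buffer.
    for i in range(100000):
        q.put(i)

if __name__ == '__main__':
    q = multiprocessing.Queue()
    process = multiprocessing.Process(target=_worker, args=(q,))
    process.start()
    # Drain the queue for as long as the child is alive so that its feeder
    # thread can finish flushing, then join.
    while process.is_alive():
        _flush_queue(q)
    process.join()
    print("joined cleanly")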
Lately I ran into a use case similar to yours: multiple processes (up to 11), one input queue, one output queue, but a very heavy output queue.
I was getting an overhead of up to 5 seconds (!) using your suggestion to perform while process.is_alive(): flush_the_queues() before the process.join().
I've reduced that overhead down to 0.7 seconds by relying on a multiprocessing.Manager().list() instead of a multiprocessing.Queue for the output queue. The Manager list doesn't need any flushing. I might also find an alternative to the input queue if I can.
Full example here:
import multiprocessing
import queue
import time
PROCESSES = multiprocessing.cpu_count() - 1
processes = []
def run():
    start = time.time()
    input_queue = multiprocessing.Queue()
    _feed_input_queue(input_queue)

    with multiprocessing.Manager() as manager:
        output_list = manager.list()

        for _ in range(PROCESSES):
            p = multiprocessing.Process(target=_execute, args=(input_queue, output_list))
            processes.append(p)
            p.start()
        print(f"Time to process = {time.time() - start:.10f}")

        start = time.time()
        for p in processes:
            while p.is_alive():  # in principle we could get rid of this if we find an alternative to the output queue
                _flush_queue(input_queue)
            p.join()
        print(f"Time to join = {time.time() - start:.10f}")

        # from here you can do something with the output_list
def _feed_input_queue(input_queue):
    for i in range(10000):
        input_queue.put(i)

def _execute(input_queue: multiprocessing.Queue, output_list: list):
    while not input_queue.empty():
        input_item = input_queue.get()
        output_list.append(do_and_return_something_heavy(input_item))
    return True

def _flush_queue(q):
    try:
        while True:
            q.get(block=False)
    except queue.Empty:
        pass

def do_and_return_something_heavy(input_item):
    return str(input_item) * 100000

if __name__ == '__main__':
    run()
Output
Time to process = 0.1855618954
Time to join = 0.6889970303
Tested on Python 3.6.

python multithread and strange TypeError: takes exactly 1 argument (2 given)

def crawler(id):
    print id
    crawer.getCourseFromUrl("http://www.imooc.com/view/"+id)
    time.sleep(3)

def main():
    print '*** Starting crawler ***'
    try:
        for id in xrange(100):
            threads = []
            for i in range(10):
                t = threading.Thread(target = crawler,args = str(i+1))
                threads.append(t)
            for t in threads:
                t.start()
            for t in threads:
                t.join()
            for t in threads:
                t.close()
    except:
        pass
    print '*** crawler End ***'
Above is my code. When args is 1 to 9 it works well, but when it reaches 10 and larger I get this error:
Exception in thread Thread-10:
Traceback (most recent call last):
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/threading.py", line 810, in __bootstrap_inner
self.run()
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/threading.py", line 763, in run
self.__target(*self.__args, **self.__kwargs)
TypeError: crawler() takes exactly 1 argument (2 given)
I could not figure out what is wrong.
Thread takes args and uses it to call your function like this:
crawler(args[0], args[1], ...)
When str(i+1) has only one char, it gives
crawler(args[0])
When str(i+1) has two chars, it gives
crawler(args[0], args[1])
but your function expects only one argument.
You have to use a list or tuple in Thread:
Thread(target = crawler, args = [ str(i+1) ])
Thread(target = crawler, args = ( str(i+1), ))
and then args[0] will get the whole str(i+1) as one element.
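A quick standalone sketch (with a trivial stand-in for the crawler) makes the unpacking visible:
import threading

def crawler(id):
    print(id)

# args is unpacked with *, and a string is an iterable of characters:
# "10" unpacks to ('1', '0'), so the thread calls crawler('1', '0')
# and prints a TypeError traceback.
threading.Thread(target=crawler, args="10").start()

# A one-element tuple passes the whole string through as a single argument.
threading.Thread(target=crawler, args=("10",)).start()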
Try to pass args as a tuple:
t = threading.Thread(target = crawler, args = (str(i+1),))
As documented here https://docs.python.org/2/library/threading.html#threading.Thread

Anything wrong in my Producer-Consumer implementation in Python using condition objects?

Can someone let me know what is wrong with my code below, which implements the producer-consumer problem in Python? I am using Python 3.4.
import threading
from threading import Thread
from collections import deque
import time
maxSize = 4 # maximum size of the queue
q = deque([])
cur = 0 # current value to push into the queue
lk = threading.Lock()
cvP = threading.Condition(lk) # condition object for consumer
cvC = threading.Condition(lk) # condition object for producer
class Producer:
    def run(self):
        global maxSize, q, cur
        while True:
            with cvP:
                while len(q) >= maxSize:
                    print( "Queue is full and size = ", len(q) )
                    cvC.notify()  # notify the Consumer
                    cvP.wait()    # put Producer to wait
                q.append(cur)
                print("Produced ", cur)
                cur = cur + 1
                cvC.notify()  # notify the Consumer
class Consumer:
    def run(self):
        global maxSize, q, cur
        while True:
            with cvC:
                while len(q) == 0:
                    print( "Queue is empty and size = ", len(q) )
                    cvP.notify()  # notify the Producer
                    cvC.wait()    # put Consumer to wait
                x = q.popleft()
                print("Consumed ", x)
                time.sleep(1)
                cvP.notify()  # notify the Producer
p = Producer()
c = Consumer()
pThread = Thread( target=p.run(), args=())
cThread = Thread( target=c.run(), args=())
pThread.start()
cThread.start()
pThread.join()
cThread.join()
The program output:
Produced 0
Produced 1
Produced 2
Produced 3
Queue is full and size = 4
Then it got stuck. When terminating the program, I got:
Traceback (most recent call last):
File "path/t.py", line 47, in <module>
pThread = Thread( target=p.run(), args=())
File "path/t.py", line 22, in run
cvP.wait()
File "/usr/lib/python3.4/threading.py", line 289, in wait
waiter.acquire()
KeyboardInterrupt
The Producer seemed not to notify the Consumer. Can someone let me know why?
Many thanks in advance!
The locking and unlocking are fine, but you probably want to specify 'run' as the target and not 'run()'
pThread = Thread( target=p.run, args=())
cThread = Thread( target=c.run, args=())
:-)
Explanation: let's simplify.
def foo():
    # ..

# Foo will run on a new thread
Thread(target=foo)

# foo() is run before the thread is created, and its return
# value is the target for the Thread call.
Thread(target=foo())
You can see in the stack trace that it never went beyond line 47, which is
pThread = Thread( target=p.run(), args=())
Which is the same as
x = p.run()
pThread = Thread(target=x, args=())
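A tiny standalone check makes the difference visible: with target=foo the function runs later in the new thread, while with target=foo() it runs immediately in the main thread and its return value None becomes the target, so the new thread does nothing.
import threading

def foo():
    print("foo ran in", threading.current_thread().name)

threading.Thread(target=foo).start()    # prints a worker thread name, e.g. Thread-1
threading.Thread(target=foo()).start()  # prints MainThread; the new thread has no target to run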

Python Multiprocess issues process not starting

I wrote this code. I want to have a main thread that starts multiple subprocesses, each of which spawns a listener thread to wait for a kill message. subprocess works, but testprocess does not run. There are no errors. Any ideas?
from multiprocessing import Process, Pipe
from threading import Thread
import time
Alive = True
def listener_thread(conn): #listens for kill from main
    global Alive
    while True:
        data = conn.recv()
        if data == "kill":
            Alive = False #value for kill
            break

def subprocess(conn):
    t = Thread(target=listener_thread, args=(conn,))
    count = 0
    t.start()
    while Alive:
        print "Run number = %d" % count
        count = count + 1

def testprocess(conn):
    t = Thread(target=listner_thread, args=(conn,))
    count = 0
    t.start()
    while Alive:
        print "This is a different thread run = %d" % count
        count = count + 1
parent_conn, child_conn = Pipe()
p = Process(target=subprocess, args=(child_conn,))
p2 = Process(target=testprocess, args=(child_conn,))
runNum = int(raw_input("Enter a number: "))
p.start()
p2.start()
time.sleep(runNum)
parent_conn.send("kill") #sends kill to listener thread to tell them when to stop
p.join()
p2.join()
A typo in testprocess makes the function quit early.
listner_thread should be listener_thread.
If you comment out the subprocess-related code and run it, you will see the following error:
Process Process-1:
Traceback (most recent call last):
File "/usr/lib/python2.7/multiprocessing/process.py", line 258, in _bootstrap
self.run()
File "/usr/lib/python2.7/multiprocessing/process.py", line 114, in run
self._target(*self._args, **self._kwargs)
File "t.py", line 25, in testprocess
t = Thread(target=listner_thread, args=(conn,))
NameError: global name 'listner_thread' is not defined
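With the spelling fixed, testprocess starts its listener thread the same way subprocess does; this is just the question's function with that one-character change, not a complete program:
def testprocess(conn):
    t = Thread(target=listener_thread, args=(conn,))  # was: listner_thread
    count = 0
    t.start()
    while Alive:
        print "This is a different thread run = %d" % count
        count = count + 1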
