This is a producer-consumer problem. I need a single producer and multiple consumers to access a shared data cell, and each consumer needs to access the produced data before the producer makes additional data. The code works fine when there is a single consumer. I have attempted to make a list of the producer and consumers in order to .start() and .join() them. The program works as far as the first consumer, but hangs when it gets to the second consumer. I have tried changing the locking mechanism from notify() to notifyAll() in getData and setData. I am a beginner in Python and this stuff is pretty foreign to me, but I have been trying things for 10 hours and would really appreciate some help.
import time, random
from threading import Thread, currentThread, Condition

class SharedCell(object):
    def __init__(self):
        self.data = -1
        self.writeable = True
        self.condition = Condition()

    def setData(self, data):
        self.condition.acquire()
        while not self.writeable:
            self.condition.wait()
        print("%s setting data to %d" % \
              (currentThread().getName(), data))
        self.data = data
        self.writeable = False
        self.condition.notifyAll()
        self.condition.release()

    def getData(self):
        self.condition.acquire()
        while self.writeable:
            self.condition.wait()
        print(f'accessing data {currentThread().getName()} {self.data}')
        self.writeable = True
        self.condition.notifyAll()
        self.condition.release()
        return self.data
class Producer(Thread):
    def __init__(self, cell, accessCount, sleepMax):
        Thread.__init__(self, name="Producer")
        self.accessCount = accessCount
        self.cell = cell
        self.sleepMax = sleepMax

    def run(self):
        print("%s starting up" % self.getName())
        for count in range(self.accessCount):
            time.sleep(random.randint(1, self.sleepMax))
            self.cell.setData(count + 1)
        print("%s is done producing\n" % self.getName())

class Consumer(Thread):
    def __init__(self, cell, accessCount, sleepMax):
        Thread.__init__(self)
        self.accessCount = accessCount
        self.cell = cell
        self.sleepMax = sleepMax

    def run(self):
        print("%s starting up" % self.getName())
        for count in range(self.accessCount):
            time.sleep(random.randint(1, self.sleepMax))
            value = self.cell.getData()
        print("%s is done consuming\n" % self.getName())
def main():
    accessCount = int(input("Enter the number of accesses: "))
    sleepMax = 4
    cell = SharedCell()
    producer = Producer(cell, accessCount, sleepMax)
    consumer = Consumer(cell, accessCount, sleepMax)
    consumerTwo = Consumer(cell, accessCount, sleepMax)

    threads = []
    threads.append(producer)
    threads.append(consumer)
    threads.append(consumerTwo)

    print("Starting the threads")

    for thread in threads:
        thread.start()
        thread.join()

main()
The join function blocks the current thread and waits until the indicated thread terminates. In the loop at the end of your main function, why do you join each thread immediately after starting it? That results in starting thread 1, then waiting for it to terminate before starting thread 2, then waiting for that one to terminate before starting thread 3, and so on.
Perhaps you meant something like this:
for thread in threads:
    thread.start()

for thread in threads:
    thread.join()
so that every thread is started before you wait for them to terminate.
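Note that fixing the loops alone will not make the posted program finish: each produced value can be consumed exactly once, while two consumers each attempt accessCount reads, so one consumer will eventually block forever. If the intent is that every consumer sees every value before the producer writes again, one simple approach is to give each consumer its own one-slot queue and have the producer put every value into all of them. A sketch of that idea (BroadcastCell and consumerIndex are my names, not from the original code):

from queue import Queue

class BroadcastCell(object):
    def __init__(self, numConsumers):
        # one single-slot queue per consumer
        self.queues = [Queue(maxsize=1) for _ in range(numConsumers)]

    def setData(self, data):
        # each put blocks until that consumer has taken the previous value,
        # so the producer cannot run ahead of any consumer
        for q in self.queues:
            q.put(data)

    def getData(self, consumerIndex):
        return self.queues[consumerIndex].get()

Each Consumer would then be constructed with its own consumerIndex and call self.cell.getData(self.consumerIndex).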
It's quite easy to send or receive data through threads using the Queue module when doing one thing at a time, but I didn't figure out how to send something to a thread and then properly wait for a return value.
In the example below, I was expecting to send something to the thread to be processed and then harvest the result, but the t.queue.get() in the main function receives what was just sent above it instead of waiting for the thread to respond. How can I get around this?
#!/usr/bin/env python3
from threading import Thread
from queue import Queue

class MyThread(Thread):
    queue: Queue

    def __init__(self, *args, **kwargs):
        super().__init__()
        self.queue = Queue()
        self.daemon = True

    # receives a name, then prints "Hello, name!"
    def run(self):
        while True:
            val = self.queue.get()
            if not val:
                break
            self.queue.put(f'Hello, {val}!')

def main():
    t = MyThread()
    t.start()

    # sends string to thread
    t.queue.put('Jurandir')

    # expects to receive "Hello, Jurandir!",
    # but "Jurandir" is immediately returned
    ret = t.queue.get()
    print(ret)

if __name__ == '__main__':
    main()
The thing is that you are getting the alleged result immediately from the queue, before the worker has added the result. You can split it into an "input queue" and a "results queue", and then wait in the main thread until there's output in the results queue.
#!/usr/bin/env python3
from threading import Thread
from queue import Queue

class MyThread(Thread):
    def __init__(self, *args, **kwargs):
        super().__init__()
        self.input_queue = Queue()
        self.results_queue = Queue()
        self.daemon = True

    # receives a name, then replies "Hello, name!"
    def run(self):
        while True:
            val = self.input_queue.get()
            if not val:
                break
            self.results_queue.put(f'Hello, {val}!')

def main():
    t = MyThread()
    t.start()

    # sends string to thread
    t.input_queue.put('Jurandir')

    # get() blocks until the worker has put a result
    ret = t.results_queue.get()
    print(ret)

if __name__ == '__main__':
    main()
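If several requests can be in flight at once, a single results queue no longer tells you which reply belongs to which request. One common variation, sketched below rather than taken from the answer above, is to send a private reply queue along with each request:

from threading import Thread
from queue import Queue

requests = Queue()

def worker():
    while True:
        val, reply_q = requests.get()
        if val is None:  # (None, None) acts as a shutdown sentinel
            break
        reply_q.put(f'Hello, {val}!')

Thread(target=worker, daemon=True).start()

# each caller receives its answer on its own queue
my_reply = Queue()
requests.put(('Jurandir', my_reply))
print(my_reply.get())  # -> Hello, Jurandir!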
I need to transfer data from a subprocess to the main one.
The subprocess does a repetitive task using threading.Timer.
Whenever threading.Timer is used, the queue no longer delivers everything that was put into it.
The subprocess is acquiring data, while I want to display it in real time in the main process.
I wrote this snippet to showcase the problem:
import threading
import multiprocessing

class MyClass():
    def __init__(self, q):
        self.q = q
        print("put value in q: ", "start")
        self.q.put("start")
        self.i = 0
        self.update()

    def update(self):
        if self.i < 3:
            print("put value in q: ", self.i)
            self.q.put(self.i)
            self.i += 1
            threading.Timer(0.5, self.update).start()
        else:
            self.stop()

    def stop(self):
        print("put value in q: ", "stop")
        self.q.put("stop")

if __name__ == "__main__":
    q = multiprocessing.Queue()
    process = multiprocessing.Process(target=MyClass, args=(q,))
    process.start()
    process.join()
    for i in range(5):
        print("get value in q: ", q.get(block=True, timeout=2))
and I get this only:
put value in q: start
put value in q: 0
put value in q: 1
put value in q: 2
put value in q: stop
get value in q: start
get value in q: 0
Is there a solution or a workaround?
You have a process. Its main thread is the MyClass() call. threading.Timer() spawns another thread alongside the main thread, so you have to wait until all additional threads have terminated before the process stops. To solve the problem, replace threading.Timer(0.5, self.update).start() with the following (wait for the timer thread):
t = threading.Timer(0.5, self.update)
t.start()
t.join()
Or replace threading.Timer(0.5, self.update).start() with the following (no additional threads; this needs import time at the top of the file):
time.sleep(.5)
self.update()
Both solutions should work.
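A related detail: the parent reads a fixed range(5) of items, which only works if exactly five values arrive. A more robust pattern, once one of the fixes above is applied, is to drain the queue until the "stop" sentinel that MyClass.stop() already puts (a sketch):

process.start()
while True:
    value = q.get(block=True, timeout=2)
    print("get value in q: ", value)
    if value == "stop":  # sentinel put by MyClass.stop()
        break
process.join()  # join after draining, so late puts are not lost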
I'm trying to create a loop of threads, and so far the code is good. But I have a problem when a thread exits because of some exception.
Now I'm trying to figure out how to start an additional thread after one thread exits because of an exception. I did browse around, but I didn't find any example that would work for this complex code. Any help would be great!
If a thread stops and the queue is not empty, restart the stopped thread and continue with the rest of the list.
This is my code:
import threading
import Queue
from time import sleep

some_list = [1,2,3,4,5,6,7,8]
exitFlag = 0

class threads():
    @staticmethod
    def process_data(threadName, q, queueLock):
        workQueue = q
        while not exitFlag:
            queueLock.acquire()
            if not workQueue.empty():
                data = q.get()
                queueLock.release()
                print "%s processing %s" % (threadName, data)
            else:
                queueLock.release()
                sleep(1)

    def run_threads(self):
        threadList = ["Thread-1", "Thread-2", "Thread-3"]
        nameList = some_list
        queueLock = threading.Lock()
        workQueue = Queue.Queue(1000000)
        threads = []
        threadID = 1

        # Create new threads
        for tName in threadList:
            thread = myThread(threadID, tName, workQueue, queueLock)
            thread.start()
            threads.append(thread)
            threadID += 1

        # Fill the queue
        queueLock.acquire()
        for word in nameList:
            workQueue.put(word)
        queueLock.release()

        # Wait for queue to empty
        while not workQueue.empty():
            pass

        # Notify threads it's time to exit
        global exitFlag
        exitFlag = 1

        # Wait for all threads to complete
        for t in threads:
            t.join()
        print "Exiting Main Thread"

class myThread (threading.Thread, threads):
    def __init__(self, threadID, name, q, queueLock):
        threading.Thread.__init__(self, target=self.run)
        self.threadID = threadID
        self.queueLock = queueLock
        self.name = name
        self.q = q

    def run(self):
        print "Starting " + self.name
        threads.process_data(self.name, self.q, self.queueLock)
        print "Exiting " + self.name

threads().run_threads()
Something like this should work:
...
# Wait for queue to empty
while not workQueue.empty():
    for (i, t) in enumerate(threads):
        if not t.is_alive():
            print("Recreating thread " + t.name)
            thread = myThread(threadID, threadList[i], workQueue, queueLock)
            thread.start()
            threads[i] = thread
            threadID += 1
...
I would advise putting the thread-starting code into a method of its own, as it is now duplicated and hard to maintain.
The problem here is that you might "lose" the data that was popped from the queue by the thread that died.
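A minimal sketch of the helper the answer suggests, so startup and restart share one code path (the name start_worker and its signature are my own, not from the original code):

def start_worker(self, threadID, name, workQueue, queueLock, threads, slot=None):
    '''Create and start one worker; used for initial startup and for restarts.'''
    thread = myThread(threadID, name, workQueue, queueLock)
    thread.start()
    if slot is None:
        threads.append(thread)  # initial startup
    else:
        threads[slot] = thread  # replace a dead thread in place
    return thread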
I am trying to find a way to compare different objects (inherited from the Thread class) in a way that keeps parallelism (real-time processing).
Every worker has three fields (message, count, n). I am updating count on every iteration. Let's say that I have three worker threads. I need to compare, in my server, based on the field count. How can I access and compare Worker.count for every worker in a way that keeps parallelism?
from Queue import Queue
from threading import Thread
import time

class Worker(Thread):
    def __init__(self, message, n):
        Thread.__init__(self)
        self.message = message
        self.count = 0
        self.n = n

    def run(self):
        while True:
            print(self.message)
            self.count += 1
            time.sleep(self.n)

class Comparator(Thread):
    def __init__(self, message, n):
        Thread.__init__(self)
        self.message = message
        self.n = n

    def run(self):
        while True:
            max = max([x.count for x in threads])  # how can I access the other threads?
            print "max", max
            time.sleep(self.n)

thread1 = Worker("Test-1", 1)
thread2 = Worker("Test-2", 3)
s = Comparator("Test-3", 2)
s.start()
s.join()

threads = [thread1, thread2]
for g in threads:
    g.start()

for worker in threads:
    # wait for workers
    worker.join()
NOTE: Using a shared object here is not a good solution for me; using Queue(), for example, is not what I want. I need to do the comparison based on a field in the objects that I update on the go (for simplicity, I use max()).
You can pass the threads list to the Comparator __init__() method:
[...]
class Comparator(Thread):
    def __init__(self, message, n, threads):
        Thread.__init__(self)
        self.message = message
        self.n = n
        self.threads = threads

    def run(self):
        while True:
            # don't shadow the builtin max(), or the call fails
            max_count = max([x.count for x in self.threads])
            print("max", max_count)
            time.sleep(self.n)
[...]
threads = [thread1, thread2]
s = Comparator("Test-3", 2, threads)
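Note that the start order in the question is also a problem: s.join() is called before the workers are even started, so the main thread blocks on the comparator forever. A sketch of a working order (making the comparator a daemon is my addition, so it cannot keep the process alive on its own):

threads = [Worker("Test-1", 1), Worker("Test-2", 3)]
s = Comparator("Test-3", 2, threads)
s.daemon = True  # the comparator loops forever; don't let it block exit

for g in threads:
    g.start()
s.start()

for worker in threads:
    worker.join()  # the workers also loop forever in this toy example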
I am building a multithreading application.
I have set up a thread pool: a queue of size N and N workers that get data from the queue.
When all tasks are done I use
tasks.join()
where tasks is the queue.
The application seems to run smoothly until suddenly, at some point (after 20 minutes, for example), it terminates with the error
thread.error: can't start new thread
Any ideas?
Edit: the threads are daemon threads and the code looks like this:
while True:
    t0 = time.time()
    keyword_statuses = DBSession.query(KeywordStatus).filter(KeywordStatus.status==0).options(joinedload(KeywordStatus.keyword)).with_lockmode("update").limit(100)
    if keyword_statuses.count() == 0:
        DBSession.commit()
        break
    for kw_status in keyword_statuses:
        kw_status.status = 1
    DBSession.commit()
    t0 = time.time()
    w = SWorker(threads_no=32, network_server='http://192.168.1.242:8180/', keywords=keyword_statuses, cities=cities, saver=MySqlRawSave(DBSession), loglevel='debug')
    w.work()
print 'finished'
When are the daemon threads killed? When the application finishes, or when work() finishes?
Look at the thread pool and the worker (it's from a recipe):
from Queue import Queue
from threading import Thread, Event, current_thread
import time

event = Event()

class Worker(Thread):
    """Thread executing tasks from a given tasks queue"""
    def __init__(self, tasks):
        Thread.__init__(self)
        self.tasks = tasks
        self.daemon = True
        self.start()

    def run(self):
        '''Start processing tasks from the queue'''
        while True:
            event.wait()
            #time.sleep(0.1)
            try:
                func, args, callback = self.tasks.get()
            except Exception, e:
                print str(e)
                return
            else:
                if callback is None:
                    func(args)
                else:
                    callback(func(args))
                self.tasks.task_done()

class ThreadPool:
    """Pool of threads consuming tasks from a queue"""
    def __init__(self, num_threads):
        self.tasks = Queue(num_threads)
        for _ in range(num_threads):
            Worker(self.tasks)

    def add_task(self, func, args=None, callback=None):
        '''Add a task to the queue'''
        self.tasks.put((func, args, callback))

    def wait_completion(self):
        '''Wait for completion of all the tasks in the queue'''
        self.tasks.join()

    def broadcast_block_event(self):
        '''blocks running threads'''
        event.clear()

    def broadcast_unblock_event(self):
        '''unblocks running threads'''
        event.set()

    def get_event(self):
        '''returns the event object'''
        return event
Also, maybe the problem is that I create SWorker objects in a loop?
What happens with the old SWorker objects (garbage collection?)?
There is still not enough code to localize the problem, but I'm sure it is because you don't reuse your threads and start too many of them. Did you see the canonical example from the Queue Python documentation, http://docs.python.org/library/queue.html (bottom of the page)?
I can reproduce your problem with the following code:
import threading
import Queue

q = Queue.Queue()

def worker():
    item = q.get(block=True)  # sleeps forever for now
    q.task_done()

# creates an unbounded number of worker threads and fails
# after some time with "error: can't start new thread"
while True:
    t = threading.Thread(target=worker)
    t.start()

q.join()  # never reached
Instead you must create a pool of threads with a known number of threads and put your data into the queue, like:
q = Queue()

def worker():
    while True:
        item = q.get()
        do_work(item)
        q.task_done()

for i in range(num_worker_threads):
    t = Thread(target=worker)
    t.daemon = True
    t.start()

for item in source():
    q.put(item)

q.join()  # block until all tasks are done
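To make the docs pattern concrete, here is a self-contained Python 3 version; do_work, source, and num_worker_threads are stand-ins of my own, just to make it runnable:

from queue import Queue
from threading import Thread

num_worker_threads = 4

def do_work(item):
    print('working on', item)  # placeholder for the real task

def source():
    return range(20)  # placeholder for the real item source

q = Queue()

def worker():
    while True:
        item = q.get()
        do_work(item)
        q.task_done()

for i in range(num_worker_threads):
    Thread(target=worker, daemon=True).start()

for item in source():
    q.put(item)

q.join()  # block until all tasks are done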
UPD: In case you need to stop a thread, you can add a flag to it or send it a special mark meaning "stop", to break the while loop:
class Worker(Thread):
    break_msg = object()  # just a unique mark

    def __init__(self):
        Thread.__init__(self)
        self.keep_running = True  # 'continue' is a reserved word, so use another name

    def run(self):
        while self.keep_running:  # can stop and destroy the thread (variant 1)
            msg = queue.get(block=True)
            if msg is self.break_msg:
                return  # will stop and destroy the thread (variant 2)
            do_work()
            queue.task_done()

workers = [Worker() for _ in xrange(num_workers)]
for w in workers:
    w.start()

for task in tasks:
    queue.put(task)

for _ in xrange(num_workers):
    queue.put(Worker.break_msg)  # stop threads after all tasks are done; you need as many marks as you have threads
OR
queue.join()  # wait until all tasks are done
for w in workers:
    w.keep_running = False
    queue.put(None)  # wake each thread blocked in get() so it can check the flag
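For completeness: on Python 3, the same fixed-size-pool idea comes built in with concurrent.futures, which avoids hand-rolling workers and sentinels entirely. A minimal sketch (not from the answer above; do_work is a placeholder):

from concurrent.futures import ThreadPoolExecutor

def do_work(item):
    return item * 2  # placeholder task

# the executor reuses a fixed number of threads, so thread creation
# can never run away and exhaust the OS limit
with ThreadPoolExecutor(max_workers=8) as pool:
    results = list(pool.map(do_work, range(100)))

print(results[:5])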