I wrote this script to read data from a txt file and process it. But it seems that with a big file and a high number of threads, the further it gets through the list, the slower the script becomes.
Is there a way to avoid waiting for all the threads to finish and start a new one whenever a thread is done with its work?
Also, when it finishes processing, the script doesn't exit.
import threading, Queue, time
class Work(threading.Thread):
def __init__(self, jobs):
threading.Thread.__init__(self)
self.Lock = threading.Lock()
self.jobs = jobs
def myFunction(self):
#simulate work
self.Lock.acquire()
print("Firstname: "+ self.firstname + " Lastname: "+ self.lastname)
self.Lock.release()
time.sleep(3)
def run(self):
while True:
self.item = self.jobs.get().rstrip()
self.firstname = self.item.split(":")[0]
self.lastname = self.item.split(":")[1]
self.myFunction()
self.jobs.task_done()
def main(file):
jobs = Queue.Queue()
myList = open(file, "r").readlines()
MAX_THREADS = 10
pool = [Work(jobs) for i in range(MAX_THREADS)]
for thread in pool:
thread.start()
for item in myList:
jobs.put(item)
for thread in pool:
thread.join()
if __name__ == '__main__':
main('list.txt')
The script probably seems to take longer on larger inputs because there is a 3-second pause between each batch of printing.
The reason the script doesn't finish is that, since you are using a Queue, you need to call join() on the Queue, not on the individual threads. To make sure the script exits once the jobs have stopped running, you should also set daemon = True on the worker threads.
The Lock will also not work in the current code because each thread creates its own lock in __init__. All the workers need to share the same lock.
If you want to use this in Python 3 (which you should), the Queue module has been renamed to queue.
import threading, Queue, time
lock = threading.Lock() # One lock
class Work(threading.Thread):
def __init__(self, jobs):
threading.Thread.__init__(self)
self.daemon = True # set daemon
self.jobs = jobs
def myFunction(self):
#simulate work
lock.acquire() # All jobs share the one lock
print("Firstname: "+ self.firstname + " Lastname: "+ self.lastname)
lock.release()
time.sleep(3)
def run(self):
while True:
self.item = self.jobs.get().rstrip()
self.firstname = self.item.split(":")[0]
self.lastname = self.item.split(":")[1]
self.myFunction()
self.jobs.task_done()
def main(file):
jobs = Queue.Queue()
with open(file, 'r') as fp: # Close the file when we're done
myList = fp.readlines()
MAX_THREADS = 10
pool = [Work(jobs) for i in range(MAX_THREADS)]
for thread in pool:
thread.start()
for item in myList:
jobs.put(item)
jobs.join() # Join the Queue
if __name__ == '__main__':
main('list.txt')
Simpler example (based on an example from the Python docs)
import threading
import time
from Queue import Queue # Py2
# from queue import Queue # Py3
lock = threading.Lock()
def worker():
while True:
item = jobs.get()
if item is None:
break
firstname, lastname = item.split(':')
lock.acquire()
print("Firstname: " + firstname + " Lastname: " + lastname)
lock.release()
time.sleep(3)
jobs.task_done()
jobs = Queue()
pool = []
MAX_THREADS = 10
for i in range(MAX_THREADS):
thread = threading.Thread(target=worker)
thread.start()
pool.append(thread)
with open('list.txt') as fp:
for line in fp:
jobs.put(line.rstrip())
# block until all tasks are done
jobs.join()
# stop workers
for i in range(MAX_THREADS):
jobs.put(None)
for thread in pool:
thread.join()
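If you can use Python 3, the standard library's concurrent.futures can replace the hand-rolled queue/worker plumbing entirely. The following is a minimal sketch under that assumption (the file name list.txt and the 3-second sleep are carried over from the examples above; it is an alternative illustration, not part of the original code):
from concurrent.futures import ThreadPoolExecutor
import time

def process(line):
    # one "firstname:lastname" record per line, as in the examples above
    firstname, lastname = line.rstrip().split(":")
    # prints from different threads may interleave; wrap them in a Lock if that matters
    print("Firstname: " + firstname + " Lastname: " + lastname)
    time.sleep(3)  # simulate work

with open("list.txt") as fp:
    lines = fp.readlines()

# a free worker picks up the next line as soon as it finishes the previous one,
# and leaving the with-block waits for all lines to be processed
with ThreadPoolExecutor(max_workers=10) as executor:
    executor.map(process, lines)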
Related
This is a Producer-Consumer problem. I need a single producer and multiple consumers to access a shared data cell, and each consumer needs to access the produced data before the producer makes additional data. The code works fine when there is a single consumer. I have attempted to make a list of the producer and consumers in order to .start() and .join() them. The program works as far as the first consumer but hangs when it gets to the second consumer. I have tried changing the locking mechanism from notify to notifyAll in getData and setData. I am a beginner in Python and this is pretty foreign to me, but I have been trying things for 10 hours and would really appreciate some help.
import time, random
from threading import Thread, currentThread, Condition
class SharedCell(object):
def __init__(self):
self.data = -1
self.writeable = True
self.condition = Condition()
def setData(self, data):
self.condition.acquire()
while not self.writeable:
self.condition.wait()
print("%s setting data to %d" % \
(currentThread().getName(), data))
self.data = data
self.writeable = False
self.condition.notifyAll()
self.condition.release()
def getData(self):
self.condition.acquire()
while self.writeable:
self.condition.wait()
print(f'accessing data {currentThread().getName()} {self.data}')
self.writeable = True
self.condition.notifyAll()
self.condition.release()
return self.data
class Producer(Thread):
def __init__(self, cell, accessCount, sleepMax):
Thread.__init__(self, name = "Producer")
self.accessCount = accessCount
self.cell = cell
self.sleepMax = sleepMax
def run(self):
print("%s starting up" % self.getName())
for count in range(self.accessCount):
time.sleep(random.randint(1, self.sleepMax))
self.cell.setData(count + 1)
print("%s is done producing\n" % self.getName())
class Consumer(Thread):
def __init__(self, cell, accessCount, sleepMax):
Thread.__init__(self)
self.accessCount = accessCount
self.cell = cell
self.sleepMax = sleepMax
def run(self):
print("%s starting up" % self.getName())
for count in range(self.accessCount):
time.sleep(random.randint(1, self.sleepMax))
value = self.cell.getData()
print("%s is done consuming\n" % self.getName())
def main():
accessCount = int(input("Enter the number of accesses: "))
sleepMax = 4
cell = SharedCell()
producer = Producer(cell, accessCount, sleepMax)
consumer = Consumer(cell, accessCount, sleepMax)
consumerTwo = Consumer(cell, accessCount, sleepMax)
threads = []
threads.append(producer)
threads.append(consumer)
threads.append(consumerTwo)
print("Starting the threads")
for thread in threads:
thread.start()
thread.join()
main()
The join function blocks the current thread and waits until the indicated thread terminates. In the loop at the end of your main function, why do you join each thread immediately after starting it? That results in starting thread 1 and waiting for it to terminate, then starting thread 2 and waiting for it to terminate, then starting thread 3, and so on.
Perhaps you meant something like this:
for thread in threads:
thread.start()
for thread in threads:
thread.join()
so that every thread is started before you wait for them to terminate.
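For illustration only, here is a tiny self-contained sketch of that pattern; the work function, names, and sleep time are made up:
import threading, time

def work(name):
    time.sleep(1)
    print(name, "done")

threads = [threading.Thread(target=work, args=("worker-%d" % i,)) for i in range(3)]
for thread in threads:
    thread.start()   # start every thread first ...
for thread in threads:
    thread.join()    # ... then wait for all of them to terminate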
I have been struggling to implement a proper dynamic multi-threading system until now. The idea is to spin up multiple pools of sub-threads from the main thread (each pool has its own number of threads and queue size) to run functions, and to let the user decide whether the main thread should wait for a pool to finish or just move on to the next line after starting it. This multi-threading logic helps extract data in parallel and at a high frequency.
The solution to my issue is shared below for everyone who wants it. If you have any doubts or questions, please let me know.
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 5 00:00:51 2021
#author: Tahasanul Abraham
"""
#%% Initialization of Libraries
import sys, os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
parentdir_1up = os.path.dirname(parentdir)
sys.path.insert(0,parentdir_1up)
from queue import Queue
from threading import Thread, Lock
class Worker(Thread):
def __init__(self, tasks):
Thread.__init__(self)
self.tasks = tasks
self.daemon = True
self.lock = Lock()
self.start()
def run(self):
while True:
func, args, kargs = self.tasks.get()
try:
if func.lower() == "terminate":
break
except:
try:
with self.lock:
func(*args, **kargs)
except Exception as exception:
print(exception)
self.tasks.task_done()
class ThreadPool:
def __init__(self, num_threads, num_queue=None):
if num_queue is None or num_queue < num_threads:
num_queue = num_threads
self.tasks = Queue(num_queue)
self.threads = num_threads
for _ in range(num_threads): Worker(self.tasks)
# This function can be called to terminate all the worker threads of the queue
def terminate(self):
self.wait_completion()
for _ in range(self.threads): self.add_task("terminate")
return None
# This function can be called to add new work to the queue
def add_task(self, func, *args, **kargs):
self.tasks.put((func, args, kargs))
# This function can be called to wait until all the workers are done processing the pending work. If this function is called, the main thread will not process any new lines until all the workers have finished the pending work.
def wait_completion(self):
self.tasks.join()
# This function can be called to check whether there is any pending/running work in the queue. If any work is pending, the call returns Boolean True; otherwise it returns Boolean False
def is_alive(self):
if self.tasks.unfinished_tasks == 0:
return False
else:
return True
#%% Standalone Run
if __name__ == "__main__":
import time
def test_return(x,d):
print (str(x) + " - pool completed")
d[str(x)] = x
time.sleep(5)
# 2 threads and a queue size of 1,000,000,000
pool = ThreadPool(2,1000000000)
r ={}
for i in range(10):
pool.add_task(test_return, i, r)
print (str(i) + " - pool added")
print ("Waiting for completion")
pool.wait_completion()
print ("pool done")
# 1 thread and a queue size of 2
pool = ThreadPool(1,2)
r ={}
for i in range(10):
pool.add_task(test_return, i, r)
print (str(i) + " - pool added")
print ("Waiting for completion")
pool.wait_completion()
print ("pool done")
# 2 threads and a queue size of 1
pool = ThreadPool(2,1)
r ={}
for i in range(10):
pool.add_task(test_return, i, r)
print (str(i) + " - pool added")
print ("Waiting for completion")
pool.wait_completion()
print ("pool done")
Making a new Pool
Using the above classes, one can make a pool of their own choice with the number of parallel threads they want and the size of the queue. Example of creating a pool of 10 threads with a queue size of 200:
pool = ThreadPool(10,200)
Adding work to Pool
Once a pool is created, one can use pool.add_task to do sub-routine work. In my example I used the pool to call a function with its arguments. For example, here I call the test_return function with its arguments i and r:
pool.add_task(test_return, i, r)
Waiting for the pool to complete its work
If a pool is given some work to do, the user can either move on to other code lines or wait for the pool to finish its work before the next lines are executed. To wait for the pool to finish the work and then return, a call to wait_completion is required. Example:
pool.wait_completion()
Terminate and close down the pool threads
Once the pool threads are no longer needed, it is possible to terminate and close them down to free memory and release the blocked threads. This can be done by calling the following function.
pool.terminate()
Checking if there are any pending works from the pool
There is a function that can be called to check whether there is any pending/running work in the queue. If any work is pending, the call returns Boolean True; otherwise it returns Boolean False. To check whether the pool is working or not, call the following function.
pool.is_alive()
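For example, a hypothetical way to let the main thread keep doing other work while polling the pool, built on the ThreadPool and test_return defined above (the thread count, queue size, and sleep interval are placeholders):
import time

pool = ThreadPool(4, 100)            # 4 worker threads, queue size 100
for i in range(20):
    pool.add_task(test_return, i, {})

while pool.is_alive():               # True while work is pending or running
    # the main thread is free to do other things here
    time.sleep(1)

pool.terminate()                     # release the worker threads when done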
I have two tasks: one is called every two seconds and the other is called at random times. Both need to access an object that can't be called again before the previous call has finished (if that happens, I need to reboot the hardware device manually).
The object is from a class which allows the communication with a hardware device via sockets.
To do so I created a thread class, so that everything runs in the background and no other tasks are blocked. Within this class I implemented a queue: two different functions put tasks into the queue, and a worker is supposed to execute the tasks, but NOT simultaneously.
As this entire project is a server it should run continuously.
Well, here is my code, and it obviously is not working. I would be very happy if anyone has a clue about how to solve this.
Update: 26.10.2020
In order to make my issue clearer, I updated the code based on the answer from Artiom Kozyrev.
import time
from threading import Lock, Thread
import threading
from queue import Queue
class ThreadWorker(Thread):
def __init__(self, _lock: Lock, _queue: Queue, name: str):
# daemon=False means that process waits until all threads are finished
# (not only main one and garbage collector)
super().__init__(name=name, daemon=False)
# the lock prevents several worker threads from doing work simultaneously
self.lock = _lock
# tasks are sent from the main thread via the Queue
self.queue = _queue
def do_work(self, job):
# the lock context manager prevents other worker threads from working at the same time
with self.lock:
time.sleep(3)
print(f"{threading.current_thread().getName()}: {job * 10}")
def run(self):
while True:
job = self.queue.get()
# "poison pill" - stop message from the queue
if not job:
break
self.do_work(job)
def TimeStamp(msg):
tElapsed = (time.time() - tStart) # Display Thread Info
sElap = int(tElapsed)
msElap = int((tElapsed - sElap) * 1000)
usElap = int((tElapsed - sElap - msElap / 1000) * 1000000)
print(msg , ': ', sElap, 's', msElap, 'ms', usElap, 'us')
def f1():
TimeStamp("f1 start")
time.sleep(2)
TimeStamp("f1 finished")
def f2():
TimeStamp("f2 start")
time.sleep(6)
TimeStamp("f2 finished")
def insertf1():
for i in range(10):
q.put(f1())
time.sleep(2)
def insertf2():
for i in range(10):
time.sleep(10)
q.put(f2())
q = Queue()
lock = Lock()
workers = [ThreadWorker(lock, q, f"Th-worker-{i}") for i in range(5)] # create workers
for w in workers:
w.start()
tStart = time.time()
threading.Thread(target=insertf1, daemon=True).start()
threading.Thread(target=insertf2, daemon=True).start()
The output is:
f1 start : 0 s 0 ms 0 us
f1 finished : 2 s 2 ms 515 us
f1 start : 4 s 9 ms 335 us
f1 finished : 6 s 9 ms 932 us
f1 start : 8 s 17 ms 428 us
f2 start : 10 s 12 ms 794 us
f1 finished : 10 s 28 ms 633 us
f1 start : 12 s 29 ms 182 us
f1 finished : 14 s 34 ms 411 us
f2 finished : 16 s 19 ms 330 us
f1 started before f2 was finished, which is what needs to be avoided.
To do so you need to combine a Queue and a Lock. The Lock will prevent worker threads from working at the same time. Find a code example below:
import time
from threading import Lock, Thread
import threading
from queue import Queue
class ThreadWorker(Thread):
def __init__(self, _lock: Lock, _queue: Queue, name: str):
# daemon=False means that process waits until all threads are finished
# (not only main one and garbage collector)
super().__init__(name=name, daemon=False)
# the lock prevents several worker threads from doing work simultaneously
self.lock = _lock
# tasks are sent from the main thread via the Queue
self.queue = _queue
def do_work(self, job):
# the lock context manager prevents other worker threads from working at the same time
with self.lock:
time.sleep(3)
print(f"{threading.current_thread().getName()}: {job * 10}")
def run(self):
while True:
job = self.queue.get()
# "poison pill" - stop message from the queue
if not job:
break
self.do_work(job)
if __name__ == '__main__':
q = Queue()
lock = Lock()
workers = [ThreadWorker(lock, q, f"Th-worker-{i}") for i in range(5)] # create workers
for w in workers:
w.start()
# produce tasks
for i in range(10):
q.put(i)
# stop tasks with "poison pillow"
for i in range(len(workers)):
q.put(None)
Edit based on additions to the question (Lock added)
The main idea is that you should not call f1 and f2 in the producer threads; instead, put the functions themselves on the queue and let the worker threads run them under the Lock.
import time
from threading import Lock, Thread
import threading
from queue import Queue
class ThreadWorker(Thread):
def __init__(self, _lock: Lock, _queue: Queue, name: str):
# daemon=False means that process waits until all threads are finished
# (not only main one and garbage collector)
super().__init__(name=name, daemon=False)
# the lock prevents several worker threads from doing work simultaneously
self.lock = _lock
# tasks are sent from the main thread via the Queue
self.queue = _queue
def do_work(self, f):
# the lock context manager prevents other worker threads from working at the same time
with self.lock:
time.sleep(3)
print(f"{threading.current_thread().getName()}: {f()}")
def run(self):
while True:
job = self.queue.get()
# "poison pill" - stop message from the queue
if not job:
break
self.do_work(job)
def TimeStamp(msg):
tElapsed = (time.time() - tStart) # Display Thread Info
sElap = int(tElapsed)
msElap = int((tElapsed - sElap) * 1000)
usElap = int((tElapsed - sElap - msElap / 1000) * 1000000)
print(msg, ': ', sElap, 's', msElap, 'ms', usElap, 'us')
def f1():
TimeStamp("f1 start")
time.sleep(1)
TimeStamp("f1 finished")
return f"Func-1-{threading.current_thread().getName()}"
def f2():
TimeStamp("f2 start")
time.sleep(3)
TimeStamp("f2 finished")
return f"Func-2-{threading.current_thread().getName()}"
def insertf1():
for i in range(5):
q.put(f1) # do not run f1 here! Run it in worker thread with Lock
def insertf2():
for i in range(5):
q.put(f2) # do not run f2 here! Run it in worker thread with Lock
q = Queue()
lock = Lock()
workers = [ThreadWorker(lock, q, f"Th-worker-{i}") for i in range(5)] # create workers
for w in workers:
w.start()
tStart = time.time()
threading.Thread(target=insertf1, daemon=True).start()
threading.Thread(target=insertf2, daemon=True).start()
I'm trying to create a loop of threads, and so far the code is good. But I have a problem when a thread exits because of some exception.
Now I'm trying to figure out how to start an additional thread after one thread exits because of an exception. I browsed around, but I didn't find any example that would work for this code. Any help would be great!
If a thread stops and the queue is not empty, I want to restart the stopped thread and continue with the rest of the list.
This is my code:
import threading, Queue
from time import sleep

some_list = [1,2,3,4,5,6,7,8]
exitFlag = 0
class threads():
@staticmethod
def process_data(threadName, q,queueLock):
workQueue = q
while not exitFlag:
queueLock.acquire()
if not workQueue.empty():
data = q.get()
queueLock.release()
print "%s processing %s" % (threadName, data)
else:
queueLock.release()
sleep(1)
def run_threads(self):
threadList = ["Thread-1", "Thread-2", "Thread-3"]
nameList = some_list
queueLock = threading.Lock()
workQueue = Queue.Queue(1000000)
threads = []
threadID = 1
# Create new threads
for tName in threadList:
thread = myThread(threadID, tName, workQueue,queueLock)
thread.start()
threads.append(thread)
threadID += 1
# Fill the queue
queueLock.acquire()
for word in nameList:
workQueue.put(word)
queueLock.release()
# Wait for queue to empty
while not workQueue.empty():
pass
# Notify threads it's time to exit
global exitFlag
exitFlag = 1
# Wait for all threads to complete
for t in threads:
t.join()
print "Exiting Main Thread"
class myThread (threading.Thread,threads):
def __init__(self, threadID, name, q,queueLock):
self.thread = threading.Thread(target=self.run)
threading.Thread.__init__(self,target=self.run)
self.threadID = threadID
self.queueLock = queueLock
self.name = name
self.q = q
def run(self):
print "Starting " + self.name
threads.process_data(self.name, self.q,self.queueLock)
print "Exiting " + self.name
threads().run_threads()
Something like this should work:
...
# Wait for queue to empty
while not workQueue.empty():
for (i, t) in enumerate(threads):
if not t.is_alive():
print("Recreating thread " + t.name)
thread = myThread(threadID, threadList[i], workQueue,queueLock)
thread.start()
threads[i] = thread
threadID += 1
...
I would advise putting the thread-starting code into a method, as it is now duplicated and hard to maintain.
The problem here is that you might lose the data that was popped from the queue by the failing thread.
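As a rough sketch of that refactoring (the helper name start_worker is invented here; the rest reuses the names from the code above), the thread-starting code could look like this:
def start_worker(thread_id, name, work_queue, queue_lock):
    # create, start and return one worker thread
    thread = myThread(thread_id, name, work_queue, queue_lock)
    thread.start()
    return thread

# initial pool
threads = [start_worker(i + 1, name, workQueue, queueLock)
           for i, name in enumerate(threadList)]

# later, when thread i is found dead:
# threads[i] = start_worker(threadID, threadList[i], workQueue, queueLock)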
I am building a multi-threading application.
I have set up a thread pool
[a Queue of size N and N Workers that get data from the queue].
When all tasks are done, I use
tasks.join()
where tasks is the queue.
The application seems to run smoothly until suddenly, at some point (after 20 minutes, for example), it terminates with the error
thread.error: can't start new thread
Any ideas?
Edit: The threads are daemon threads, and the code is like this:
while True:
t0 = time.time()
keyword_statuses = DBSession.query(KeywordStatus).filter(KeywordStatus.status==0).options(joinedload(KeywordStatus.keyword)).with_lockmode("update").limit(100)
if keyword_statuses.count() == 0:
DBSession.commit()
break
for kw_status in keyword_statuses:
kw_status.status = 1
DBSession.commit()
t0 = time.time()
w = SWorker(threads_no=32, network_server='http://192.168.1.242:8180/', keywords=keyword_statuses, cities=cities, saver=MySqlRawSave(DBSession), loglevel='debug')
w.work()
print 'finished'
When are the daemon threads killed?
When the application finishes, or when work() finishes?
Look at the thread pool and the worker (it's from a recipe):
from Queue import Queue
from threading import Thread, Event, current_thread
import time
event = Event()
class Worker(Thread):
"""Thread executing tasks from a given tasks queue"""
def __init__(self, tasks):
Thread.__init__(self)
self.tasks = tasks
self.daemon = True
self.start()
def run(self):
'''Start processing tasks from the queue'''
while True:
event.wait()
#time.sleep(0.1)
try:
func, args, callback = self.tasks.get()
except Exception, e:
print str(e)
return
else:
if callback is None:
func(args)
else:
callback(func(args))
self.tasks.task_done()
class ThreadPool:
"""Pool of threads consuming tasks from a queue"""
def __init__(self, num_threads):
self.tasks = Queue(num_threads)
for _ in range(num_threads): Worker(self.tasks)
def add_task(self, func, args=None, callback=None):
'''Add a task to the queue'''
self.tasks.put((func, args, callback))
def wait_completion(self):
'''Wait for completion of all the tasks in the queue'''
self.tasks.join()
def broadcast_block_event(self):
'''blocks running threads'''
event.clear()
def broadcast_unblock_event(self):
'''unblocks running threads'''
event.set()
def get_event(self):
'''returns the event object'''
return event
Also, maybe the problem is because I create SWorker objects in a loop?
What happens to the old SWorker (garbage collection?)?
There is still not enough code to localize the problem, but I'm sure this is because you don't reuse the threads and you start too many of them. Did you see the canonical example from the Queue documentation, http://docs.python.org/library/queue.html (bottom of the page)?
I can reproduce your problem with the following code:
import threading
import Queue
q = Queue.Queue()
def worker():
item = q.get(block=True) # blocks forever because nothing is ever put in the queue
do_work(item)
q.task_done()
# create infinite number of workers threads and fails
# after some time with "error: can't start new thread"
while True:
t = threading.Thread(target=worker)
t.start()
q.join() # never reached
Instead you must create a pool of threads with a known number of threads and put your data into the queue, like:
q = Queue()
def worker():
while True:
item = q.get()
do_work(item)
q.task_done()
for i in range(num_worker_threads):
t = Thread(target=worker)
t.daemon = True
t.start()
for item in source():
q.put(item)
q.join() # block until all tasks are done
UPD: In case you need to stop some thread, you can add a flag to it or send a special marker meaning "stop" to break the while loop:
class Worker(Thread):
break_msg = object() # just a unique marker object
def __init__(self):
Thread.__init__(self)
self.keep_running = True # "continue" is a reserved word in Python, so use another name
def run(self):
while self.keep_running: # can stop and destroy thread (var 1)
msg = queue.get(block=True)
if msg == self.break_msg:
return # will stop and destroy thread (var 2)
do_work()
queue.task_done()
workers = [Worker() for _ in xrange(num_workers)]
for w in workers:
w.start()
for task in tasks:
queue.put(task)
for _ in xrange(num_workers):
queue.put(Worker.break_msg) # stop threads after all tasks are done; need as many messages as you have threads
OR
queue.join() # wait until all tasks done
for w in workers:
w.keep_running = False
queue.put(None) # put one dummy item per worker so the blocked get() returns and the flag is re-checked