My multi-threading script is raising this error:
thread.error: can't start new thread
when it reaches 460 threads:
threading.active_count() = 460
I assume the old threads keep stacking up, since the script doesn't kill them. This is my code:
import threading
import Queue
import time
import os
import csv

def main(worker):
    # Do work
    print worker
    return

def threader():
    while True:
        worker = q.get()
        main(worker)
        q.task_done()

def main_threader(workers):
    global q
    global city
    q = Queue.Queue()
    for x in range(20):
        t = threading.Thread(target=threader)
        t.daemon = True
        print "\n\nthreading.active_count() = " + str(threading.active_count()) + "\n\n"
        t.start()
    for worker in workers:
        q.put(worker)
    q.join()
How do I kill the old threads when their job is done? (Is the function returning not enough?)
Python's threading API doesn't have any function to kill a thread (there is nothing like threading.kill(PID)).
That said, you have to code some thread-stopping logic yourself. For example, your thread should somehow decide that it should terminate (e.g. check some global variable, or check whether some signal has been sent) and simply return.
For example:
import threading

nthreads = 7
you_should_stop = [0 for _ in range(nthreads)]

def Athread(number):
    while True:
        if you_should_stop[number]:
            print "Thread {} stopping...".format(number)
            return
        print "Running..."

for x in range(nthreads):
    threading.Thread(target=Athread, args=(x,)).start()

for x in range(nthreads):
    you_should_stop[x] = 1

print "\nStopped all threads!"
I am trying to create 3 threads within each of 2 processes and share a queue of type multiprocessing.JoinableQueue among all threads. The worker_func function simply creates the threads, while the thread_func function prints out the values it gets from the queue. The program gets stuck somewhere in time.sleep or in the queue's get() method. What am I doing wrong? I am running on a Windows computer.
import threading
from multiprocessing import Pool, Manager, JoinableQueue
import multiprocessing
from threading import Thread
import time

def thread_func(q, disp_lock):
    with disp_lock:
        print('thread ', threading.current_thread().name, ' in process ',
              multiprocessing.current_process().name, ' reporting for duty')
    while True:
        time.sleep(0.1)
        try:
            val = q.get_nowait()
            with disp_lock:
                print('thread ', threading.current_thread().name, ' in process ',
                      multiprocessing.current_process().name, ' got value: ', val)
            q.task_done()
        except:
            with disp_lock:
                print('queue is empty: ', q.qsize())

def worker_func(num_threads, q, disp_lock):
    threads = []
    for i in range(num_threads):
        thread = Thread(target=thread_func, args=(q, disp_lock,))
        thread.daemon = True
        thread.start()

if __name__ == "__main__":
    manager = Manager()
    lock = manager.Lock()
    q1 = JoinableQueue()  # manager.Queue()
    q1_length = 20
    for i in range(q1_length):
        q1.put(i)
    processes = []
    num_processes = 2  # 2 processes
    num_threads = 3
    for _ in range(num_processes):
        p = multiprocessing.Process(target=worker_func, args=(num_threads, q1, lock,))  # create a new Process
        p.daemon = True
        p.start()
        processes.append(p)
    q1.join()
You are not allowing the threads to complete their work. Either set them as non-daemon, or explicitly wait for them to join:
def worker_func(num_threads, q, disp_lock):
    threads = []
    for i in range(num_threads):
        thread = Thread(target=thread_func, args=(q, disp_lock,))
        thread.daemon = True
        thread.start()
        threads.append(thread)
    # Wait for them to finish
    for thread in threads:
        thread.join()
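For the first option, a minimal sketch of the non-daemon variant (not from the original answer; daemon simply stays at its default of False, so the process will not exit while these threads are still running):

def worker_func(num_threads, q, disp_lock):
    for i in range(num_threads):
        thread = Thread(target=thread_func, args=(q, disp_lock,))
        # daemon defaults to False, so the interpreter waits
        # for this thread before the process can exit
        thread.start()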
I had been struggling to implement a proper dynamic multi-threading system until now. The idea is to spin up multiple new pools of sub-threads from the main thread (each pool has its own number of threads and queue size) to run functions, and the user can define whether the main thread should wait for a sub-thread to finish or just move on to the next line after starting it. This multi-threading logic helps to extract data in parallel at a high frequency.
The solution to my issue is shared below for everyone who wants it. If you have any doubts or questions, please let me know.
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 5 00:00:51 2021

@author: Tahasanul Abraham
"""

#%% Initialization of Libraries
import sys, os, inspect

currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
parentdir_1up = os.path.dirname(parentdir)
sys.path.insert(0, parentdir_1up)

from queue import Queue
from threading import Thread, Lock

class Worker(Thread):
    def __init__(self, tasks):
        Thread.__init__(self)
        self.tasks = tasks
        self.daemon = True
        self.lock = Lock()
        self.start()

    def run(self):
        while True:
            func, args, kargs = self.tasks.get()
            try:
                # a plain "terminate" string instead of a callable shuts this worker down
                if func.lower() == "terminate":
                    break
            except:
                # callables have no .lower(), so they land here and get executed
                try:
                    with self.lock:
                        func(*args, **kargs)
                except Exception as exception:
                    print(exception)
            self.tasks.task_done()

class ThreadPool:
    def __init__(self, num_threads, num_queue=None):
        if num_queue is None or num_queue < num_threads:
            num_queue = num_threads
        self.tasks = Queue(num_queue)
        self.threads = num_threads
        for _ in range(num_threads):
            Worker(self.tasks)

    # This function can be called to terminate all the worker threads of the queue
    def terminate(self):
        self.wait_completion()
        for _ in range(self.threads):
            self.add_task("terminate")
        return None

    # This function can be called to add new work to the queue
    def add_task(self, func, *args, **kargs):
        self.tasks.put((func, args, kargs))

    # This function can be called to wait till all the workers are done processing
    # the pending work. If this function is called, the main thread will not process
    # any new lines until all the workers are done with the pending work.
    def wait_completion(self):
        self.tasks.join()

    # This function can be called to check if there is any pending/running work in
    # the queue. If there is any work pending, the call will return Boolean True,
    # or else it will return Boolean False.
    def is_alive(self):
        if self.tasks.unfinished_tasks == 0:
            return False
        else:
            return True
#%% Standalone Run
if __name__ == "__main__":
    import time

    def test_return(x, d):
        print(str(x) + " - pool completed")
        d[str(x)] = x
        time.sleep(5)

    # 2 threads and a queue size of 1000000000
    pool = ThreadPool(2, 1000000000)
    r = {}
    for i in range(10):
        pool.add_task(test_return, i, r)
        print(str(i) + " - pool added")
    print("Waiting for completion")
    pool.wait_completion()
    print("pool done")

    # 1 thread and a queue size of 2
    pool = ThreadPool(1, 2)
    r = {}
    for i in range(10):
        pool.add_task(test_return, i, r)
        print(str(i) + " - pool added")
    print("Waiting for completion")
    pool.wait_completion()
    print("pool done")

    # 2 threads and a queue size of 1
    pool = ThreadPool(2, 1)
    r = {}
    for i in range(10):
        pool.add_task(test_return, i, r)
        print(str(i) + " - pool added")
    print("Waiting for completion")
    pool.wait_completion()
    print("pool done")
Making a new Pool
Using the above classes, one can make a pool of their own choice with the number of parallel threads they want and the size of the queue. Example of creating a pool of 10 threads with a queue size of 200:
pool = ThreadPool(10,200)
Adding work to the Pool
Once a pool is created, one can use pool.add_task to do sub-routine work. In my example version I used the pool to call a function with its arguments. For example, I called the test_return function with its arguments i and r.
pool.add_task(test_return, i, r)
Waiting for the pool to complete its work
If a pool is given some work to do, the user can either move on to other code lines or wait for the pool to finish its work before the next lines are read. To wait for the pool to finish the work and then return, a call to wait_completion is required. Example:
pool.wait_completion()
Terminate and close down the pool threads
Once the pool threads are no longer required, it is possible to terminate and close them down to save memory and release the blocked threads. This can be done by calling the following function.
pool.terminate()
Checking if there is any pending work in the pool
There is a function that can be called to check if there is any pending/running work in the queue. If there is any work pending, the call will return Boolean True, or else it will return Boolean False. To check whether the pool is working or not, call the following function.
pool.is_alive()
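For example, one might poll it before shutting the pool down. A sketch reusing the test_return function from the standalone run above (the 0.5-second poll interval is an arbitrary choice):

import time

pool = ThreadPool(2)
pool.add_task(test_return, 0, {})
while pool.is_alive():  # True while work is still pending or running
    time.sleep(0.5)
pool.terminate()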
I want to make an infinite loop function.
Here is my code:
def do_request():
    # my code here
    print(result)

while True:
    do_request()
When I use while True to do this, it's a little slow, so I want to use a thread pool to concurrently execute the function do_request(). How do I do this?
Just like using ab (Apache Bench) to test an HTTP server.
Finally, I've solved this problem. I used a variable to limit the number of threads.
Here is my final code, which solved my problem.
import threading
import time

thread_num = 0
lock = threading.Lock()

def do_request():
    global thread_num
    # -------------
    # my code here
    # -------------
    with lock:
        thread_num -= 1

while True:
    if thread_num <= 50:
        with lock:
            thread_num += 1
        t = threading.Thread(target=do_request)
        t.start()
    else:
        time.sleep(0.01)
Thanks for all the replies.
You can use threading in Python to implement this.
It can be something similar to this (using only two extra threads):
import threading
# define threads
task1 = threading.Thread(target = do_request)
task2 = threading.Thread(target = do_request)
# start both threads
task1.start()
task2.start()
# wait for threads to complete
task1.join()
task2.join()
Basically, you start as many threads as you need (making sure you don't start too many, so your system can handle them), then you .join() them to wait for the tasks to complete.
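If you need more than two, the same pattern scales with a list of threads. A minimal sketch, assuming do_request is the function from the question and NUM_THREADS is an arbitrary count your system can handle:

import threading

NUM_THREADS = 10  # an arbitrary count for illustration

threads = [threading.Thread(target=do_request) for _ in range(NUM_THREADS)]
for t in threads:
    t.start()
for t in threads:
    t.join()  # wait for all of them to complete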
Or you can get fancier with the multiprocessing module.
Try the following code:
import multiprocessing as mp
import time

def do_request():
    while True:
        print('I\'m making requests')
        time.sleep(0.5)

p = mp.Process(target=do_request)
p.start()

for ii in range(10):
    print('I\'m also doing other things though')
    time.sleep(0.7)

print('Now it is time to kill the service process')
p.terminate()
The main process starts a service process that makes the requests, carries on with its own work for as long as it needs to, and then terminates the service process.
Maybe you can use the concurrent.futures.ThreadPoolExecutor
from concurrent.futures import ThreadPoolExecutor
import time

def wait_on_b(hello):
    time.sleep(1)
    print(hello)
    return 5

def wait_on_a():
    time.sleep(1)
    print(a.result())  # blocks until future a has completed, then prints its result
    return 6

executor = ThreadPoolExecutor(max_workers=2)
a = executor.submit(wait_on_b, 3)
b = executor.submit(wait_on_a)
How about this?
from threading import Thread, Event

class WorkerThread(Thread):
    def __init__(self, logger, func):
        Thread.__init__(self)
        self.stop_event = Event()
        self.logger = logger
        self.func = func

    def run(self):
        self.logger("Going to start the infinite loop...")
        # your code: call func repeatedly until the stop event is set
        while not self.stop_event.is_set():
            self.func()

concur_task = WorkerThread(logger, func=do_request)
concur_task.start()
To end this thread...
concur_task.stop_event.set()
concur_task.join(10)  # or any timeout value you like
I have a list of X threads (potentially more than 100).
I want to run no more than five at the same time.
I came up with this:
import os
from os import listdir
from os.path import isfile, join
import shutil
import Image
import math
import threading

CAMERA_NUMBER = 21  # there are 21 cameras, from 1 to 21
ORDERED_SCAN_OUTPUT_FOLDER = "scanData"
PRETTY_PRINT_OUTPUT_FOLDER = "preview"
ROTATION_ANGLE = 90
RATIO = 0.4
IMAGE_PER_ROW = 7
MAX_THREAD = 5

def getNumberOfScanToProcess(absolute_folder):
    folder_list = get_all_folders_from(absolute_folder)  # helper defined elsewhere
    return len(folder_list)

""" if you have a thread list and you only want to run them 5 by 5, use this """
def runThreadListBlockByBlock(thread_list, number_of_simultaneous_threads):
    print ""
    print "launching thread list by runs of " + str(number_of_simultaneous_threads) + " thread(s)"
    thread_counter = 0
    initial_count = 0
    for thread_id in range(0, len(thread_list)):
        print "launching thread " + str(thread_id)
        thread_list[thread_id].start()
        thread_counter = thread_counter + 1
        if initial_count + number_of_simultaneous_threads == thread_counter:
            for thread_number in range(initial_count, thread_counter):
                print "waiting for thread " + str(thread_number)
                thread_list[thread_number].join()
            initial_count = thread_counter

class prettyPrintThread(threading.Thread):
    def __init__(self, folder_to_process, ratio, rotation_angle, image_per_row, output_folder, thread_id):
        super(prettyPrintThread, self).__init__()
        self.ratio = ratio
        self.rotation_angle = rotation_angle
        self.image_per_row = image_per_row
        self.output_folder = output_folder
        self.thread_id = thread_id
        self.folder_to_process = folder_to_process

    def run(self):
        # pretty_print is defined elsewhere
        pretty_print(self.folder_to_process, self.ratio, self.rotation_angle,
                     self.image_per_row, self.output_folder, self.thread_id)

script_absolute_folder = os.path.abspath(os.getcwd())
stored_scan_absolute_folder = join(script_absolute_folder, ORDERED_SCAN_OUTPUT_FOLDER)
scan_count = getNumberOfScanToProcess(stored_scan_absolute_folder)
thread_list = []

# Making the thread list
for thread_number in range(0, scan_count):
    print "preparing thread number " + str(thread_number)
    thread_list.append(prettyPrintThread(join(ORDERED_SCAN_OUTPUT_FOLDER, str(thread_number).zfill(4)),
                                         RATIO, ROTATION_ANGLE, IMAGE_PER_ROW, PRETTY_PRINT_OUTPUT_FOLDER, 1))

# launch 5 threads, wait for them to finish, then launch the 5 others and so on.
runThreadListBlockByBlock(thread_list, MAX_THREAD)
But the problem is that it waits until all 5 threads are finished before launching any more, while I could launch another thread as soon as one of them has finished.
Is there a way, like events/listeners in Java, to raise some kind of flag when a thread is finished?
Thanks
The best way to do this is probably to use a Semaphore object. Create a Semaphore with an initial value of five, then have your main thread (the one that controls the others) call the Semaphore's acquire() method (presumably in a loop) before starting each thread. This will block once five threads have been started.
The threads should call the Semaphore's release() method when they are done; this will wake up the main thread by allowing its acquire() call to proceed, whereupon it will start another thread, and so on until there is nothing left to do.
Once you have finished starting threads, do be careful to join() the final threads to ensure they terminate before exiting the main thread.
A BoundedSemaphore would also let you detect errors where the semaphore is released more times than it is acquired.
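A minimal sketch of this approach, assuming a placeholder worker function and 100 jobs (BoundedSemaphore is used so that an extra release() raises an error):

import threading

MAX_RUNNING = 5
semaphore = threading.BoundedSemaphore(MAX_RUNNING)

def worker(job_id):
    try:
        print("working on job %d" % job_id)  # placeholder for the real work
    finally:
        semaphore.release()  # lets the main thread start another worker

threads = []
for job_id in range(100):
    semaphore.acquire()  # blocks while five workers are already running
    t = threading.Thread(target=worker, args=(job_id,))
    t.start()
    threads.append(t)

# join the final threads so they terminate before the main thread exits
for t in threads:
    t.join()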
Use ThreadPoolExecutor from the concurrent.futures library (it has been backported to Python 2.7).
The usage is more or less as follows:
executor = ThreadPoolExecutor(max_workers=5)
futures = [
    executor.submit(callable_which_gets_the_job_done, some_argument=foo)
    for foo in bar
]
for foo, future in zip(bar, futures):
    print "callable_which_gets_the_job_done(some_argument=%s) returned %s" % (
        foo,
        future.result(),
    )
The executor will run callable_which_gets_the_job_done(some_argument=foo) with the foo values taken from bar. Each call will run in a separate thread, and the number of threads running at the same time will not exceed 5.
I don't know why I'm having such a problem with this. Basically, I want a queue called "Worker" that is constantly running during the program; then, every 10 seconds or so, another method called "Process" comes in and processes the data. Let's assume the following: data is captured every 10 seconds (0, 1, 2, 3, ..... n), the "Process" function receives it, processes it, and ends, and then the "Worker" goes back to work and does its job until the program has ended.
I have the following code:
import multiprocessing as mp
import time

DELAY_SIZE = 10

def Worker(q):
    print "I'm working..."

def Process(q):
    print "I'm processing.."

queue = mp.Queue(maxsize=DELAY_SIZE)
p = mp.Process(target=Worker, args=(queue,))
p.start()

while True:
    d = queue.get()
    time.sleep(10)
    Process()
In this example, it would look like the following:
I'm working...
I'm working...
I'm working...
...
...
...
I'm working...
I'm processing...
I'm processing...
I'm processing...
...
...
I'm working..
I'm working..
Any ideas?
Here is an alternative way using threads:
import threading
import Queue
import time

class Worker(threading.Thread):
    def __init__(self, q):
        threading.Thread.__init__(self)
        self._q = q

    def run(self):
        # here, worker does its job
        # results are pushed to the shared queue
        while True:
            print 'I am working'
            time.sleep(1)
            result = time.time()  # just an example
            self._q.put(result)

def process(q):
    while True:
        if q.empty():
            time.sleep(10)
        print 'I am processing'
        worker_result = q.get()
        # do whatever you want with the result...
        print " ", worker_result

if __name__ == '__main__':
    shared_queue = Queue.Queue()
    worker = Worker(shared_queue)
    worker.start()
    process(shared_queue)