How to log to a single file with multiprocessing.Pool.apply_async - python

I can't get logging to a single file working with multiprocessing.Pool.apply_async.
I'm trying to adapt this example from the Logging Cookbook, but it only works for multiprocessing.Process. Passing the logging queue into apply_async doesn't seem to have an effect.
I would like to use a Pool so that I can easily manage the number of simultaneous threads.
The following adapted example with multiprocessing.Process works OK for me, except that I am not getting log messages from the main process, and I don't think it will work well when I have 100 large jobs.
import logging
import logging.handlers
import numpy as np
import time
import multiprocessing
import pandas as pd

log_file = 'PATH_TO_FILE/log_file.log'


def listener_configurer():
    root = logging.getLogger()
    h = logging.FileHandler(log_file)
    f = logging.Formatter('%(asctime)s %(processName)-10s %(name)s %(levelname)-8s %(message)s')
    h.setFormatter(f)
    root.addHandler(h)


# This is the listener process top-level loop: wait for logging events
# (LogRecords) on the queue and handle them, quit when you get a None for a
# LogRecord.
def listener_process(queue, configurer):
    configurer()
    while True:
        try:
            record = queue.get()
            if record is None:  # We send this as a sentinel to tell the listener to quit.
                break
            logger = logging.getLogger(record.name)
            logger.handle(record)  # No level or filter logic applied - just do it!
        except Exception:
            import sys, traceback
            print('Whoops! Problem:', file=sys.stderr)
            traceback.print_exc(file=sys.stderr)


def worker_configurer(queue):
    h = logging.handlers.QueueHandler(queue)  # Just the one handler needed
    root = logging.getLogger()
    root.addHandler(h)
    # send all messages, for demo; no other level or filter logic applied.
    root.setLevel(logging.DEBUG)


# This is the worker process top-level loop, which just logs ten events with
# random intervening delays before terminating.
# The print messages are just so you know it's doing something!
def worker_function(sleep_time, name, queue, configurer):
    configurer(queue)
    start_message = 'Worker {} started and will now sleep for {}s'.format(name, sleep_time)
    logging.info(start_message)
    time.sleep(sleep_time)
    success_message = 'Worker {} has finished sleeping for {}s'.format(name, sleep_time)
    logging.info(success_message)


def main_with_process():
    start_time = time.time()
    single_thread_time = 0.
    queue = multiprocessing.Queue(-1)
    listener = multiprocessing.Process(target=listener_process,
                                       args=(queue, listener_configurer))
    listener.start()
    workers = []
    for i in range(10):
        name = str(i)
        sleep_time = np.random.randint(10) / 2
        single_thread_time += sleep_time
        worker = multiprocessing.Process(target=worker_function,
                                         args=(sleep_time, name, queue, worker_configurer))
        workers.append(worker)
        worker.start()
    for w in workers:
        w.join()
    queue.put_nowait(None)
    listener.join()
    end_time = time.time()
    final_message = "Script execution time was {}s, but single-thread time was {}s".format(
        (end_time - start_time),
        single_thread_time
    )
    print(final_message)


if __name__ == "__main__":
    main_with_process()
But I can't get the following adaptation to work:
def main_with_pool():
    start_time = time.time()
    queue = multiprocessing.Queue(-1)
    listener = multiprocessing.Process(target=listener_process,
                                       args=(queue, listener_configurer))
    listener.start()
    pool = multiprocessing.Pool(processes=3)
    job_list = [np.random.randint(10) / 2 for i in range(10)]
    single_thread_time = np.sum(job_list)
    for i, sleep_time in enumerate(job_list):
        name = str(i)
        pool.apply_async(worker_function,
                         args=(sleep_time, name, queue, worker_configurer))
    queue.put_nowait(None)
    listener.join()
    end_time = time.time()
    print("Script execution time was {}s, but single-thread time was {}s".format(
        (end_time - start_time),
        single_thread_time
    ))


if __name__ == "__main__":
    main_with_pool()
I've tried many slight variations, using multiprocessing.Manager, multiprocessing.Queue, multiprocessing.get_logger, apply_async.get(), but haven't gotten any to work.
I would think there would be an off-the-shelf solution for this. Should I try Celery instead?
Thanks.

There are actually two separate problems here, which are intertwined:
You cannot pass a multiprocessing.Queue() object as an argument to a Pool-based function (you can pass it to the worker you start directly, but not any "further in" as it were).
You must wait for all the asynchronous workers to complete before you send the None through to your listener process.
To fix the first one, replace:
queue = multiprocessing.Queue(-1)
with:
queue = multiprocessing.Manager().Queue(-1)
as a manager-managed Queue() instance can be passed through.
To fix the second, either collect each result from each asynchronous call, or close the pool and wait for it, e.g.:
pool.close()
pool.join()
queue.put_nowait(None)
or the more complex:
getters = []
for i, sleep_time in enumerate(job_list):
    name = str(i)
    getters.append(
        pool.apply_async(worker_function,
                         args=(sleep_time, name, queue, worker_configurer))
    )
while len(getters):
    getters.pop().get()
# optionally, close and join pool here (generally a good idea anyway)
queue.put_nowait(None)
(You should also consider replacing your put_nowait with a waiting version of put and not using unlimited length queues.)
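Putting the two fixes together, a minimal sketch of the pool-based main (reusing worker_function, worker_configurer, listener_process and listener_configurer from the question; everything else here is illustrative) could look like this:

def main_with_pool_fixed():
    # A manager-backed queue can be pickled and passed into pool workers.
    queue = multiprocessing.Manager().Queue(-1)
    listener = multiprocessing.Process(target=listener_process,
                                       args=(queue, listener_configurer))
    listener.start()

    job_list = [np.random.randint(10) / 2 for i in range(10)]
    with multiprocessing.Pool(processes=3) as pool:
        results = [pool.apply_async(worker_function,
                                    args=(sleep_time, str(i), queue, worker_configurer))
                   for i, sleep_time in enumerate(job_list)]
        for r in results:
            r.get()      # wait for each job and surface any worker exception
        pool.close()
        pool.join()

    queue.put(None)      # blocking put of the sentinel, only after all workers are done
    listener.join()

Either the r.get() loop or the close()/join() pair on its own is enough to guarantee the workers have finished before the sentinel is sent; the sketch shows both for clarity.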

Consider using two queues. The first queue is where you put the data for the workers. Each worker after job completion pushes the results to the second queue. Now consume this second queue to write the log to the file.
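A rough sketch of that two-queue arrangement (all names here are illustrative, not taken from the question) might be:

import multiprocessing

def worker(job, result_queue):
    # ... do the real work here ...
    result_queue.put('finished job {}'.format(job))

def log_writer(result_queue, log_path):
    # a single process owns the file; it drains the result queue until the None sentinel
    with open(log_path, 'a') as f:
        for message in iter(result_queue.get, None):
            f.write(message + '\n')

if __name__ == '__main__':
    manager = multiprocessing.Manager()
    results = manager.Queue()
    writer = multiprocessing.Process(target=log_writer, args=(results, 'log_file.log'))
    writer.start()

    with multiprocessing.Pool(processes=3) as pool:
        pool.starmap(worker, [(job, results) for job in range(10)])

    results.put(None)   # tell the writer there is nothing more to log
    writer.join()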

[ADDENDUM] Regarding maxtasksperchild=1
You don't really need it. The repeated messages were caused by repeatedly adding QueueHandlers to the root logger of a child process. The following code checks whether any handlers exist before adding another:
def worker_configurer(queue):
    root = logging.getLogger()
    # print(f'{root.handlers=}')
    if len(root.handlers) == 0:
        h = logging.handlers.QueueHandler(queue)
        root.addHandler(h)
        root.setLevel(logging.DEBUG)
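To see why the check matters, recall that a Pool reuses its worker processes across tasks (unless maxtasksperchild is set), so configurer(queue) runs once per task rather than once per process. A small diagnostic variant of the worker (illustrative only, not from the answer) makes the accumulation visible:

import os

def worker_function(sleep_time, name, queue, configurer):
    configurer(queue)   # may run many times in the same long-lived pool process
    root = logging.getLogger()
    logging.info('task %s in pid %s sees %d root handler(s)',
                 name, os.getpid(), len(root.handlers))
    time.sleep(sleep_time)

Without the len(root.handlers) guard, the handler count grows with every task the process picks up, and each log record is then emitted once per handler.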

Related

Async IO switch coroutine on multiprocessing.queue.get() not ready

I am writing some code where I have 3 processes (spawned from the main process). The first one uses Async IO to create 3 coroutines and switch between them. The last two processes run independently and generate two outputs that are used in one of the coroutines of the first process.
The communication is managed using multiprocessing.Queue(): the main process puts the input data into queue_source_position_hrir_calculator and queue_source_position_cutoff_calculator, then these two queues are emptied by p2_hrir_computation_process and p3_cutoff_computation_process. These two processes put their computation results into two output queues, queue_computed_hrirs and queue_computed_cutoff.
Finally these two queues are consumed by the Async IO process, in particular inside the input_parameters_coroutine function.
The full code is the following (I will highlight the key parts in following snippets):
import asyncio
import multiprocessing
import numpy as np
import time
from classes.HRIR_interpreter_min_phase_linear_interpolation import HRIR_interpreter_min_phase_linear_interpolation
from classes.object_renderer import ObjectRenderer

#Useful resources: https://bbc.github.io/cloudfit-public-docs/asyncio/asyncio-part-2
#https://realpython.com/async-io-python/

Fs = 44100

# region Async_IO functions
async def audio_input_coroutine(overlay):
    for i in range(0, 100):
        print('Executing audio input coroutine')
        print(overlay)
        await asyncio.sleep(1/(Fs*4))

async def input_parameters_coroutine(overlay, queue_computed_hrirs, queue_computed_cutoff):
    for i in range(0, 10):
        print('Executing audio input_parameters coroutine')
        #print(overlay)
        current_hrir = queue_computed_hrirs.get()
        print('got current hrir')
        current_cutoff = queue_computed_cutoff.get()
        print('got current cutoff')
        await asyncio.sleep(0.5)

async def audio_output_coroutine(overlay):
    for i in range(0, 10):
        print('Executing audio_output coroutine')
        #print(overlay)
        await asyncio.sleep(0.5)

async def main_coroutine(overlay, queue_computed_hrirs, queue_computed_cutoff):
    await asyncio.gather(audio_input_coroutine(overlay), input_parameters_coroutine(overlay, queue_computed_hrirs, queue_computed_cutoff), audio_output_coroutine(overlay))

def async_IO_main_process(queue_computed_hrirs, queue_computed_cutoff):
    overlay = 10
    asyncio.run(main_coroutine(overlay, queue_computed_hrirs, queue_computed_cutoff))
# endregion

# region HRIR_computation_process
def compute_hrir(queue_source_position, queue_computed_hrirs):
    print('computing hrir')
    SOFA_filename = '../HRTF_data/HUTUBS_min_phase.sofa'
    # loading the simulated dataset using the support class HRIRInterpreter
    HRIRInterpreter = HRIR_interpreter_min_phase_linear_interpolation(SOFA_filename=SOFA_filename)
    # variable to check if I have other positions in my input queue
    eof_source_position = False
    # Un-comment following line to return when no more messages
    while not eof_source_position:
    #while True:
        # print('inside while loop')
        time.sleep(1)
        # print('state of the queue', queue_source_position.empty())
        if not eof_source_position:
            position = queue_source_position.get()
            if position is None:
                eof_source_position = True  # end of messages indicator
            else:
                required_IR = HRIRInterpreter.get_interpolated_IR(position[0], position[1], 1)
                queue_computed_hrirs.put(required_IR)
                # print('printing computed HRIR:', required_IR)
    print('completed hrir computation, adding none to queue')
    queue_computed_hrirs.put(None)  # end of messages indicator
    print('completed hrir process')
# endregion

# region cutoff_computation_process
def compute_cutoff(queue_source_position, queue_computed_cutoff):
    print('computing cutoff')
    cutoff = 20000
    object_renderer = ObjectRenderer()
    object_positions = np.array([(20, 0), (40, 0), (100, 0), (225, 0)])
    eof_source_position = False
    # Un-comment following line to return when no more messages
    while not eof_source_position:
    #while True:
        time.sleep(1)
        object_renderer.update_object_position(object_positions)
        if not eof_source_position:
            print('inside source position update')
            source_position = queue_source_position.get()
            if source_position is None:  # end of messages indicator
                eof_source_position = True
            else:
                cutoff = object_renderer.get_cutoff(azimuth=source_position[0], elevation=source_position[1])
                queue_computed_cutoff.put(cutoff)
    queue_computed_cutoff.put(None)  # end of messages indicator
# endregion

if __name__ == "__main__":
    import time

    queue_source_position_hrir_calculator = multiprocessing.Queue()
    queue_source_position_cutoff_calculator = multiprocessing.Queue()
    queue_computed_hrirs = multiprocessing.Queue()
    queue_computed_cutoff = multiprocessing.Queue()

    i = 0.0
    #Basically here I am writing a sequence of positions into the queue
    #then I add a None value to detect when I am done with the simulation so the process can end
    for _ in range(10):
        # print('into main while-> source_position:', source_position[0])
        source_position = np.array([i, 0.0])
        queue_source_position_hrir_calculator.put(source_position)
        queue_source_position_cutoff_calculator.put(source_position)
        i += 10

    queue_source_position_hrir_calculator.put(None)  # "end of messages" indicator
    queue_source_position_cutoff_calculator.put(None)  # "end of messages" indicator

    p1_async_IO_process = multiprocessing.Process(target=async_IO_main_process, args=(queue_computed_hrirs, queue_computed_cutoff))  # process that manages the ASYNC_IO coroutines between DMAs
    p2_hrir_computation_process = multiprocessing.Process(target=compute_hrir, args=(queue_source_position_hrir_calculator, queue_computed_hrirs))
    p3_cutoff_computation_process = multiprocessing.Process(target=compute_hrir, args=(queue_source_position_cutoff_calculator, queue_computed_cutoff))

    p1_async_IO_process.start()
    p2_hrir_computation_process.start()
    p3_cutoff_computation_process.start()

    #temp cycle to join processes
    #for _ in range(2):
    #    current_hrir = queue_computed_hrirs.get()
    #    current_cutoff = queue_computed_cutoff.get()

    print('joining async_IO process')
    p1_async_IO_process.join()
    print('joined async_IO process')
    #NB: to join a process, its queues must be empty. So before calling the join on p2, I should get the values from the queue_computed_hrirs queue
    print('joining hrir computation process')
    p2_hrir_computation_process.join()
    print('joined hrir computation process')
    print('joining hrir computation process')
    p2_hrir_computation_process.join()
    print('joined hrir computation process')
    print('joining cutoff computation process')
    p3_cutoff_computation_process.join()
    print('joined cutoff computation process')

    print("completed main")
The important part of the code is:
async def input_parameters_coroutine(overlay, queue_computed_hrirs, queue_computed_cutoff):
    for i in range(0, 10):
        print('Executing audio input_parameters coroutine')
        #print(overlay)
        current_hrir = queue_computed_hrirs.get()
        print('got current hrir')
        current_cutoff = queue_computed_cutoff.get()
        print('got current cutoff')
        await asyncio.sleep(0.5)
This coroutine receives as input 3 variables: overlay (which is a dummy variable I am using for future developments) and the two multiprocessing.Queue() instances, queue_computed_hrirs and queue_computed_cutoff.
At the moment my input_parameters_coroutine gets "stuck" while executing current_hrir = queue_computed_hrirs.get() and current_cutoff = queue_computed_cutoff.get(). I say "stuck" because the code works fine and completes its execution; the problem is that those two calls are blocking, so my coroutine stops until it has something to get from the queue.
What I would like to achieve is: try to execute current_hrir = queue_computed_hrirs.get(); if it is not possible at that moment, switch to another coroutine and let it do its work, then come back and check whether current_hrir = queue_computed_hrirs.get() can be executed; if yes, go on, if not, switch to another coroutine again and let it do its job.
I saw that there are some problems in making async IO and multiprocessing communicate ( What kind of problems (if any) would there be combining asyncio with multiprocessing? , Can I somehow share an asynchronous queue with a subprocess? ) but I wasn't able to find a smart solution to my problem.
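One common workaround (a sketch, not taken from the question) is to hand the blocking Queue.get() to a thread via loop.run_in_executor(), so the event loop can keep switching between coroutines while the call blocks:

async def input_parameters_coroutine(overlay, queue_computed_hrirs, queue_computed_cutoff):
    loop = asyncio.get_running_loop()
    for i in range(0, 10):
        print('Executing audio input_parameters coroutine')
        # run the blocking get() in a worker thread; other coroutines keep running meanwhile
        current_hrir = await loop.run_in_executor(None, queue_computed_hrirs.get)
        print('got current hrir')
        current_cutoff = await loop.run_in_executor(None, queue_computed_cutoff.get)
        print('got current cutoff')
        await asyncio.sleep(0.5)

An alternative along the same lines is to poll with get_nowait(), catch queue.Empty, and await asyncio.sleep(...) between attempts so control returns to the other coroutines.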

processing very large text files in parallel using multiprocessing and threading

I have found several other questions that touch on this topic but none that are quite like my situation.
I have several very large text files (3+ gigabytes in size).
I would like to process them (say 2 documents) in parallel using multiprocessing. As part of my processing (within a single process) I need to make an API call, and because of this I would like each process to have its own threads to run asynchronously.
I have come up with a simplified example (I have commented the code to try to explain what I think it should be doing):
import multiprocessing
from threading import Thread
import threading
from queue import Queue
import time


def process_huge_file(*, file_, batch_size=250, num_threads=4):
    # create APICaller instance for each process that has its own Queue
    api_call = APICaller()
    batch = []
    # create threads that will run asynchronously to make API calls
    # I expect these to immediately block since there is nothing in the Queue (which is what
    # api_call.run depends on to make a call)
    threads = []
    for i in range(num_threads):
        thread = Thread(target=api_call.run)
        threads.append(thread)
        thread.start()
    for thread in threads:
        thread.join()
    ####
    # start processing the file line by line
    for line in file_:
        # if we are at our batch size, add the batch to the api_call to let the threads do
        # their api calling
        if i % batch_size == 0:
            api_call.queue.put(batch)
        else:
            # add fake line to batch
            batch.append(fake_line)


class APICaller:
    def __init__(self):
        # thread safe queue to feed the threads which point at instances
        # of these APICaller objects
        self.queue = Queue()

    def run(self):
        print("waiting for something to do")
        self.queue.get()
        print("processing item in queue")
        time.sleep(0.1)
        print("finished processing item in queue")


if __name__ == "__main__":
    # fake docs
    fake_line = "this is a fake line of some text"
    # two fake docs with line length == 1000
    fake_docs = [[fake_line] * 1000 for i in range(2)]
    ####
    num_processes = 2
    procs = []
    for idx, doc in enumerate(fake_docs):
        proc = multiprocessing.Process(target=process_huge_file, kwargs=dict(file_=doc))
        proc.start()
        procs.append(proc)
    for proc in procs:
        proc.join()
As the code is now, "waiting for something to do" prints 8 times (which makes sense: 4 threads per process) and then it stops or "deadlocks", which is not what I expect - I expect it to start sharing time with the threads as soon as I start putting items in the Queue, but the code does not appear to make it this far. I ordinarily would step through to find the hang-up, but I still don't have a solid understanding of how best to debug using threads (another topic for another day).
In the meantime, can someone help me figure out why my code is not doing what it should be doing?
I have made a few adjustments and additions and the code appears to do what it is supposed to now. The main adjustments are: adding a CloseableQueue class (from Brett Slatkin's Effective Python, Item 55), and ensuring that I call close and join on the queue so that the threads exit properly. Full code with these changes below:
import multiprocessing
from threading import Thread
import threading
from queue import Queue
import time
from concurrency_utils import CloseableQueue


def sync_process_huge_file(*, file_, batch_size=250):
    batch = []
    for idx, line in enumerate(file_):
        # do processing on the text
        if idx % batch_size == 0:
            time.sleep(0.1)
            batch = []
            # api_call.queue.put(batch)
        else:
            computation = 0
            for i in range(100000):
                computation += i
            batch.append(line)


def process_huge_file(*, file_, batch_size=250, num_threads=4):
    api_call = APICaller()
    batch = []
    # api call threads
    threads = []
    for i in range(num_threads):
        thread = Thread(target=api_call.run)
        threads.append(thread)
        thread.start()
    for idx, line in enumerate(file_):
        # do processing on the text
        if idx % batch_size == 0:
            api_call.queue.put(batch)
        else:
            computation = 0
            for i in range(100000):
                computation += i
            batch.append(line)
    for _ in threads:
        api_call.queue.close()
    api_call.queue.join()
    for thread in threads:
        thread.join()


class APICaller:
    def __init__(self):
        self.queue = CloseableQueue()

    def run(self):
        for item in self.queue:
            print("waiting for something to do")
            pass
            print("processing item in queue")
            time.sleep(0.1)
            print("finished processing item in queue")
        print("exiting run")


if __name__ == "__main__":
    # fake docs
    fake_line = "this is a fake line of some text"
    # two fake docs with line length == 1000
    fake_docs = [[fake_line] * 10000 for i in range(2)]
    ####
    time_s = time.time()
    num_processes = 2
    procs = []
    for idx, doc in enumerate(fake_docs):
        proc = multiprocessing.Process(target=process_huge_file, kwargs=dict(file_=doc))
        proc.start()
        procs.append(proc)
    for proc in procs:
        proc.join()
    time_e = time.time()
    print(f"took {time_e-time_s} ")


class CloseableQueue(Queue):
    SENTINEL = object()

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def close(self):
        self.put(self.SENTINEL)

    def __iter__(self):
        while True:
            item = self.get()
            try:
                if item is self.SENTINEL:
                    return  # exit thread
                yield item
            finally:
                self.task_done()
As expected this is a great speedup from running synchronously - 120 seconds vs 50 seconds.

How to stop running my threads after a period of time?

I need to stop my running threads after a period of time; in this example I use only 120 seconds. I tried the following approach, but it does not work.
from threading import Thread
from Queue import Queue
import os
import time

timeout = 120  # [seconds]
timeout_start = time.time()

#while True :
def OpenWSN():
    os.system("./toto")

def Wireshark():
    os.system(" tshark -i tun0 -T ek -w /home/ptl/PCAP_Brouillon/Sim_Run3rd.pcap > /dev/null ")

def wrapper1(func, queue):
    queue.put(func())

def wrapper2(func, queue):
    queue.put(func())

q = Queue()
Thread(target=wrapper1, args=(OpenWSN, q)).start()
Thread(target=wrapper2, args=(Wireshark, q)).start()

#print (time.time())
print ("***************** End Simulation *************************")
os.system("quit")
I think this is what you are trying to achieve:
import threading
from queue import Queue
import os
import time

timeout = 120  # [seconds]
timeout_start = time.time()

def OpenWSN():
    print("OpenWSN:")
    os.system("echo -OpenWSN-")

def Wireshark():
    print("Wireshark:")
    os.system("echo -Wireshark-")

def wrapper1(func, queue):
    queue.put(func())

def wrapper2(func, queue):
    queue.put(func())

q = Queue()
threading.Thread(target=wrapper1, args=(OpenWSN, q)).start()
threading.Thread(target=wrapper2, args=(Wireshark, q)).start()

cv = threading.Condition()
cv.acquire()
cv.wait(timeout)

print("***************** End Simulation *************************")
print(" Simulation Time: {0}s".format(time.time() - timeout_start))
os.system("echo -exit-")
This produces the following output:
C:\temp\StackExchange\StopRunningThread>python -B stop-running-thread.py
OpenWSN:
Wireshark:
-OpenWSN-
-Wireshark-
***************** End Simulation *************************
Simulation Time: 120.04460144042969s
-exit-
What is happening there: you are starting two threads, each of which starts a separate process in the system. After those threads are started, you return to your main thread, allocate a "lock" (a condition variable) and wait until it is signalled or the timeout expires.
In this particular case nobody signals the lock, so the only way the application finishes is by hitting the timeout.
I would extend your application so that each thread function signals the lock, so the main thread can terminate as soon as both thread functions have finished.
But that was not part of your question, so I assume you can live without signalling.
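For completeness, a sketch of that extension (illustrative only, building on the code above and its timeout variable): each wrapper notifies the condition when its function returns, and the main thread waits until both have signalled or the timeout expires.

done_count = 0
cv = threading.Condition()

def wrapper(func, queue):
    global done_count
    queue.put(func())
    with cv:
        done_count += 1
        cv.notify()

# in the main thread, instead of a bare cv.wait(timeout):
deadline = time.time() + timeout
with cv:
    while done_count < 2 and time.time() < deadline:
        cv.wait(deadline - time.time())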

Kill threads after a period of time in python

I have Python code with threads, and I need the script to kill all threads and exit if they have not finished within, for example, 1 hour; if the hour is not up yet, it should wait for all my threads to finish.
I tried using a daemon thread that sleeps for the hour and then calls sys.exit(), but it does not work for me: the script always waits for my sleeping thread, so it waits until the worker threads have finished and the sys.exit() has no effect.
import socket, threading, time, sys
from sys import argv
import os

acc_time=0
transactions_ps=5

ins = open(sys.argv[1],'r')
msisdn_list = []
for line in ins:
    msisdn_list.append (line.strip('\n'))
    # print line
ins.close()

def worker(msisdn_list):
    semaphore.acquire()
    global transactions_ps
    print " ***** ", threading.currentThread().getName(), "Lanzado"
    count=1
    acc_time=0
    print "len: ",len(msisdn_list)
    for i in msisdn_list:
        try:
            init=time.time()
            time.sleep(2)
            print "sleeping...",i
            time.sleep(4)
            final=time.time()
            acc_time = acc_time+final-init
            print acc_time
        except IOError:
            print "Connection failed",sys.exc_info()[0]
    print "Deteniendo ",threading.currentThread().getName()
    semaphore.release()

def kill_process(secs_to_die):
    time.sleep(secs_to_die)
    sys.exit()

seconds_to_die=3600
thread_kill = threading.Thread(target = kill_process, args=(seconds_to_die,))
thread_kill.start()

max_con=5
semaphore = threading.BoundedSemaphore(max_con)
for i in range(0,28,transactions_ps):
    w = threading.Thread(target=worker, args=(msisdn_list[i:i+transactions_ps-1],))
    w.setDaemon(True)
    w.start()
How can I do this?
A minimal change to your code that would fix the issue is threading.Barrier:
barrier = Barrier(number_of_threads, timeout=3600)
# create (number_of_threads - 1) threads, pass them barrier
# each thread calls barrier.wait() on exit
barrier.wait() # after number_of_threads .wait() calls or on timeout it returns
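If it helps, a self-contained sketch of that Barrier approach (the worker body is a placeholder, not code from the question) might look like this:

import threading
import time

number_of_threads = 5
barrier = threading.Barrier(number_of_threads, timeout=3600)

def worker(n):
    time.sleep(n)               # placeholder for the real work
    try:
        barrier.wait()          # signal "done"; returns once all parties have arrived
    except threading.BrokenBarrierError:
        pass                    # the barrier timed out; the main thread is giving up anyway

for n in range(number_of_threads - 1):
    threading.Thread(target=worker, args=(n,), daemon=True).start()

try:
    barrier.wait()              # main thread is the last party
except threading.BrokenBarrierError:
    print("timeout")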
A simpler alternative is to use multiprocessing.dummy.Pool, which creates daemon threads:
from multiprocessing.dummy import Pool # use threads
start = timer()
endtime = start + 3600
for result in pool.imap_unordered(work, args):
    if timer() > endtime:
        exit("timeout")
The code doesn't time out until a work item is done, i.e., it expects that processing a single item from the list doesn't take long.
Complete example:
#!/usr/bin/env python3
import logging
import multiprocessing as mp
from multiprocessing.dummy import Pool
from time import monotonic as timer, sleep

info = mp.get_logger().info

def work(i):
    info("start %d", i)
    sleep(1)
    info("end %d", i)

seconds_to_die = 3600
max_con = 5
mp.log_to_stderr().setLevel(logging.INFO)  # enable logging
pool = Pool(max_con)  # no more than max_con at a time
start = timer()
endtime = start + seconds_to_die
for _ in pool.imap_unordered(work, range(10000)):
    if timer() > endtime:
        exit("timeout")
You may refer to this implementation of KThread:
http://python.todaysummary.com/q_python_45717.html

Threads not stopping in python

The purpose of my program is to download a file with threads. I define a unit (block size) and use len/unit threads, where len is the length of the file that is going to be downloaded.
Using my program, the file can be downloaded, but the threads are not stopping. I can't find the reason why.
This is my code...
#! /usr/bin/python
import urllib2
import threading
import os
from time import ctime

class MyThread(threading.Thread):
    def __init__(self,func,args,name=''):
        threading.Thread.__init__(self);
        self.func = func;
        self.args = args;
        self.name = name;
    def run(self):
        apply(self.func,self.args);

url = 'http://ubuntuone.com/1SHQeCAQWgIjUP2945hkZF';
request = urllib2.Request(url);
response = urllib2.urlopen(request);
meta = response.info();
response.close();
unit = 1000000;
flen = int(meta.getheaders('Content-Length')[0]);
print flen;
if flen%unit == 0:
    bs = flen/unit;
else :
    bs = flen/unit+1;
blocks = range(bs);
cnt = {};
for i in blocks:
    cnt[i]=i;

def getStr(i):
    try:
        print 'Thread %d start.'%(i,);
        fout = open('a.zip','wb');
        fout.seek(i*unit,0);
        if (i+1)*unit > flen:
            request.add_header('Range','bytes=%d-%d'%(i*unit,flen-1));
        else :
            request.add_header('Range','bytes=%d-%d'%(i*unit,(i+1)*unit-1));
        #opener = urllib2.build_opener();
        #buf = opener.open(request).read();
        resp = urllib2.urlopen(request);
        buf = resp.read();
        fout.write(buf);
    except BaseException:
        print 'Error';
    finally :
        #opener.close();
        fout.flush();
        fout.close();
        del cnt[i];
        # filelen = os.path.getsize('a.zip');
        print 'Thread %d ended.'%(i),
        print cnt;
        # print 'progress : %4.2f'%(filelen*100.0/flen,),'%';

def main():
    print 'download at:',ctime();
    threads = [];
    for i in blocks:
        t = MyThread(getStr,(blocks[i],),getStr.__name__);
        threads.append(t);
    for i in blocks:
        threads[i].start();
    for i in blocks:
        # print 'this is the %d thread;'%(i,);
        threads[i].join();
    #print 'size:',os.path.getsize('a.zip');
    print 'download done at:',ctime();

if __name__=='__main__':
    main();
Could someone please help me understand why the threads aren't stopping?
I can't really address your code example because it is quite messy and hard to follow, but a potential reason the threads don't end is that a request stalls out and never finishes. urllib2 allows you to specify a timeout for how long you will allow a request to take.
What I would recommend for your own code is that you split your work up into a queue, start a fixed number of threads (instead of a variable number), and let the worker threads pick up work until it is done. Make the HTTP requests have a timeout. If the timeout expires, try again or put the work back into the queue.
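For the timeout part specifically, urllib2.urlopen() accepts a timeout argument in seconds, so a guarded request might look roughly like this (a sketch only, reusing the request object from your code):

import socket
import urllib2

try:
    resp = urllib2.urlopen(request, timeout=30)   # give up if the server stalls
    buf = resp.read()
except (urllib2.URLError, socket.timeout):
    buf = None   # retry here, or put the block back on the work queue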
Here is a generic example of how to use a queue, a fixed number of workers and a sync primitive between them:
import threading
import time
from Queue import Queue

def worker(queue, results, lock):
    local_results = []
    while True:
        val = queue.get()
        if val is None:
            break
        # pretend to do work
        time.sleep(.1)
        local_results.append(val)
    with lock:
        results.extend(local_results)
    print threading.current_thread().name, "Done!"

num_workers = 4
threads = []
queue = Queue()
lock = threading.Lock()
results = []
for i in xrange(100):
    queue.put(i)
for _ in xrange(num_workers):
    # Use None as a sentinel to signal the threads to end
    queue.put(None)
    t = threading.Thread(target=worker, args=(queue,results,lock))
    t.start()
    threads.append(t)
for t in threads:
    t.join()
print sorted(results)
print "All done"
