Multi-processed file reading in Python

I want a file to be read in a multi-processed way. Each process will be a class instance doing some specific task, but the class where the file is being opened will be a singleton class.
We don't want to split the entire file up front across a fixed number of processes; instead, each process should asynchronously read a batch of lines from the text file, with no two processes ever receiving the same line.
I know this is quite a task to execute, and I have tried multiple ways to achieve it, but I was unsuccessful.
Below is the code snippet where I tried to achieve the above-mentioned behavior:
import traceback
# Assumption: Lock (threading or multiprocessing), log_message and
# log_exception are imported/defined elsewhere in the real module.

class ListFromTextFile():
    def __init__(self, config: dict):
        self.file_pointer = open(file_path, "r")
        self.batch_size = config["batch_size"]
        self.__lock = Lock()
        self.__lock_acquired = False

    def get_list_of_pids(self):
        function_name = "get_list_of_pids"
        pids = []
        try:
            for line in self.file_pointer:
                if self.__lock_acquired:
                    pid = line.strip("\n")
                    pids.append(pid)
                else:
                    self.__lock.acquire()
                    log_message(function_name, "Lock acquired !!!")
                    self.__lock_acquired = True
                if len(pids) == self.batch_size:
                    self.__lock.release()
                    log_message(function_name, "Lock released !!!")
                    self.__lock_acquired = False
                    yield pids
                    pids = []
            if len(pids):
                self.__lock.release()
                log_message(function_name, "Lock released !!!")
                yield pids
        except Exception as e:
            err = str(e)
            trace_back = traceback.format_exc()
            log_exception(function_name, err, trace_back)
Here ListFromTextFile is a singleton class, and processes will call get_list_of_pids(), which yields a list of pids. The requirement is that every batch generated and yielded across all processes must be unique.
Any suggestions to improve the above code or new ideas are welcome.

What you want is not necessarily a singleton but rather a class instance that can be shared among multiple processes. The most straightforward way I know of doing this (other people might have other ideas) is to create a managed class from ListFromTextFile. I would first modify its definition so that method get_list_of_pids is not a generator function (generators cannot be pickled). Instead you can just call it repeatedly to return the next batch of pids until an empty list is returned, signifying end of file on the input file. Since a managed object runs within a manager's process and its method calls are handled by threads, it is sufficient to run get_list_of_pids as a critical section using a threading.Lock.
For demo purposes my input file, test.txt, consists of 7 lines:
1
2
3
4
5
6
7
The Demo
from threading import Lock
from multiprocessing import Process, current_process
from multiprocessing.managers import BaseManager

file_path = 'test.txt'

class ListFromTextFile():
    def __init__(self, config: dict):
        self.file_pointer = open(file_path, "r")
        self.batch_size = config["batch_size"]
        self.eof = False
        self.__lock = Lock()

    def get_list_of_pids(self):
        function_name = "get_list_of_pids"
        pids = []
        if self.eof:
            # Returning an empty list signifies end of file:
            return pids
        with self.__lock:
            while True:
                line = self.file_pointer.readline()
                if line == '':
                    self.eof = True
                    # Return what we have if anything.
                    return pids
                pids.append(line.strip('\n'))
                if len(pids) == self.batch_size:
                    return pids

class MyManager(BaseManager):
    pass

def worker(reader):
    import time

    while True:
        pids = reader.get_list_of_pids()
        if not pids:
            # Empty list returned -> end of file
            return
        print(f'Current pid: {current_process().pid}, pids: {pids}', flush=True)
        time.sleep(.2)  # Give other process a chance to run

def demo():
    MyManager.register('ListFromTextFile', ListFromTextFile)
    with MyManager() as manager:
        config = {'batch_size': 3}
        reader = manager.ListFromTextFile(config)
        p1 = Process(target=worker, args=(reader,))
        p2 = Process(target=worker, args=(reader,))
        p1.start()
        p2.start()
        p1.join()
        p2.join()

if __name__ == '__main__':
    demo()
Prints:
Current pid: 1492, pids: ['1', '2', '3']
Current pid: 29072, pids: ['4', '5', '6']
Current pid: 1492, pids: ['7']
But Is There an Alternate Approach?
If each process does the processing for a single batch and then terminates (those details have not been provided), the main process can be doing all the batching and providing each batch to a multiprocessing pool:
from multiprocessing import Pool, current_process

file_path = 'test.txt'

def create_batch(config):
    batch_size = config["batch_size"]
    pids = []
    with open(file_path, "r") as f:
        for line in f:
            pids.append(line.strip('\n'))
            if len(pids) == batch_size:
                yield pids
                pids = []
    if pids:
        yield pids

def worker(pids):
    import time

    print(f'Current pid: {current_process().pid}, pids: {pids}', flush=True)
    time.sleep(.2)  # Give other process a chance to run

def demo():
    pool = Pool(2)
    # lazily evaluate the iterable using imap_unordered
    # as a memory-savings technique. Specify a chunksize if
    # the number of batches are huge:
    config = {'batch_size': 3}
    pool.imap_unordered(worker, create_batch(config))
    # Wait for all tasks to complete:
    pool.close()
    pool.join()

if __name__ == '__main__':
    demo()
Prints:
Current pid: 4316, pids: ['1', '2', '3']
Current pid: 13424, pids: ['4', '5', '6']
Current pid: 4316, pids: ['7']
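As the comment in demo() notes, imap_unordered also accepts a chunksize argument; when the number of batches is huge, passing one reduces the per-task communication overhead. A minimal variation of the call above (chunksize=10 is only an illustrative value, not something from the original code):

pool.imap_unordered(worker, create_batch(config), chunksize=10)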

Related

Join timeout in multiprocessing

I have a dummy example that I want to apply multiprocessing to. Consider a scenario where you have a stream of numbers (which I call frames) coming in one by one, and I want to assign each one to whichever single process is currently available. So I am creating 4 processes that run a while loop, check whether there is any element in the queue, and then apply a function to it.
The problem is that when I join the processes, they get stuck in the while loop, even though I set the flag to stop the loop before joining. Somehow they still get stuck inside it.
Code:
# step 1, 4 processes
import multiprocessing as mp
import os
import time

class MpListOperations:
    def __init__(self):
        self.results_queue = mp.Manager().Queue()
        self.frames_queue = mp.Manager().Queue()
        self.flag = mp.Manager().Value(typecode='b', value=True)
        self.list_nums = list(range(0, 5000))

    def process_list(self):
        print(f"Process id {os.getpid()} started")
        while self.flag.value:
            # print(self.flag.value)
            if self.frames_queue.qsize():
                self.results_queue.put(self.frames_queue.get()**2)

    def create_processes(self, no_of_processes = mp.cpu_count()):
        print("Creating Processes")
        self.processes = [mp.Process(target=self.process_list) for _ in range(no_of_processes)]

    def start_processes(self):
        print(f"starting processes")
        for process in self.processes:
            process.start()

    def join_process(self):
        print("Joining Processes")
        while True:
            if not self.frames_queue.qsize():
                self.flag.value = False
                print("JOININNG HERE")
                for process in self.processes:
                    exit_code = process.join()
                    print(exit_code)
                print("BREAKING DONE")
                break

    def stream_frames(self):
        print("Streaming Frames")
        for frame in self.list_nums:
            self.frames_queue.put(frame)

if __name__=="__main__":
    start = time.time()
    mp_ops = MpListOperations()
    mp_ops.create_processes()
    mp_ops.start_processes()
    mp_ops.stream_frames()
    mp_ops.join_process()
    print(time.time()-start)
Now if I add a timeout parameter to join, even 0, i.e. exit_code = process.join(0), it works. I want to understand: in this scenario, if this code is correct, what should the value of the timeout be? Why does it work with a timeout and not without one? What is the proper way to implement multiprocessing here?
If you look at the documentation for a managed queue you will see that the qsize method only returns an approximate size. I would therefore not use it for testing when all the items have been taken off the frames queue. Presumably you want to let the processes run until all frames have been processed. The simplest way I know of would be to put N sentinel items on the frames queue after the actual frames have been put, where N is the number of processes getting from the queue. A sentinel item is a special value that cannot be mistaken for an actual frame and signals to the process that there are no more items for it to get from the queue (i.e. a quasi end-of-file item). In this case we can use None as the sentinel item. Each process then just continues to do get operations on the queue until it sees a sentinel item and then terminates. There is therefore no need for the self.flag attribute.
Here is the updated and simplified code. I have made some other minor changes that have been commented:
import multiprocessing as mp
import os
import time

class MpListOperations:
    def __init__(self):
        # Only create one manager process:
        manager = mp.Manager()
        self.results_queue = manager.Queue()
        self.frames_queue = manager.Queue()
        # No need to convert range to a list:
        self.list_nums = range(0, 5000)

    def process_list(self):
        print(f"Process id {os.getpid()} started")
        while True:
            frame = self.frames_queue.get()
            if frame is None:  # Sentinel?
                # Yes, we are done:
                break
            self.results_queue.put(frame ** 2)

    def create_processes(self, no_of_processes = mp.cpu_count()):
        print("Creating Processes")
        self.no_of_processes = no_of_processes
        self.processes = [mp.Process(target=self.process_list) for _ in range(no_of_processes)]

    def start_processes(self):
        print("Starting Processes")
        for process in self.processes:
            process.start()

    def join_processes(self):
        print("Joining Processes")
        for process in self.processes:
            # join returns None:
            process.join()

    def stream_frames(self):
        print("Streaming Frames")
        for frame in self.list_nums:
            self.frames_queue.put(frame)
        # Put sentinels:
        for _ in range(self.no_of_processes):
            self.frames_queue.put(None)

if __name__== "__main__":
    start = time.time()
    mp_ops = MpListOperations()
    mp_ops.create_processes()
    mp_ops.start_processes()
    mp_ops.stream_frames()
    mp_ops.join_processes()
    print(time.time()-start)
Prints:
Creating Processes
Starting Processes
Process id 28 started
Process id 29 started
Streaming Frames
Process id 33 started
Process id 31 started
Process id 38 started
Process id 44 started
Process id 42 started
Process id 45 started
Joining Processes
2.3660173416137695
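One detail the demo above leaves open is reading the squared values back out of results_queue. A minimal sketch, assuming it is placed in the main block after mp_ops.join_processes() (this is an addition for illustration, not part of the original answer):

# All workers have been joined, so nothing else will put to the queue
# and draining it from the main process is safe:
results = []
while not mp_ops.results_queue.empty():
    results.append(mp_ops.results_queue.get())
print(len(results))  # expect 5000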
Note for Windows
I have modified method start_processes to temporarily set attribute self.processes to None:
def start_processes(self):
    print("Starting Processes")
    processes = self.processes
    # Don't try to pickle list of processes:
    self.processes = None
    for process in processes:
        process.start()
    # Restore attribute:
    self.processes = processes
Otherwise under Windows we get a pickle error trying to serialize/deserialize a list of processes containing two or more multiprocessing.Process instances. The error is "TypeError: cannot pickle 'weakref' object." This can be demonstrated with the following code where we first try to pickle a list of 1 process and then a list of 2 processes:
import multiprocessing as mp
import os

class Foo:
    def __init__(self, number_of_processes):
        self.processes = [mp.Process(target=self.worker) for _ in range(number_of_processes)]
        self.start_processes()
        self.join_processes()

    def start_processes(self):
        processes = self.processes
        for process in self.processes:
            process.start()

    def join_processes(self):
        for process in self.processes:
            process.join()

    def worker(self):
        print(f"Process id {os.getpid()} started")
        print(f"Process id {os.getpid()} ended")

if __name__== "__main__":
    foo = Foo(1)
    foo = Foo(2)
Prints:
Process id 7540 started
Process id 7540 ended
Traceback (most recent call last):
File "C:\Booboo\test\test.py", line 26, in <module>
foo = Foo(2)
File "C:\Booboo\test\test.py", line 7, in __init__
self.start_processes()
File "C:\Booboo\test\test.py", line 13, in start_processes
process.start()
File "C:\Program Files\Python38\lib\multiprocessing\process.py", line 121, in start
self._popen = self._Popen(self)
File "C:\Program Files\Python38\lib\multiprocessing\context.py", line 224, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:\Program Files\Python38\lib\multiprocessing\context.py", line 327, in _Popen
return Popen(process_obj)
File "C:\Program Files\Python38\lib\multiprocessing\popen_spawn_win32.py", line 93, in __init__
reduction.dump(process_obj, to_child)
File "C:\Program Files\Python38\lib\multiprocessing\reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
TypeError: cannot pickle 'weakref' object
Process id 18152 started
Process id 18152 ended
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:\Program Files\Python38\lib\multiprocessing\spawn.py", line 116, in spawn_main
exitcode = _main(fd, parent_sentinel)
File "C:\Program Files\Python38\lib\multiprocessing\spawn.py", line 126, in _main
self = reduction.pickle.load(from_parent)
EOFError: Ran out of input
Your worker processes are stuck in the frames_queue.get() call. This happens because multiple processes can see that the queue is not empty, but only one of them gets the last item; the remaining processes then block waiting for a next item that never arrives.
One option is to add a Lock around the pair of operations that reads the queue's size and then gets an item, so that the check and the get happen atomically; a sketch of this follows below.
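A minimal sketch of that locking idea (assumption: a shared lock such as self.queue_lock = mp.Manager().Lock() is created in __init__ next to the queues; this lock is not in the original code):

def process_list(self):
    print(f"Process id {os.getpid()} started")
    while self.flag.value:
        frame = None
        # Make the size check and the get() atomic so that two workers
        # cannot both see one remaining item and race for it:
        with self.queue_lock:
            if self.frames_queue.qsize():
                frame = self.frames_queue.get()
        if frame is not None:
            self.results_queue.put(frame ** 2)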
Alternatively, you can avoid reading the size of the queue altogether by calling get() with a timeout, which lets each worker check the flag regularly:
import queue

TIMEOUT = 1  # seconds

class MpListOperations:
    #[...]

    def process_list(self):
        print(f"Process id {os.getpid()} started")
        previous = self.flag.value
        while self.flag.value:
            try:
                got = self.frames_queue.get(timeout=TIMEOUT)
            except queue.Empty:
                pass
            else:
                print(f"Gotten {got}")
                self.results_queue.put(got**2)
            _next = self.flag.value
            if previous != _next:
                print(f"Flag change: {_next}")
$ python ./test_mp.py
Creating Processes
starting processes
Process id 36566 started
Streaming Frames
Process id 36565 started
Process id 36564 started
Process id 36570 started
Process id 36567 started
Gotten 0
Process id 36572 started
Gotten 1
Gotten 2
Gotten 3
Process id 36579 started
Gotten 4
Gotten 5
Gotten 6
Process id 36583 started
Gotten 7
# [...]
Gotten 4997
Joining Processes
Gotten 4998
Gotten 4999
JOININNG HERE
Flag change: False
Flag change: False
Flag change: False
Flag change: False
Flag change: False
Flag change: False
Flag change: False
Flag change: False
Exit code : None
Exit code : None
Exit code : None
Exit code : None
Exit code : None
Exit code : None
Exit code : None
Exit code : None
BREAKING DONE
1.4375360012054443
Alternatively, using a multiprocessing.Pool object:
def my_func(arg):
    time.sleep(0.002)
    return arg**2

def get_input():
    for i in range(5000):
        yield i
        time.sleep(0.001)

if __name__=="__main__":
    start = time.time()
    mp_pool = mp.Pool()
    result = mp_pool.map(my_func, get_input())
    mp_pool.close()
    mp_pool.join()
    print(len(result))
    print(f"Duration: {time.time()-start}")
Giving:
$ python ./test_mp.py
5000
Duration: 6.847279787063599

processing very large text files in parallel using multiprocessing and threading

I have found several other questions that touch on this topic but none that are quite like my situation.
I have several very large text files (3+ gigabytes in size).
I would like to process them (say 2 documents) in parallel using multiprocessing. As part of my processing (within a single process) I need to make an API call, and because of this I would like each process to have its own threads to run asynchronously.
I have come up with a simplified example (I have commented the code to try to explain what I think it should be doing):
import multiprocessing
from threading import Thread
import threading
from queue import Queue
import time

def process_huge_file(*, file_, batch_size=250, num_threads=4):
    # create APICaller instance for each process that has its own Queue
    api_call = APICaller()
    batch = []
    # create threads that will run asynchronously to make API calls
    # I expect these to immediately block since there is nothing in the Queue (which is what
    # api_call.run depends on to make a call)
    threads = []
    for i in range(num_threads):
        thread = Thread(target=api_call.run)
        threads.append(thread)
        thread.start()
    for thread in threads:
        thread.join()
    ####
    # start processing the file line by line
    for line in file_:
        # if we are at our batch size, add the batch to the api_call to let the threads do
        # their api calling
        if i % batch_size == 0:
            api_call.queue.put(batch)
        else:
            # add fake line to batch
            batch.append(fake_line)

class APICaller:
    def __init__(self):
        # thread safe queue to feed the threads which point at instances
        # of these APICaller objects
        self.queue = Queue()

    def run(self):
        print("waiting for something to do")
        self.queue.get()
        print("processing item in queue")
        time.sleep(0.1)
        print("finished processing item in queue")

if __name__ == "__main__":
    # fake docs
    fake_line = "this is a fake line of some text"
    # two fake docs with line length == 1000
    fake_docs = [[fake_line] * 1000 for i in range(2)]
    ####
    num_processes = 2
    procs = []
    for idx, doc in enumerate(fake_docs):
        proc = multiprocessing.Process(target=process_huge_file, kwargs=dict(file_=doc))
        proc.start()
        procs.append(proc)
    for proc in procs:
        proc.join()
As the code is now, "waiting for something to do" prints 8 times (makes sense: 4 threads per process) and then it stops, or "deadlocks", which is not what I expect. I expect it to start sharing time with the threads as soon as I start putting items in the Queue, but the code does not appear to make it that far. I ordinarily would step through the code to find the hang-up, but I still don't have a solid understanding of how best to debug with threads (another topic for another day).
In the meantime, can someone help me figure out why my code is not doing what it should be doing?
I have made a few adjustments and additions and the code appears to do what it is supposed to now. The main adjustments are: adding a CloseableQueue class (from Brett Slatkin's Effective Python, Item 55), and ensuring that I call close and join on the queue so that the threads properly exit. Full code with these changes below:
import multiprocessing
from threading import Thread
import threading
from queue import Queue
import time
from concurrency_utils import CloseableQueue

def sync_process_huge_file(*, file_, batch_size=250):
    batch = []
    for idx, line in enumerate(file_):
        # do processing on the text
        if idx % batch_size == 0:
            time.sleep(0.1)
            batch = []
            # api_call.queue.put(batch)
        else:
            computation = 0
            for i in range(100000):
                computation += i
            batch.append(line)

def process_huge_file(*, file_, batch_size=250, num_threads=4):
    api_call = APICaller()
    batch = []
    # api call threads
    threads = []
    for i in range(num_threads):
        thread = Thread(target=api_call.run)
        threads.append(thread)
        thread.start()
    for idx, line in enumerate(file_):
        # do processing on the text
        if idx % batch_size == 0:
            api_call.queue.put(batch)
        else:
            computation = 0
            for i in range(100000):
                computation += i
            batch.append(line)
    for _ in threads:
        api_call.queue.close()
    api_call.queue.join()
    for thread in threads:
        thread.join()

class APICaller:
    def __init__(self):
        self.queue = CloseableQueue()

    def run(self):
        for item in self.queue:
            print("waiting for something to do")
            pass
            print("processing item in queue")
            time.sleep(0.1)
            print("finished processing item in queue")
        print("exiting run")

if __name__ == "__main__":
    # fake docs
    fake_line = "this is a fake line of some text"
    # two fake docs with line length == 1000
    fake_docs = [[fake_line] * 10000 for i in range(2)]
    ####
    time_s = time.time()
    num_processes = 2
    procs = []
    for idx, doc in enumerate(fake_docs):
        proc = multiprocessing.Process(target=process_huge_file, kwargs=dict(file_=doc))
        proc.start()
        procs.append(proc)
    for proc in procs:
        proc.join()
    time_e = time.time()
    print(f"took {time_e-time_s} ")

class CloseableQueue(Queue):
    SENTINEL = object()

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def close(self):
        self.put(self.SENTINEL)

    def __iter__(self):
        while True:
            item = self.get()
            try:
                if item is self.SENTINEL:
                    return  # exit thread
                yield item
            finally:
                self.task_done()
As expected this is a great speedup from running synchronously - 120 seconds vs 50 seconds.
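For reference, the synchronous baseline (the 120 seconds figure) can presumably be reproduced with the sync_process_huge_file function that is defined but never called above; a minimal timing sketch under that assumption:

time_s = time.time()
for doc in fake_docs:
    # Same work as the parallel version, but in a single process and thread:
    sync_process_huge_file(file_=doc)
print(f"synchronous run took {time.time() - time_s}")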

Python multiprocessing and too many open files

I have a problem with multiprocessing in Python. In the code below I start 7 workers (multiprocessing.Process) and one result thread (threading.Thread). Before and after processing the data (extracting some metadata from files), I run:
lsof | grep ' <user> ' | grep 'python3'
And I get some open handles as:
python3 17291 ivo DEL REG 0,20 5288943 /dev/shm/ZMcs2H
python3 17291 ivo DEL REG 0,20 5288942 /dev/shm/3iMR4q
python3 17291 ivo DEL REG 0,20 5288941 /dev/shm/XPYh79
and when running the multiprocessing code many times in a loop (processing a continuous stream of messages) I get
OSError: [Errno 24] Too many open files
Is there something wrong with the way I am dealing with the multiprocessing package?
def worker_process_results(meta_queue, res_dict):
    while True:
        try:
            (path, meta) = meta_queue.get()
            res_dict[path] = meta
        finally:
            meta_queue.task_done()

def multiprocess_get_metadata(paths, thread_count = 7):
    """ Scan files for metadata (multiprocessing). """
    file_queue = multiprocessing.JoinableQueue()
    meta_queue = multiprocessing.JoinableQueue()
    res_dict = dict()

    # result thread
    meta_thread = threading.Thread(target = lambda: worker_process_results(meta_queue, res_dict))
    meta_thread.daemon = True
    meta_thread.start()

    workers = []
    for _ in range(0, min(thread_count, len(paths))):
        worker = MetaDataWorker(file_queue, meta_queue)
        worker.daemon = True
        worker.start()
        workers.append(worker)

    for path in paths:
        file_queue.put(path)

    file_queue.join()
    meta_queue.join()

    for x in workers:
        x.terminate()

    return res_dict

class MetaDataWorker(multiprocessing.Process):
    ''' Use library to get meta data from file. '''

    def __init__(self, file_queue, meta_queue):
        ''' Constructor. '''
        super().__init__()
        self.file_queue = file_queue
        self.meta_queue = meta_queue

    def run(self):
        """ Run. """
        while True:
            try:
                path = self.file_queue.get()
                meta = getmetadata(path)
                meta = None
                self.meta_queue.put((path, meta))
            except Exception as err:
                print("Thread end.")
                print("{0}".format(err))
            finally:
                self.file_queue.task_done()
Already solved; I needed to send ending signals (sentinels) to the workers and to the result thread to stop their never-ending loops.
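The post does not show the fix itself; below is a minimal sketch of that sentinel idea against the code above (the None sentinels and the loop changes are a reconstruction, not the poster's exact solution):

# 1. Result thread: stop when it sees the sentinel instead of looping forever.
def worker_process_results(meta_queue, res_dict):
    while True:
        (path, meta) = meta_queue.get()
        try:
            if path is None:  # sentinel -> no more results
                break
            res_dict[path] = meta
        finally:
            meta_queue.task_done()

# 2. Worker processes: add the same check at the top of the loop in
#    MetaDataWorker.run(), calling self.file_queue.task_done() for the
#    sentinel and then breaking out of the while loop.

# 3. Producer side, in multiprocess_get_metadata(), after queuing the paths:
#        for _ in workers:
#            file_queue.put(None)        # one sentinel per worker process
#        file_queue.join()
#        meta_queue.put((None, None))    # stop the result thread
#        meta_queue.join()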

Multiprocessing Queue.get() hangs

I'm trying to implement basic multiprocessing and I've run into an issue. The python script is attached below.
import time, sys, random, threading
from multiprocessing import Process
from Queue import Queue
from FrequencyAnalysis import FrequencyStore, AnalyzeFrequency

append_queue = Queue(10)
database = FrequencyStore()

def add_to_append_queue(_list):
    append_queue.put(_list)

def process_append_queue():
    while True:
        item = append_queue.get()
        database.append(item)
        print("Appended to database in %.4f seconds" % database.append_time)
        append_queue.task_done()
    return

def main():
    database.load_db()
    print("Database loaded in %.4f seconds" % database.load_time)
    append_queue_process = Process(target=process_append_queue)
    append_queue_process.daemon = True
    append_queue_process.start()
    #t = threading.Thread(target=process_append_queue)
    #t.daemon = True
    #t.start()

    while True:
        path = raw_input("file: ")
        if path == "exit":
            break
        a = AnalyzeFrequency(path)
        a.analyze()
        print("Analyzed file in %.4f seconds" % a._time)
        add_to_append_queue(a.get_results())

    append_queue.join()
    #append_queue_process.join()
    database.save_db()
    print("Database saved in %.4f seconds" % database.save_time)
    sys.exit(0)

if __name__=="__main__":
    main()
The AnalyzeFrequency analyzes the frequencies of words in a file and get_results() returns a sorted list of said words and frequencies. The list is very large, perhaps 10000 items.
This list is then passed to the add_to_append_queue method, which adds it to a queue. process_append_queue takes the items one by one and adds the frequencies to a "database". This operation takes a bit longer than the actual analysis in main(), so I am trying to use a separate process for this method. When I try to do this with the threading module, everything works perfectly fine, no errors. When I try to use Process, the script hangs at item = append_queue.get().
Could someone please explain what is happening here, and perhaps direct me toward a fix?
All answers appreciated!
UPDATE
The pickle error was my fault, it was just a typo. Now I am using the Queue class within multiprocessing but the append_queue.get() method still hangs.
NEW CODE
import time, sys, random
from multiprocessing import Process, Queue
from FrequencyAnalysis import FrequencyStore, AnalyzeFrequency

append_queue = Queue()
database = FrequencyStore()

def add_to_append_queue(_list):
    append_queue.put(_list)

def process_append_queue():
    while True:
        database.append(append_queue.get())
        print("Appended to database in %.4f seconds" % database.append_time)
    return

def main():
    database.load_db()
    print("Database loaded in %.4f seconds" % database.load_time)
    append_queue_process = Process(target=process_append_queue)
    append_queue_process.daemon = True
    append_queue_process.start()
    #t = threading.Thread(target=process_append_queue)
    #t.daemon = True
    #t.start()

    while True:
        path = raw_input("file: ")
        if path == "exit":
            break
        a = AnalyzeFrequency(path)
        a.analyze()
        print("Analyzed file in %.4f seconds" % a._time)
        add_to_append_queue(a.get_results())

    #append_queue.join()
    #append_queue_process.join()
    print str(append_queue.qsize())
    database.save_db()
    print("Database saved in %.4f seconds" % database.save_time)
    sys.exit(0)

if __name__=="__main__":
    main()
UPDATE 2
This is the database code:
class FrequencyStore:

    def __init__(self):
        self.sorter = Sorter()
        self.db = {}
        self.load_time = -1
        self.save_time = -1
        self.append_time = -1
        self.sort_time = -1

    def load_db(self):
        start_time = time.time()
        try:
            file = open("results.txt", 'r')
        except:
            raise IOError

        self.db = {}
        for line in file:
            word, count = line.strip("\n").split("=")
            self.db[word] = int(count)

        file.close()
        self.load_time = time.time() - start_time

    def save_db(self):
        start_time = time.time()

        _db = []
        for key in self.db:
            _db.append([key, self.db[key]])
        _db = self.sort(_db)

        try:
            file = open("results.txt", 'w')
        except:
            raise IOError

        file.truncate(0)
        for x in _db:
            file.write(x[0] + "=" + str(x[1]) + "\n")
        file.close()

        self.save_time = time.time() - start_time

    def create_sorted_db(self):
        _temp_db = []
        for key in self.db:
            _temp_db.append([key, self.db[key]])
        _temp_db = self.sort(_temp_db)
        _temp_db.reverse()
        return _temp_db

    def get_db(self):
        return self.db

    def sort(self, _list):
        start_time = time.time()
        _list = self.sorter.mergesort(_list)
        _list.reverse()
        self.sort_time = time.time() - start_time
        return _list

    def append(self, _list):
        start_time = time.time()
        for x in _list:
            if x[0] not in self.db:
                self.db[x[0]] = x[1]
            else:
                self.db[x[0]] += x[1]
        self.append_time = time.time() - start_time
Comments suggest you're trying to run this on Windows. As I said in a comment,
If you're running this on Windows, it can't work - Windows doesn't
have fork(), so each process gets its own Queue and they have nothing
to do with each other. The entire module is imported "from scratch" by
each process on Windows. You'll need to create the Queue in main(),
and pass it as an argument to the worker function.
Here's fleshing out what you need to do to make it portable, although I removed all the database stuff because it's irrelevant to the problems you've described so far. I also removed the daemon fiddling, because that's usually just a lazy way to avoid shutting down things cleanly, and often as not will come back to bite you later:
def process_append_queue(append_queue):
    while True:
        x = append_queue.get()
        if x is None:
            break
        print("processed %d" % x)
    print("worker done")

def main():
    import multiprocessing as mp
    append_queue = mp.Queue(10)
    append_queue_process = mp.Process(target=process_append_queue, args=(append_queue,))
    append_queue_process.start()

    for i in range(100):
        append_queue.put(i)
    append_queue.put(None)  # tell worker we're done
    append_queue_process.join()

if __name__=="__main__":
    main()
The output is the "obvious" stuff:
processed 0
processed 1
processed 2
processed 3
processed 4
...
processed 96
processed 97
processed 98
processed 99
worker done
Note: because Windows doesn't (can't) fork(), it's impossible for worker processes to inherit any Python object on Windows. Each process runs the entire program from its start. That's why your original program couldn't work: each process created its own Queue, wholly unrelated to the Queue in the other process. In the approach shown above, only the main process creates a Queue, and the main process passes it (as an argument) to the worker process.
queue.Queue is thread-safe, but doesn't work across processes. This is quite easy to fix, though. Instead of:
from multiprocessing import Process
from Queue import Queue
You want:
from multiprocessing import Process, Queue

process stop working while queue is not empty

I am trying to write a script in Python to convert URLs into their corresponding IPs. Since the URL file is huge (nearly 10 GB), I'm trying to use the multiprocessing library.
I create one process to write output to a file and a set of processes to convert URLs.
Here is my code:
import multiprocessing as mp
import socket
import time

num_processes = mp.cpu_count()
sentinel = None

def url2ip(inqueue, output):
    v_url = inqueue.get()
    print 'v_url '+v_url
    try:
        v_ip = socket.gethostbyname(v_url)
        output_string = v_url+'|||'+v_ip+'\n'
    except:
        output_string = v_url+'|||-1'+'\n'
    print 'output_string '+output_string
    output.put(output_string)
    print output.full()

def handle_output(output):
    f_ip = open("outputfile", "a")
    while True:
        output_v = output.get()
        if output_v:
            print 'output_v '+output_v
            f_ip.write(output_v)
        else:
            break
    f_ip.close()

if __name__ == '__main__':
    output = mp.Queue()
    inqueue = mp.Queue()
    jobs = []
    proc = mp.Process(target=handle_output, args=(output, ))
    proc.start()
    print 'run in %d processes' % num_processes
    for i in range(num_processes):
        p = mp.Process(target=url2ip, args=(inqueue, output))
        jobs.append(p)
        p.start()
    for line in open('inputfile','r'):
        print 'ori '+line.strip()
        inqueue.put(line.strip())
    for i in range(num_processes):
        # Send the sentinel to tell Simulation to end
        inqueue.put(sentinel)
    for p in jobs:
        p.join()
    output.put(None)
    proc.join()
However, it did not work. It did produce several outputs (4 out of 10 URLs in the test file), but it just suddenly stops while the queues are not empty (I did check queue.empty()).
Could anyone suggest what's wrong? Thanks.
Your workers exit after processing a single URL each; they need to loop internally until they get the sentinel. However, you should probably just look at multiprocessing.Pool instead, as that does the bookkeeping for you.
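A minimal sketch of that looping fix, reusing the names from the question code (the while loop and sentinel check are the additions; the debug prints are dropped and the bare except narrowed to socket.error):

def url2ip(inqueue, output):
    # Keep consuming URLs until the sentinel (None) arrives,
    # instead of returning after a single get():
    while True:
        v_url = inqueue.get()
        if v_url is sentinel:
            break
        try:
            v_ip = socket.gethostbyname(v_url)
            output_string = v_url+'|||'+v_ip+'\n'
        except socket.error:
            output_string = v_url+'|||-1'+'\n'
        output.put(output_string)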
