I have the following code:
import multiprocessing
import time
import os

# WHEN SEMAPHORE IS DEFINED HERE THEN IT WORKS
semaphore = multiprocessing.Semaphore(1)

def producer(num, output):
    semaphore.acquire()
    time.sleep(1)
    element = "PROCESS: %d PID: %d PPID: %d" % (num, os.getpid(), os.getppid())
    print "WRITE -> " + element
    output.put(element)
    time.sleep(1)
    semaphore.release()

if __name__ == '__main__':
    """
    Reads elements as soon as they are put inside the queue
    """
    output = multiprocessing.Manager().Queue()
    pool = multiprocessing.Pool(4)
    lst = range(40)
    # WHEN SEMAPHORE IS DEFINED HERE THEN IT DOES NOT WORK
    # semaphore = multiprocessing.Semaphore(1)
    for i in lst:
        pool.apply_async(producer, (i, output))
        # print "%d Do not wait!" % i
        # res.get()
    counter = 0
    while True:
        try:
            print "READ <- " + output.get_nowait()
            counter += 1
            if (counter == len(lst)):
                print "Break"
                break
        except:
            print "READ <- NOTHING IN BUFFER"
            pass
        time.sleep(1)
This code works as expected and prints:
READ <- NOTHING IN BUFFER
WRITE -> PROCESS: 0 PID: 15803 PPID: 15798
READ <- NOTHING IN BUFFER
READ <- PROCESS: 0 PID: 15803 PPID: 15798
READ <- NOTHING IN BUFFER
WRITE -> PROCESS: 1 PID: 15806 PPID: 15798
READ <- PROCESS: 1 PID: 15806 PPID: 15798
...
Then I have this version, which does not work (it is basically the same as the first one except that the semaphore is defined in a different place):
import multiprocessing
import time
import os

# WHEN SEMAPHORE IS DEFINED HERE THEN IT WORKS
# semaphore = multiprocessing.Semaphore(1)

def producer(num, output):
    print hex(id(semaphore))
    semaphore.acquire()
    time.sleep(1)
    element = "PROCESS: %d PID: %d PPID: %d" % (num, os.getpid(), os.getppid())
    print "WRITE -> " + element
    output.put(element)
    time.sleep(1)
    semaphore.release()

if __name__ == '__main__':
    """
    Reads elements as soon as they are put inside the queue
    """
    output = multiprocessing.Manager().Queue()
    pool = multiprocessing.Pool(4)
    lst = range(40)
    # WHEN SEMAPHORE IS DEFINED HERE THEN IT DOES NOT WORK
    semaphore = multiprocessing.Semaphore(1)
    for i in lst:
        pool.apply_async(producer, (i, output))
        # print "%d Do not wait!" % i
        # res.get()
    counter = 0
    while True:
        try:
            print "READ <- " + output.get_nowait()
            counter += 1
            if (counter == len(lst)):
                print "Break"
                break
        except:
            print "READ <- NOTHING IN BUFFER"
            pass
        time.sleep(1)
This version prints:
READ <- NOTHING IN BUFFER
READ <- NOTHING IN BUFFER
READ <- NOTHING IN BUFFER
READ <- NOTHING IN BUFFER
READ <- NOTHING IN BUFFER
READ <- NOTHING IN BUFFER
READ <- NOTHING IN BUFFER
...
It seems as if producer never writes anything to the queue. I've read somewhere that apply_async does not print error messages, so I changed pool.apply_async(producer, (i, output)) to pool.apply(producer, (i, output)) in the second version to see what is going on. It turns out that semaphore is not defined; here is the output:
Traceback (most recent call last):
  File "glob_var_wrong.py", line 31, in <module>
    pool.apply(producer, (i, output))
  File "/usr/lib/python2.7/multiprocessing/pool.py", line 244, in apply
    return self.apply_async(func, args, kwds).get()
  File "/usr/lib/python2.7/multiprocessing/pool.py", line 567, in get
    raise self._value
NameError: global name 'semaphore' is not defined
However, the following code runs correctly and prints 10 (the value defined inside __main__):
global_var = 20

def print_global_var():
    print global_var

if __name__ == '__main__':
    global_var = 10
    print_global_var()
It seems that in this code a global variable can be defined inside __main__, while in the previous code it is not possible. At first I assumed that variables defined inside __main__ are not shared between processes, but the problem affects only semaphore and not output, pool, or lst. Why is this happening?
When you create a new process using multiprocessing.Process (used under the hood by Pool), it copies the local scope, pickles it, and sends it to a new process to evaluate.
Because you did not define the variable semaphore before calling Pool(4), the variable is undefined in those OTHER processes where the code gets evaluated, and the function producer throws an exception.
To see this, change the definition of producer to:
def producer(num, output):
    try:
        # the name lookup itself raises if semaphore is undefined,
        # so it has to happen inside the try block to be reported
        print hex(id(semaphore))
        semaphore.acquire()
    except Exception as e:
        print e
    time.sleep(1)
    element = "PROCESS: %d PID: %d PPID: %d" % (num, os.getpid(), os.getppid())
    print "WRITE -> " + element
    output.put(element)
    time.sleep(1)
    semaphore.release()
and now your failing code will print out a bunch (40) of errors that look like:
global name 'semaphore' is not defined
This is why semaphore has to be defined BEFORE calling Pool.
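If the semaphore really needs to be created inside __main__ (or the code must also work with the spawn start method, e.g. on Windows), a common workaround is to hand it to every worker through Pool's initializer. A minimal sketch of that pattern; the helper init_worker is an illustrative name, not part of the original code:

import multiprocessing
import os

def init_worker(sem):
    # runs once in every worker process, before it takes any task
    global semaphore
    semaphore = sem

def producer(num, output):
    semaphore.acquire()
    try:
        output.put("PROCESS: %d PID: %d" % (num, os.getpid()))
    finally:
        semaphore.release()

if __name__ == '__main__':
    output = multiprocessing.Manager().Queue()
    semaphore = multiprocessing.Semaphore(1)
    # the semaphore is passed to the workers at Pool creation time,
    # not through the task queue, so it is shared correctly
    pool = multiprocessing.Pool(4, initializer=init_worker, initargs=(semaphore,))
    for i in range(8):
        pool.apply_async(producer, (i, output))
    pool.close()
    pool.join()
    while not output.empty():
        print "READ <- " + output.get()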
It is because you execute the code on Windows.
You will get the expected results on Linux.
That's the difference between fork and spawn.
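A tiny way to see that difference directly (Python 3, where the start method can be chosen explicitly; main_only is just an illustrative name):

import multiprocessing as mp

def child(label):
    # under fork the child inherits main_only from the parent's memory;
    # under spawn the module is re-imported and the __main__ guard below
    # never runs in the child, so the name is missing
    print(label, "sees main_only:", globals().get("main_only"))

if __name__ == "__main__":
    main_only = "defined in __main__"
    for method in ("fork", "spawn"):      # "fork" is not available on Windows
        p = mp.get_context(method).Process(target=child, args=(method,))
        p.start()
        p.join()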
I create 3 processes and want the function write1 to write the values 'A', 'B', 'C' to queue1, the function read1 to read values from queue1 and put them into queue2, and, at the same time, the function read2 to read values from queue2. But the values B and C can't be read from queue2 in time, and the processes finish.
from multiprocessing import Process, Queue, Manager, Pool, Lock
import os, time, random

# write data to queue 1
def write1(q1, lock):
    lock.acquire()
    for value in ['A', 'B', 'C']:
        print('Put %s to queue111...%s' % (value, str(os.getpid())))
        q1.put(value)
        time.sleep(1)
    lock.release()

# read data from queue 1 and write it to queue 2
def read1(q1, q2, lock):
    lock.acquire()
    while True:
        time.sleep(1)
        value = q1.get()
        # if value is None: break
        print('Get %s from queue111.%s' % (value, str(os.getpid())))
        q2.put(value)
        print('Put %s to queue222...%s' % (value, str(os.getpid())))
    lock.release()

def read2(q2, lock):
    lock.acquire()
    while True:
        # if not q2.empty() or not q1.empty():
        time.sleep(2)
        value = q2.get(True)
        print('Get %s from queue222.%s' % (value, os.getpid()))
    lock.release()

if __name__ == '__main__':
    manager = Manager()
    # the parent process creates the Queues and passes them to each child process:
    q1 = manager.Queue()
    q2 = manager.Queue()
    lock1 = manager.Lock()
    lock2 = manager.Lock()
    lock3 = manager.Lock()
    start = time.time()
    p = Pool()
    # pw = p.apply_async(write1, args=(q1, lock1,))
    pw = Process(target=write1, args=(q1, lock1,))
    # time.sleep(0.5)
    # pr = p.apply_async(read1, args=(q1, q2, lock2,))
    # pr2 = p.apply_async(read2, args=(q2, lock3))
    pr = Process(target=read1, args=(q1, q2, lock2,))
    pr2 = Process(target=read2, args=(q2, lock3,))
    pw.start()
    pr.start()
    pr2.start()
    # p.close()
    # p.join()
    pw.join()
    pr.terminate()
    pr2.terminate()
    end = time.time()
    # print
    print('finished!!')
    print(end - start)
The output is:
Put A to queue111...77678
Put B to queue111...77678
Get A from queue111.77680
Put A to queue222...77680
Put C to queue111...77678
Get A from queue222.77681
Get B from queue111.77680
Put B to queue222...77680
Get C from queue111.77680
Put C to queue222...77680
finished!!
3.025238275527954
You can't use terminate to control a system like this: it races with completing the actual work. Instead, make your loops terminate on their own, probably by using a sentinel value in each Queue (as in the commented-out if value is None: break line).
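A minimal sketch of that idea, reusing the question's function and queue names. The SENTINEL constant is an assumption added here, and the locks are dropped because each queue is only read by a single process:

import os, time
from multiprocessing import Process, Manager

SENTINEL = None                         # assumption: None is never a real payload

def write1(q1):
    for value in ['A', 'B', 'C']:
        print('Put %s to queue111...%s' % (value, os.getpid()))
        q1.put(value)
        time.sleep(1)
    q1.put(SENTINEL)                    # signal "no more data"

def read1(q1, q2):
    while True:
        value = q1.get()
        if value is SENTINEL:           # forward the signal and stop
            q2.put(SENTINEL)
            break
        print('Get %s from queue111.%s' % (value, os.getpid()))
        q2.put(value)
        print('Put %s to queue222...%s' % (value, os.getpid()))

def read2(q2):
    while True:
        value = q2.get()
        if value is SENTINEL:
            break
        print('Get %s from queue222.%s' % (value, os.getpid()))

if __name__ == '__main__':
    manager = Manager()
    q1, q2 = manager.Queue(), manager.Queue()
    procs = [Process(target=write1, args=(q1,)),
             Process(target=read1, args=(q1, q2)),
             Process(target=read2, args=(q2,))]
    for p in procs:
        p.start()
    for p in procs:
        p.join()                        # every process now exits on its own
    print('finished!!')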
I have the following Python script which I use to ping a list of IPs passed on the command line.
#! /usr/bin/python

import sys, time
from threading import Thread
import subprocess
from Queue import Queue

num_threads = 255
queue = Queue()
p = 0
f = 0
t = 0

def timestamp():
    lt = time.localtime(time.time())
    return "%02d.%02d.%04d %02d:%02d:%02d" % (lt[2], lt[1], lt[0], lt[3], lt[4], lt[5])

def pinger(i, q):
    global p
    global f
    while True:
        ip = q.get()
        ret = subprocess.call("ping -c 2 %s" % ip,
                              shell=True,
                              stdout=open('/dev/null', 'w'),
                              stderr=subprocess.STDOUT)
        if ret == 0:
            print(ip + "\tpassed")
            time.sleep(0.1)
            p += 1
        else:
            print(ip + "\tfailed")
            time.sleep(0.1)
            f += 1
        q.task_done()

for i in range(num_threads):
    worker = Thread(target=pinger, args=(i, queue))
    worker.setDaemon(True)
    worker.start()

print("\nStarted at " + timestamp())

for ip in open(sys.argv[1]).readlines():
    ip = ip.strip()
    queue.put(ip)
    t += 1

queue.join()

print(str(t) + " IPs pinged, " + str(p) + " passed, " + str(f) + " failed\n")
print("\nFinished at " + timestamp())
For the most part the output is perfect, like this, tab-separated and ready for Excel:
192.168.188.1 failed
192.168.199.107 passed
192.168.7.2 passed
192.168.199.108 failed
But every time, and at random, I get problems with the output, for example an odd indent on a line:
192.168.164.173 failed
192.168.164.190 failed
Or an odd indent in one field - here the IP is indented but not the message:
172.29.19.132 failed
Or concatenation followed by a blank line:
172.29.9.37 passed172.29.19.133 passed
172.29.9.39 passed
And combinations of these, not always with the same IPs.
I've ensured the IP list is clean, and have tried reducing the thread count, introducing delays before printing, and flushing stdout, all to no avail. Any ideas please?
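For reference, one common way to keep concurrently printing threads from stepping on each other is to build each result line as a single string and write it under a shared lock. A rough sketch of the idea; print_lock and emit are illustrative names, not part of the script above:

import sys
from threading import Lock

print_lock = Lock()          # one lock shared by all pinger threads

def emit(line):
    # a single atomic write per result line avoids interleaved output
    with print_lock:
        sys.stdout.write(line + "\n")
        sys.stdout.flush()

# inside pinger(), instead of print(ip + "\tpassed"):
#     emit(ip + "\tpassed")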
I expect all child processes to finish and then the main process to exit, but it cannot exit. Why?
#!/usr/bin/env python
# coding=utf-8
import os
from multiprocessing import Manager
from multiprocessing import Pool

def write_file_name_to_queue(q, src_folder):
    print('Process to write: %s' % os.getpid())
    if not os.path.exists(src_folder):
        print "Please input folder path"
        return
    for (dirpath, dirnames, filelist) in os.walk(src_folder):
        for name in filelist:
            if name[0] == '.':
                continue
            q.put(os.path.join(dirpath, name))

def read_file_name_from_queue(q):
    print('Process to read: %s' % os.getpid())
    while True:
        value = q.get(True)
        print('Get %s from queue.' % value)

if __name__ == "__main__":
    mg = Manager()
    q = mg.Queue()
    p = Pool()
    p.apply_async(func=write_file_name_to_queue, args=(q, "./test/"))
    for i in xrange(8):
        p.apply_async(func=read_file_name_from_queue, args=(q,))
    p.close()
    p.join()
Run it and get the following result:
➜ check python check_process.py
Process to write: 3918
Process to read: 3919
Process to read: 3920
Get ./test/a from queue.
Get ./test/b from queue.
Get ./test/c from queue.
Get ./test/e from queue.
Get ./test/f from queue.
Process to read: 3921
Process to read: 3918
The process still waits.
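For context: each reader blocks forever in q.get(True), so the tasks submitted with apply_async never finish and p.join() never returns. A rough sketch of one way to let everything exit, using one sentinel per reader; SENTINEL and NUM_READERS are names introduced here, not in the original post:

import os
from multiprocessing import Manager, Pool

SENTINEL = None              # assumption: no real file path is ever None
NUM_READERS = 8

def write_file_name_to_queue(q, src_folder):
    for (dirpath, dirnames, filelist) in os.walk(src_folder):
        for name in filelist:
            if not name.startswith('.'):
                q.put(os.path.join(dirpath, name))
    for _ in range(NUM_READERS):     # one "stop" marker per reader
        q.put(SENTINEL)

def read_file_name_from_queue(q):
    while True:
        value = q.get(True)
        if value is SENTINEL:        # writer is done, stop looping
            break
        print('Get %s from queue.' % value)

if __name__ == "__main__":
    q = Manager().Queue()
    pool = Pool()
    pool.apply_async(write_file_name_to_queue, (q, "./test/"))
    for _ in range(NUM_READERS):
        pool.apply_async(read_file_name_from_queue, (q,))
    pool.close()
    pool.join()                      # now returns once all tasks finish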
I'm trying to implement basic multiprocessing and I've run into an issue. The Python script is attached below.
import time, sys, random, threading
from multiprocessing import Process
from Queue import Queue
from FrequencyAnalysis import FrequencyStore, AnalyzeFrequency

append_queue = Queue(10)
database = FrequencyStore()

def add_to_append_queue(_list):
    append_queue.put(_list)

def process_append_queue():
    while True:
        item = append_queue.get()
        database.append(item)
        print("Appended to database in %.4f seconds" % database.append_time)
        append_queue.task_done()
    return

def main():
    database.load_db()
    print("Database loaded in %.4f seconds" % database.load_time)
    append_queue_process = Process(target=process_append_queue)
    append_queue_process.daemon = True
    append_queue_process.start()
    #t = threading.Thread(target=process_append_queue)
    #t.daemon = True
    #t.start()
    while True:
        path = raw_input("file: ")
        if path == "exit":
            break
        a = AnalyzeFrequency(path)
        a.analyze()
        print("Analyzed file in %.4f seconds" % a._time)
        add_to_append_queue(a.get_results())
    append_queue.join()
    #append_queue_process.join()
    database.save_db()
    print("Database saved in %.4f seconds" % database.save_time)
    sys.exit(0)

if __name__=="__main__":
    main()
AnalyzeFrequency analyzes the frequencies of words in a file, and get_results() returns a sorted list of those words and frequencies. The list is very large, perhaps 10000 items.
This list is then passed to the add_to_append_queue method, which adds it to a queue. process_append_queue takes the items one by one and adds the frequencies to a "database". This operation takes a bit longer than the actual analysis in main(), so I am trying to use a separate process for this method. When I try to do this with the threading module, everything works perfectly fine, no errors. When I try to use Process, the script hangs at item = append_queue.get().
Could someone please explain what is happening here, and perhaps direct me toward a fix?
All answers appreciated!
UPDATE
The pickle error was my fault, it was just a typo. Now I am using the Queue class within multiprocessing but the append_queue.get() method still hangs.
NEW CODE
import time, sys, random
from multiprocessing import Process, Queue
from FrequencyAnalysis import FrequencyStore, AnalyzeFrequency

append_queue = Queue()
database = FrequencyStore()

def add_to_append_queue(_list):
    append_queue.put(_list)

def process_append_queue():
    while True:
        database.append(append_queue.get())
        print("Appended to database in %.4f seconds" % database.append_time)
    return

def main():
    database.load_db()
    print("Database loaded in %.4f seconds" % database.load_time)
    append_queue_process = Process(target=process_append_queue)
    append_queue_process.daemon = True
    append_queue_process.start()
    #t = threading.Thread(target=process_append_queue)
    #t.daemon = True
    #t.start()
    while True:
        path = raw_input("file: ")
        if path == "exit":
            break
        a = AnalyzeFrequency(path)
        a.analyze()
        print("Analyzed file in %.4f seconds" % a._time)
        add_to_append_queue(a.get_results())
    #append_queue.join()
    #append_queue_process.join()
    print str(append_queue.qsize())
    database.save_db()
    print("Database saved in %.4f seconds" % database.save_time)
    sys.exit(0)

if __name__=="__main__":
    main()
UPDATE 2
This is the database code:
class FrequencyStore:

    def __init__(self):
        self.sorter = Sorter()
        self.db = {}
        self.load_time = -1
        self.save_time = -1
        self.append_time = -1
        self.sort_time = -1

    def load_db(self):
        start_time = time.time()
        try:
            file = open("results.txt", 'r')
        except:
            raise IOError
        self.db = {}
        for line in file:
            word, count = line.strip("\n").split("=")
            self.db[word] = int(count)
        file.close()
        self.load_time = time.time() - start_time

    def save_db(self):
        start_time = time.time()
        _db = []
        for key in self.db:
            _db.append([key, self.db[key]])
        _db = self.sort(_db)
        try:
            file = open("results.txt", 'w')
        except:
            raise IOError
        file.truncate(0)
        for x in _db:
            file.write(x[0] + "=" + str(x[1]) + "\n")
        file.close()
        self.save_time = time.time() - start_time

    def create_sorted_db(self):
        _temp_db = []
        for key in self.db:
            _temp_db.append([key, self.db[key]])
        _temp_db = self.sort(_temp_db)
        _temp_db.reverse()
        return _temp_db

    def get_db(self):
        return self.db

    def sort(self, _list):
        start_time = time.time()
        _list = self.sorter.mergesort(_list)
        _list.reverse()
        self.sort_time = time.time() - start_time
        return _list

    def append(self, _list):
        start_time = time.time()
        for x in _list:
            if x[0] not in self.db:
                self.db[x[0]] = x[1]
            else:
                self.db[x[0]] += x[1]
        self.append_time = time.time() - start_time
Comments suggest you're trying to run this on Windows. As I said in a comment,
If you're running this on Windows, it can't work - Windows doesn't have fork(), so each process gets its own Queue and they have nothing to do with each other. The entire module is imported "from scratch" by each process on Windows. You'll need to create the Queue in main(), and pass it as an argument to the worker function.
Here's a fleshed-out version of what you need to do to make it portable, although I removed all the database stuff because it's irrelevant to the problems you've described so far. I also removed the daemon fiddling, because that's usually just a lazy way to avoid shutting things down cleanly, and as often as not will come back to bite you later:
def process_append_queue(append_queue):
    while True:
        x = append_queue.get()
        if x is None:
            break
        print("processed %d" % x)
    print("worker done")

def main():
    import multiprocessing as mp
    append_queue = mp.Queue(10)
    append_queue_process = mp.Process(target=process_append_queue, args=(append_queue,))
    append_queue_process.start()
    for i in range(100):
        append_queue.put(i)
    append_queue.put(None)  # tell worker we're done
    append_queue_process.join()

if __name__=="__main__":
    main()
The output is the "obvious" stuff:
processed 0
processed 1
processed 2
processed 3
processed 4
...
processed 96
processed 97
processed 98
processed 99
worker done
Note: because Windows doesn't (can't) fork(), it's impossible for worker processes to inherit any Python object on Windows. Each process runs the entire program from its start. That's why your original program couldn't work: each process created its own Queue, wholly unrelated to the Queue in the other process. In the approach shown above, only the main process creates a Queue, and the main process passes it (as an argument) to the worker process.
queue.Queue is thread-safe, but doesn't work across processes. This is quite easy to fix, though. Instead of:
from multiprocessing import Process
from Queue import Queue
You want:
from multiprocessing import Process, Queue
I'm trying to write a script in Python to convert URLs into their corresponding IPs. Since the URL file is huge (nearly 10 GB), I'm trying to use the multiprocessing lib.
I create one process to write output to a file and a set of processes to convert URLs.
Here is my code:
import multiprocessing as mp
import socket
import time

num_processes = mp.cpu_count()
sentinel = None

def url2ip(inqueue, output):
    v_url = inqueue.get()
    print 'v_url ' + v_url
    try:
        v_ip = socket.gethostbyname(v_url)
        output_string = v_url + '|||' + v_ip + '\n'
    except:
        output_string = v_url + '|||-1' + '\n'
    print 'output_string ' + output_string
    output.put(output_string)
    print output.full()

def handle_output(output):
    f_ip = open("outputfile", "a")
    while True:
        output_v = output.get()
        if output_v:
            print 'output_v ' + output_v
            f_ip.write(output_v)
        else:
            break
    f_ip.close()

if __name__ == '__main__':
    output = mp.Queue()
    inqueue = mp.Queue()
    jobs = []
    proc = mp.Process(target=handle_output, args=(output, ))
    proc.start()
    print 'run in %d processes' % num_processes
    for i in range(num_processes):
        p = mp.Process(target=url2ip, args=(inqueue, output))
        jobs.append(p)
        p.start()
    for line in open('inputfile', 'r'):
        print 'ori ' + line.strip()
        inqueue.put(line.strip())
    for i in range(num_processes):
        # Send the sentinel to tell Simulation to end
        inqueue.put(sentinel)
    for p in jobs:
        p.join()
    output.put(None)
    proc.join()
However, it does not work. It produces several outputs (4 out of 10 URLs in the test file), but it just suddenly stops while the queues are not empty (I did check queue.empty()).
Could anyone suggest what's wrong? Thanks.
Your workers exit after processing a single URL each; they need to loop internally until they get the sentinel. However, you should probably just look at multiprocessing.Pool instead, as that does the bookkeeping for you.
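A minimal sketch of the first suggestion, keeping the question's sentinel convention; this replaces only the url2ip function in the script above and is an interpretation of the advice rather than the answerer's own code:

import socket   # already imported in the full script above

def url2ip(inqueue, output):
    # loop until the sentinel (None) arrives instead of handling one URL
    while True:
        v_url = inqueue.get()
        if v_url is None:               # sentinel: no more work
            break
        try:
            v_ip = socket.gethostbyname(v_url)
            output.put(v_url + '|||' + v_ip + '\n')
        except socket.error:
            output.put(v_url + '|||-1' + '\n')

With this change each worker keeps pulling URLs until it sees the sentinel that the main process already puts on inqueue, so the joins at the end can complete.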