Many threads to write log file at same time in Python

I am writing a script to retrieve WMI info from many computers at the same time and then write this info to a text file:
f = open("results.txt", 'w+') ## to clean the results file before the start

def filesize(asset):
    f = open("results.txt", 'a+')
    c = wmi.WMI(asset)
    wql = 'SELECT FileSize,Name FROM CIM_DataFile where (Drive="D:" OR Drive="E:") and Caption like "%file%"'
    for item in c.query(wql):
        print >> f, item.Name.split("\\")[2].strip().upper(), str(item.FileSize)

class myThread(threading.Thread):
    def __init__(self, name):
        threading.Thread.__init__(self)
        self.name = name

    def run(self):
        pythoncom.CoInitialize()
        print "Starting " + self.name
        filesize(self.name)
        print "Exiting " + self.name

thread1 = myThread('10.24.2.31')
thread2 = myThread('10.24.2.32')
thread3 = myThread('10.24.2.33')
thread4 = myThread('10.24.2.34')

thread1.start()
thread2.start()
thread3.start()
thread4.start()
The problem is that all the threads are writing at the same time.

You can simply create your own locking mechanism to ensure that only one thread is ever writing to a file.
import threading

lock = threading.Lock()

def write_to_file(f, text, file_size):
    lock.acquire()  # thread blocks at this line until it can obtain the lock
    # in this section, only one thread can be present at a time
    print >> f, text, file_size
    lock.release()

def filesize(asset):
    f = open("results.txt", 'a+')
    c = wmi.WMI(asset)
    wql = 'SELECT FileSize,Name FROM CIM_DataFile where (Drive="D:" OR Drive="E:") and Caption like "%file%"'
    for item in c.query(wql):
        write_to_file(f, item.Name.split("\\")[2].strip().upper(), str(item.FileSize))
You may want to consider placing the lock around the entire for item in c.query(wql): loop so that each thread does a larger chunk of work before releasing the lock, as sketched below.
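As an illustration of that suggestion, here is a minimal sketch that holds the lock around the whole loop and uses with blocks so the lock and the file are always released; the names and the WMI query are carried over from the question, so treat it as a sketch rather than a drop-in replacement:
import threading
import wmi  # assumed to be the same third-party WMI module used in the question

lock = threading.Lock()

def filesize(asset):
    c = wmi.WMI(asset)
    wql = 'SELECT FileSize,Name FROM CIM_DataFile where (Drive="D:" OR Drive="E:") and Caption like "%file%"'
    # Hold the lock for the whole loop: each asset's results are written as one
    # uninterrupted block, and the with statements release the lock and close
    # the file even if the query raises.
    with lock:
        with open("results.txt", 'a') as f:
            for item in c.query(wql):
                f.write("%s %s\n" % (item.Name.split("\\")[2].strip().upper(), item.FileSize))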

print is not thread safe. Use the logging module instead (which is):
import logging
import threading
import time

FORMAT = '[%(levelname)s] (%(threadName)-10s) %(message)s'
logging.basicConfig(level=logging.DEBUG, format=FORMAT)

file_handler = logging.FileHandler('results.log')
file_handler.setFormatter(logging.Formatter(FORMAT))
logging.getLogger().addHandler(file_handler)

def worker():
    logging.info('Starting')
    time.sleep(2)
    logging.info('Exiting')

t1 = threading.Thread(target=worker)
t2 = threading.Thread(target=worker)

t1.start()
t2.start()
Output (and contents of results.log):
[INFO] (Thread-1 ) Starting
[INFO] (Thread-2 ) Starting
[INFO] (Thread-1 ) Exiting
[INFO] (Thread-2 ) Exiting
Instead of using the default name (Thread-n), you can set your own name using the name keyword argument, which the %(threadName)s formatting directive will then use:
t = threading.Thread(name="My worker thread", target=worker)
(This example was adapted from Doug Hellmann's excellent article about the threading module.)

For another solution, use a Pool to calculate the data, returning it to the parent process. This parent then writes all data to a file. Since only one process writes to the file at a time, there's no need for additional locking.
Note the following uses a pool of processes, not threads. This makes the code much simpler and easier than putting something together with the threading module. (There is a ThreadPool object, but it's not documented; a thread-based sketch follows the example output below.)
import glob, os, time
from multiprocessing import Pool

def filesize(path):
    time.sleep(0.1)
    return (path, os.path.getsize(path))

paths = glob.glob('*.py')
pool = Pool()  # default: one worker process per CPU

with open("results.txt", 'w+') as dataf:
    for (apath, asize) in pool.imap_unordered(filesize, paths):
        print >>dataf, apath, asize
output in results.txt
zwrap.py 122
usercustomize.py 38
tpending.py 2345
msimple4.py 385
parse2.py 499
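If you would rather use threads (for example, because the workers mostly wait on I/O), the undocumented ThreadPool mentioned above exposes the same interface as Pool. A minimal sketch under the same assumptions as the example above:
import glob, os, time
from multiprocessing.pool import ThreadPool  # same interface as Pool, but backed by threads

def filesize(path):
    time.sleep(0.1)  # simulate slow I/O, as in the example above
    return (path, os.path.getsize(path))

paths = glob.glob('*.py')
pool = ThreadPool(4)  # thread count is explicit here; pick what suits your workload

with open("results.txt", 'w') as dataf:
    for apath, asize in pool.imap_unordered(filesize, paths):
        dataf.write("%s %s\n" % (apath, asize))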


Deadlock in Python's multiprocessing upon early termination

I'm creating a multiprocessing.Queue in Python and adding multiprocessing.Process instances to this Queue.
I would like to add a function call that is executed after every job, which checks if a specific task has succeeded. If so, I would like to empty the Queue and terminate execution.
My Process class is:
class Worker(multiprocessing.Process):

    def __init__(self, queue, check_success=None, directory=None, permit_nonzero=False):
        super(Worker, self).__init__()
        self.check_success = check_success
        self.directory = directory
        self.permit_nonzero = permit_nonzero
        self.queue = queue

    def run(self):
        for job in iter(self.queue.get, None):
            stdout = mbkit.dispatch.cexectools.cexec([job], directory=self.directory, permit_nonzero=self.permit_nonzero)
            with open(job.rsplit('.', 1)[0] + '.log', 'w') as f_out:
                f_out.write(stdout)
            if callable(self.check_success) and self.check_success(job):
                # Terminate all remaining jobs here
                pass
And my Queue is set up here:
class LocalJobServer(object):

    @staticmethod
    def sub(command, check_success=None, directory=None, nproc=1, permit_nonzero=False, time=None, *args, **kwargs):
        if check_success and not callable(check_success):
            msg = "check_success option requires a callable function/object: {0}".format(check_success)
            raise ValueError(msg)
        # Create a new queue
        queue = multiprocessing.Queue()
        # Create workers equivalent to the number of jobs
        workers = []
        for _ in range(nproc):
            wp = Worker(queue, check_success=check_success, directory=directory, permit_nonzero=permit_nonzero)
            wp.start()
            workers.append(wp)
        # Add each command to the queue
        for cmd in command:
            queue.put(cmd, timeout=time)
        # Stop workers from exiting without completion
        for _ in range(nproc):
            queue.put(None)
        for wp in workers:
            wp.join()
The function call mbkit.dispatch.cexectools.cexec() is a wrapper around subprocess.Popen and returns p.stdout.
In the Worker class, I've written the conditional to check if a job succeeded, and tried emptying the remaining jobs in the Queue using a while loop, i.e. my Worker.run() function looked like this:
def run(self):
    for job in iter(self.queue.get, None):
        stdout = mbkit.dispatch.cexectools.cexec([job], directory=self.directory, permit_nonzero=self.permit_nonzero)
        with open(job.rsplit('.', 1)[0] + '.log', 'w') as f_out:
            f_out.write(stdout)
        if callable(self.check_success) and self.check_success(job):
            break
    while not self.queue.empty():
        self.queue.get()
Although this works sometimes, it usually deadlocks and my only option is to Ctrl-C. I am aware that .empty() is unreliable, thus my question.
Any advice on how I can implement such an early termination functionality?
You do not have a deadlock here. It is just linked to the behavior of multiprocessing.Queue: the get method blocks by default, so when you call get on an empty queue, the call stalls, waiting for the next element to be ready. Some of your workers will stall because your while not self.queue.empty() loop, used to empty the queue, removes all the None sentinels, and some of your workers then block on the empty Queue, as in this code:
from multiprocessing import Queue

q = Queue()
for e in iter(q.get, None):
    print(e)
To be notified when the queue is empty, you need to use a non-blocking call. You can, for instance, use q.get_nowait, or use a timeout in q.get(timeout=1). Both raise a multiprocessing.queues.Empty exception when the queue is empty. So you should replace your Worker's for job in iter(...): loop with something like:
while not queue.empty():
    try:
        job = queue.get(timeout=.1)
    except multiprocessing.queues.Empty:
        continue
    # Do stuff with your job
This way, you will not be stuck at any point.
For the synchronization part, I would recommend using a synchronization primitive such as multiprocessing.Condition or multiprocessing.Event. They are cleaner than a Value, as they are designed for this purpose. Something like this should help:
def run(self):
    while not self.queue.empty():
        try:
            job = self.queue.get(timeout=.1)
        except multiprocessing.queues.Empty:
            continue
        if self.event.is_set():
            continue
        stdout = mbkit.dispatch.cexectools.cexec([job], directory=self.directory, permit_nonzero=self.permit_nonzero)
        with open(job.rsplit('.', 1)[0] + '.log', 'w') as f_out:
            f_out.write(stdout)
        if callable(self.check_success) and self.check_success(job):
            self.event.set()
    print("Worker {} terminated cleanly".format(self.name))
where self.event is a multiprocessing.Event() shared by all the workers (a sketch of wiring it into the Worker follows below).
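For completeness, a rough sketch of how such an Event could be wired into the question's Worker; the extended constructor signature and the make_workers helper are assumptions for illustration:
import multiprocessing

class Worker(multiprocessing.Process):
    """The question's Worker, extended with a shared Event (the signature is an assumption)."""

    def __init__(self, queue, event, check_success=None, directory=None, permit_nonzero=False):
        super(Worker, self).__init__()
        self.queue = queue
        self.event = event  # shared flag: set once any job succeeds
        self.check_success = check_success
        self.directory = directory
        self.permit_nonzero = permit_nonzero

    # run() as in the snippet above, using self.queue and self.event

def make_workers(queue, nproc, **kwargs):
    """Create nproc workers sharing one Event; returns (workers, event)."""
    event = multiprocessing.Event()
    workers = [Worker(queue, event, **kwargs) for _ in range(nproc)]
    return workers, event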
Note that it is also possible to use a multiprocessing.Pool to avoid dealing with the queue and the workers. But as you need a synchronization primitive, it might be a bit more complicated to set up. Something like this should work:
def worker(job, success, check_success=None, directory=None, permit_nonzero=False):
    if success.is_set():
        return False
    stdout = mbkit.dispatch.cexectools.cexec([job], directory=directory, permit_nonzero=permit_nonzero)
    with open(job.rsplit('.', 1)[0] + '.log', 'w') as f_out:
        f_out.write(stdout)
    if callable(check_success) and check_success(job):
        success.set()
    return True

# ......
# In the class LocalJobServer
# ......
def sub(command, check_success=None, directory=None, nproc=1, permit_nonzero=False):
    mgr = multiprocessing.Manager()
    success = mgr.Event()
    pool = multiprocessing.Pool(nproc)
    run_args = [(cmd, success, check_success, directory, permit_nonzero) for cmd in command]
    result = pool.starmap(worker, run_args)
    pool.close()
    pool.join()
Note that I use a Manager here because you cannot pass a multiprocessing.Event directly as an argument through the Pool. You could also use the initializer and initargs arguments of the Pool to initialize a global success event in each worker and avoid relying on the Manager, but it is slightly more complicated (a sketch follows below).
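For reference, a rough sketch of that initializer/initargs approach; the _init_worker helper and the module-level _success name are illustrative assumptions, and the job-running body is elided:
import multiprocessing

_success = None  # set in each worker process by the Pool initializer

def _init_worker(event):
    global _success
    _success = event  # plain multiprocessing.Event, passed to workers at start-up

def worker(job, check_success=None, directory=None, permit_nonzero=False):
    if _success.is_set():
        return False
    # ... run the job and write its log here, as in the answer above ...
    # if callable(check_success) and check_success(job):
    #     _success.set()
    return True

def sub(command, check_success=None, directory=None, nproc=1, permit_nonzero=False):
    event = multiprocessing.Event()
    pool = multiprocessing.Pool(nproc, initializer=_init_worker, initargs=(event,))
    run_args = [(cmd, check_success, directory, permit_nonzero) for cmd in command]
    results = pool.starmap(worker, run_args)  # starmap: Python 3, as in the answer above
    pool.close()
    pool.join()
    return results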
This might not be the optimal solution, and any other suggestion is much appreciated, but I managed to solve the problem as such:
class Worker(multiprocessing.Process):
    """Simple manual worker class to execute jobs in the queue"""

    def __init__(self, queue, success, check_success=None, directory=None, permit_nonzero=False):
        super(Worker, self).__init__()
        self.check_success = check_success
        self.directory = directory
        self.permit_nonzero = permit_nonzero
        self.success = success
        self.queue = queue

    def run(self):
        """Method representing the process's activity"""
        for job in iter(self.queue.get, None):
            if self.success.value:
                continue
            stdout = mbkit.dispatch.cexectools.cexec([job], directory=self.directory, permit_nonzero=self.permit_nonzero)
            with open(job.rsplit('.', 1)[0] + '.log', 'w') as f_out:
                f_out.write(stdout)
            if callable(self.check_success) and self.check_success(job):
                self.success.value = int(True)
            time.sleep(1)


class LocalJobServer(object):
    """A local server to execute jobs via the multiprocessing module"""

    @staticmethod
    def sub(command, check_success=None, directory=None, nproc=1, permit_nonzero=False, time=None, *args, **kwargs):
        if check_success and not callable(check_success):
            msg = "check_success option requires a callable function/object: {0}".format(check_success)
            raise ValueError(msg)
        # Create a new queue
        queue = multiprocessing.Queue()
        success = multiprocessing.Value('i', int(False))
        # Create workers equivalent to the number of jobs
        workers = []
        for _ in range(nproc):
            wp = Worker(queue, success, check_success=check_success, directory=directory, permit_nonzero=permit_nonzero)
            wp.start()
            workers.append(wp)
        # Add each command to the queue
        for cmd in command:
            queue.put(cmd)
        # Stop workers from exiting without completion
        for _ in range(nproc):
            queue.put(None)
        # Wait for the workers to finish
        for wp in workers:
            wp.join(time)
Basically I'm creating a Value and providing it to each Process. Once a job is marked as successful, this variable gets updated. Each Process checks whether we have a success in if self.success.value: continue and, if so, just iterates over the remaining jobs in the Queue until it is empty.
The time.sleep(1) call is required to account for potential syncing delays amongst the processes. This is certainly not the most efficient approach but it works.

python threading running process in backend

I was trying to produce a text report file from a data source, which takes an enormous amount of time, and to simulate this I wrote the following code.
I planned to do it using a thread and thought t.daemon = True would solve the purpose, but the program doesn't exit until the operation is complete.
import random
import threading
import time
import logging

logging.basicConfig(level=logging.DEBUG,
                    format='(%(threadName)-10s) %(message)s',
                    )

def worker():
    """thread worker function"""
    t = threading.currentThread()
    tag = random.randint(1, 64)
    file_name = "/tmp/t-%d.txt" % (tag)
    logging.debug('started writing file - %s', file_name)
    f = open(file_name, 'w')
    for x in xrange(2 ** tag):  # total no of lines is 2**tag
        f.write("%d\n" % x)
    logging.debug('ending')
    f.close()
    return

# to simulate 5 files
for i in range(5):
    t = threading.Thread(target=worker)
    t.daemon = True
    t.start()

main_thread = threading.currentThread()
for t in threading.enumerate():
    if t is main_thread:
        continue
    logging.debug('joining %s', t.getName())
    t.join()
When I remove t.join(), some of the data is written before the program exits and the program exits quickly, but adding t.join() keeps the program running until the end. Is there any way to exit the program while the process keeps running in the background to complete the task?
You aren't looking for a daemon. In fact, you want to make sure your process isn't a daemon, because it will get killed once that's all that's left and your program exits. You are looking to detach your thread.
Note: lowered max to 28 in case I forgot to kill processes (and so it won't take my entire disk). You will need to kill each process individually if you want them to stop! ie "kill 13345" if you had the message "exiting main 13345" (where that thread is over 2**25)
Also note: thread joining will keep going until the end because your program is not done running and is waiting to join the threads.
Here's what you want:
import logging
import random
import multiprocessing
import time
import sys

# Make sure you don't write to stdout after this program stopped running and sub-processes are logging!
logging.basicConfig(level=logging.DEBUG,
                    format='(%(threadName)-10s) %(message)s',
                    )

def detach():
    p = multiprocessing.current_process()
    name = "worker" + str(p.pid)
    cc = multiprocessing.Process(name=name, target=worker)
    cc.daemon = False
    cc.start()
    logging.debug('Detached process: %s %s', p.name, p.pid)
    sys.stdout.flush()

def worker():
    """thread worker function"""
    # Should probably make sure there isn't already a thread processing this file already...
    tag = random.randint(5, 33)  # Stop at 33 to make sure we don't take over the harddrive (8GB)
    file_name = "/tmp/t-%d.txt" % (tag)
    if tag > 26:
        logging.warning('\n\nThe detached process resulting from this may need to be killed by hand.\n')
    logging.debug('started writing file - %s', file_name)
    # Changed your code to use "with", available in any recent python version
    with open(file_name, 'w') as f:
        for x in xrange(2 ** tag):  # total no of lines is 2**tag
            f.write("%d\n" % x)
    return

# Stackoverflow: Keep scrolling to see more code!
# to simulate 5 files
for i in range(5):
    t = multiprocessing.Process(target=detach)
    t.daemon = False
    t.start()
    time.sleep(0.5)
    t.terminate()

logging.debug("Terminating main program")
logging.debug("Terminating main program")

Python - appending to same file from multiple threads

I'm writing an app that appends lines to the same file from multiple threads.
I have a problem in which some lines are appended without a new line.
Any solution for this?
class PathThread(threading.Thread):
    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue

    def printfiles(self, p):
        for path, dirs, files in os.walk(p):
            for f in files:
                print(f, file=output)

    def run(self):
        while True:
            path = self.queue.get()
            self.printfiles(path)
            self.queue.task_done()

pathqueue = Queue.Queue()
paths = getThisFromSomeWhere()

output = codecs.open('file', 'a')

# spawn threads
for i in range(0, 5):
    t = PathThread(pathqueue)
    t.setDaemon(True)
    t.start()

# add paths to queue
for path in paths:
    pathqueue.put(path)

# wait for queue to get empty
pathqueue.join()
The solution is to write to the file in one thread only.
import Queue  # or queue in Python 3
import threading

class PrintThread(threading.Thread):
    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue

    def printfiles(self, p):
        for path, dirs, files in os.walk(p):
            for f in files:
                print(f, file=output)

    def run(self):
        while True:
            result = self.queue.get()
            self.printfiles(result)
            self.queue.task_done()

class ProcessThread(threading.Thread):
    def __init__(self, in_queue, out_queue):
        threading.Thread.__init__(self)
        self.in_queue = in_queue
        self.out_queue = out_queue

    def run(self):
        while True:
            path = self.in_queue.get()
            result = self.process(path)
            self.out_queue.put(result)
            self.in_queue.task_done()

    def process(self, path):
        # Do the processing job here and return the result
        return path

pathqueue = Queue.Queue()
resultqueue = Queue.Queue()
paths = getThisFromSomeWhere()

output = codecs.open('file', 'a')

# spawn threads to process
for i in range(0, 5):
    t = ProcessThread(pathqueue, resultqueue)
    t.setDaemon(True)
    t.start()

# spawn threads to print
t = PrintThread(resultqueue)
t.setDaemon(True)
t.start()

# add paths to queue
for path in paths:
    pathqueue.put(path)

# wait for queue to get empty
pathqueue.join()
resultqueue.join()
The fact that you never see jumbled text on the same line, or new lines in the middle of a line, is a clue that you actually don't need to synchronize appending to the file. The problem is that you use print to write to a single file handle. I suspect print is actually doing two operations on the file handle in one call, and those operations are racing between the threads. Basically print is doing something like:
file_handle.write('whatever_text_you_pass_it')
file_handle.write(os.linesep)
Because different threads are doing this simultaneously on the same file handle, sometimes one thread will get in its first write and then the other thread will get in its first write, and then you'll get two carriage returns in a row. Or really any permutation of these.
The simplest way to get around this is to stop using print and just use write directly. Try something like this:
output.write(f + os.linesep)
This still seems dangerous to me. I'm not sure what guarantees you can expect with all the threads using the same file handle object and contending for its internal buffer. Personally, I'd sidestep the whole issue and just have every thread get its own file handle. Also note that this works because the default for write buffer flushes is line-buffered, so when it does a flush to the file it ends on an os.linesep. To force line buffering, pass 1 as the third argument of open. You can test it out like this:
#!/usr/bin/env python
import os
import sys
import threading

def hello(file_name, message, count):
    with open(file_name, 'a', 1) as f:
        for i in range(0, count):
            f.write(message + os.linesep)

if __name__ == '__main__':
    # start a file
    with open('some.txt', 'w') as f:
        f.write('this is the beginning' + os.linesep)
    # make 10 threads write a million lines to the same file at the same time
    threads = []
    for i in range(0, 10):
        threads.append(threading.Thread(target=hello, args=('some.txt', 'hey im thread %d' % i, 1000000)))
        threads[-1].start()
    for t in threads:
        t.join()
    # check what the heck the file had
    uniq_lines = set()
    with open('some.txt', 'r') as f:
        for l in f:
            uniq_lines.add(l)
    for u in uniq_lines:
        sys.stdout.write(u)
The output looks like this:
hey im thread 6
hey im thread 7
hey im thread 9
hey im thread 8
hey im thread 3
this is the beginning
hey im thread 5
hey im thread 4
hey im thread 1
hey im thread 0
hey im thread 2
And maybe some more newlines where they shouldn't be?
Keep in mind that a shared resource should not be accessed by more than one thread at a time, otherwise unpredictable consequences may happen (this is called using 'atomic operations' when working with threads).
Take a look at this page for a little intuition: Thread Synchronization Mechanisms in Python
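If you do decide to serialize access to the shared handle, here is a minimal sketch of that idea; the lock and the writeline helper are illustrative additions to the question's code, which defines output:
import threading

output_lock = threading.Lock()  # guards the shared `output` file handle from the question

def writeline(text):
    # Only one thread at a time writes a complete line, so writes cannot interleave.
    with output_lock:
        output.write(text + '\n')
        output.flush()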

Using Python's Multiprocessing module to execute simultaneous and separate SEAWAT/MODFLOW model runs

I'm trying to complete 100 model runs on my 8-processor 64-bit Windows 7 machine. I'd like to run 7 instances of the model concurrently to decrease my total run time (approx. 9.5 min per model run). I've looked at several threads pertaining to the Multiprocessing module of Python, but am still missing something.
Using the multiprocessing module
How to spawn parallel child processes on a multi-processor system?
Python Multiprocessing queue
My Process:
I have 100 different parameter sets I'd like to run through SEAWAT/MODFLOW to compare the results. I have pre-built the model input files for each model run and stored them in their own directories. What I'd like to be able to do is have 7 models running at a time until all realizations have been completed. There needn't be communication between processes or display of results. So far I have only been able to spawn the models sequentially:
import os, subprocess
import multiprocessing as mp

ws = r'D:\Data\Users\jbellino\Project\stJohnsDeepening\model\xsec_a'
files = []
for f in os.listdir(ws + r'\fieldgen\reals'):
    if f.endswith('.npy'):
        files.append(f)

## def work(cmd):
##     return subprocess.call(cmd, shell=False)

def run(f, def_param=ws):
    real = f.split('_')[2].split('.')[0]
    print 'Realization %s' % real

    mf2k = r'c:\modflow\mf2k.1_19\bin\mf2k.exe '
    mf2k5 = r'c:\modflow\MF2005_1_8\bin\mf2005.exe '
    seawatV4 = r'c:\modflow\swt_v4_00_04\exe\swt_v4.exe '
    seawatV4x64 = r'c:\modflow\swt_v4_00_04\exe\swt_v4x64.exe '

    exe = seawatV4x64
    swt_nam = ws + r'\reals\real%s\ss\ss.nam_swt' % real

    os.system(exe + swt_nam)

if __name__ == '__main__':
    p = mp.Pool(processes=mp.cpu_count() - 1)  # leave 1 processor available for system and other processes
    tasks = range(len(files))
    results = []
    for f in files:
        r = p.map_async(run(f), tasks, callback=results.append)
I changed the if __name__ == '__main__': block to the following in the hope it would fix the lack of parallelism I feel is being imparted on the above script by the for loop. However, the model fails to even run (no Python error):
if __name__ == '__main__':
    p = mp.Pool(processes=mp.cpu_count() - 1)  # leave 1 processor available for system and other processes
    p.map_async(run, ((files[f],) for f in range(len(files))))
Any and all help is greatly appreciated!
EDIT 3/26/2012 13:31 EST
Using the "Manual Pool" method in #J.F. Sebastian's answer below I get parallel execution of my external .exe. Model realizations are called up in batches of 8 at a time, but it doesn't wait for those 8 runs to complete before calling up the next batch and so on:
from __future__ import print_function
import os, subprocess, sys
import multiprocessing as mp
from Queue import Queue
from threading import Thread

def run(f, ws):
    real = f.split('_')[-1].split('.')[0]
    print('Realization %s' % real)
    seawatV4x64 = r'c:\modflow\swt_v4_00_04\exe\swt_v4x64.exe '
    swt_nam = ws + r'\reals\real%s\ss\ss.nam_swt' % real
    subprocess.check_call([seawatV4x64, swt_nam])

def worker(queue):
    """Process files from the queue."""
    for args in iter(queue.get, None):
        try:
            run(*args)
        except Exception as e:
            # catch exceptions to avoid exiting the thread prematurely
            print('%r failed: %s' % (args, e,), file=sys.stderr)

def main():
    # populate files
    ws = r'D:\Data\Users\jbellino\Project\stJohnsDeepening\model\xsec_a'
    wdir = os.path.join(ws, r'fieldgen\reals')
    q = Queue()
    for f in os.listdir(wdir):
        if f.endswith('.npy'):
            q.put_nowait((os.path.join(wdir, f), ws))

    # start threads
    threads = [Thread(target=worker, args=(q,)) for _ in range(8)]
    for t in threads:
        t.daemon = True  # threads die if the program dies
        t.start()
    for _ in threads:
        q.put_nowait(None)  # signal no more files
    for t in threads:
        t.join()  # wait for completion

if __name__ == '__main__':
    mp.freeze_support()  # optional if the program is not frozen
    main()
No error traceback is available. The run() function performs its duty when called on a single model realization file, just as it does with multiple files. The only difference is that with multiple files, it is called len(files) times, though each of the instances immediately closes and only one model run is allowed to finish, at which time the script exits gracefully (exit code 0).
Adding some print statements to main() reveals some information about active thread counts as well as thread status (note that this is a test on only 8 of the realization files to make the screenshot more manageable; theoretically all 8 files should be run concurrently, however the behavior continues where they are spawned and immediately die, except one):
def main():
    # populate files
    ws = r'D:\Data\Users\jbellino\Project\stJohnsDeepening\model\xsec_a'
    wdir = os.path.join(ws, r'fieldgen\test')
    q = Queue()
    for f in os.listdir(wdir):
        if f.endswith('.npy'):
            q.put_nowait((os.path.join(wdir, f), ws))

    # start threads
    threads = [Thread(target=worker, args=(q,)) for _ in range(mp.cpu_count())]
    for t in threads:
        t.daemon = True  # threads die if the program dies
        t.start()
    print('Active Count a', threading.activeCount())
    for _ in threads:
        print(_)
        q.put_nowait(None)  # signal no more files
    for t in threads:
        print(t)
        t.join()  # wait for completion
    print('Active Count b', threading.activeCount())
The line which reads "D:\\Data\\Users..." is the error information thrown when I manually stop the model from running to completion. Once I stop the model running, the remaining thread status lines are reported and the script exits.
EDIT 3/26/2012 16:24 EST
SEAWAT does allow concurrent execution as I've done this in the past, spawning instances manually using iPython and launching from each model file folder. This time around, I'm launching all model runs from a single location, namely the directory where my script resides. It looks like the culprit may be in the way SEAWAT is saving some of the output. When SEAWAT is run, it immediately creates files pertaining to the model run. One of these files is not being saved to the directory in which the model realization is located, but in the top directory where the script is located. This is preventing any subsequent threads from saving the same file name in the same location (which they all want to do since these filenames are generic and non-specific to each realization). The SEAWAT windows were not staying open long enough for me to read or even see that there was an error message, I only realized this when I went back and tried to run the code using iPython which directly displays the printout from SEAWAT instead of opening a new window to run the program.
I am accepting @J.F. Sebastian's answer as it is likely that once I resolve this model-executable issue, the threading code he has provided will get me where I need to be.
FINAL CODE
Added the cwd argument to subprocess.check_call to start each instance of SEAWAT in its own directory. Very key.
from __future__ import print_function
import os, subprocess, sys
import multiprocessing as mp
from Queue import Queue
from threading import Thread
import threading

def run(f, ws):
    real = f.split('_')[-1].split('.')[0]
    print('Realization %s' % real)
    seawatV4x64 = r'c:\modflow\swt_v4_00_04\exe\swt_v4x64.exe '
    cwd = ws + r'\reals\real%s\ss' % real
    swt_nam = ws + r'\reals\real%s\ss\ss.nam_swt' % real
    subprocess.check_call([seawatV4x64, swt_nam], cwd=cwd)

def worker(queue):
    """Process files from the queue."""
    for args in iter(queue.get, None):
        try:
            run(*args)
        except Exception as e:
            # catch exceptions to avoid exiting the thread prematurely
            print('%r failed: %s' % (args, e,), file=sys.stderr)

def main():
    # populate files
    ws = r'D:\Data\Users\jbellino\Project\stJohnsDeepening\model\xsec_a'
    wdir = os.path.join(ws, r'fieldgen\reals')
    q = Queue()
    for f in os.listdir(wdir):
        if f.endswith('.npy'):
            q.put_nowait((os.path.join(wdir, f), ws))

    # start threads
    threads = [Thread(target=worker, args=(q,)) for _ in range(mp.cpu_count() - 1)]
    for t in threads:
        t.daemon = True  # threads die if the program dies
        t.start()
    for _ in threads:
        q.put_nowait(None)  # signal no more files
    for t in threads:
        t.join()  # wait for completion

if __name__ == '__main__':
    mp.freeze_support()  # optional if the program is not frozen
    main()
I don't see any computations in the Python code. If you just need to execute several external programs in parallel, it is sufficient to use subprocess to run the programs and the threading module to maintain a constant number of processes running, but the simplest code uses multiprocessing.Pool:
#!/usr/bin/env python
import os
import multiprocessing as mp

def run(filename_def_param):
    filename, def_param = filename_def_param  # unpack arguments
    ...  # call external program on `filename`

def safe_run(*args, **kwargs):
    """Call run(), catch exceptions."""
    try: run(*args, **kwargs)
    except Exception as e:
        print("error: %s run(*%r, **%r)" % (e, args, kwargs))

def main():
    # populate files
    ws = r'D:\Data\Users\jbellino\Project\stJohnsDeepening\model\xsec_a'
    workdir = os.path.join(ws, r'fieldgen\reals')
    files = ((os.path.join(workdir, f), ws)
             for f in os.listdir(workdir) if f.endswith('.npy'))

    # start processes
    pool = mp.Pool()  # use all available CPUs
    pool.map(safe_run, files)

if __name__ == "__main__":
    mp.freeze_support()  # optional if the program is not frozen
    main()
If there are many files then pool.map() could be replaced by for _ in pool.imap_unordered(safe_run, files): pass.
There is also multiprocessing.dummy.Pool, which provides the same interface as multiprocessing.Pool but uses threads instead of processes; it might be more appropriate in this case.
You don't need to keep some CPUs free. Just use a command that starts your executables with a low priority (on Linux it is a nice program).
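For example, a possible sketch of launching the external program at low priority; the command here is a placeholder, and the Windows branch relies on the priority-class constants added to subprocess in Python 3.7:
import subprocess
import sys

cmd = ['/path/to/external_program', 'input_file']  # placeholder command

if sys.platform == 'win32':
    # Python 3.7+ exposes Windows priority classes as creationflags.
    subprocess.check_call(cmd, creationflags=subprocess.BELOW_NORMAL_PRIORITY_CLASS)
else:
    # On Linux, prepend "nice -n 19" so the run gets the lowest CPU priority.
    subprocess.check_call(['nice', '-n', '19'] + cmd)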
ThreadPoolExecutor example
concurrent.futures.ThreadPoolExecutor would be both simple and sufficient, but it requires a 3rd-party dependency on Python 2.x (it has been in the stdlib since Python 3.2).
#!/usr/bin/env python
import os
import concurrent.futures

def run(filename, def_param):
    ...  # call external program on `filename`

# populate files
ws = r'D:\Data\Users\jbellino\Project\stJohnsDeepening\model\xsec_a'
wdir = os.path.join(ws, r'fieldgen\reals')
files = (os.path.join(wdir, f) for f in os.listdir(wdir) if f.endswith('.npy'))

# start threads
with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor:
    future_to_file = dict((executor.submit(run, f, ws), f) for f in files)

    for future in concurrent.futures.as_completed(future_to_file):
        f = future_to_file[future]
        if future.exception() is not None:
            print('%r generated an exception: %s' % (f, future.exception()))
        # run() doesn't return anything so `future.result()` is always `None`
Or if we ignore exceptions raised by run():
from itertools import repeat
...  # the same

# start threads
with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor:
    executor.map(run, files, repeat(ws))
    # run() doesn't return anything so `map()` results can be ignored
subprocess + threading (manual pool) solution
#!/usr/bin/env python
from __future__ import print_function
import os
import subprocess
import sys
from Queue import Queue
from threading import Thread

def run(filename, def_param):
    ...  # define exe, swt_nam
    subprocess.check_call([exe, swt_nam])  # run external program

def worker(queue):
    """Process files from the queue."""
    for args in iter(queue.get, None):
        try:
            run(*args)
        except Exception as e:
            # catch exceptions to avoid exiting the thread prematurely
            print('%r failed: %s' % (args, e,), file=sys.stderr)

# start threads
q = Queue()
threads = [Thread(target=worker, args=(q,)) for _ in range(8)]
for t in threads:
    t.daemon = True  # threads die if the program dies
    t.start()

# populate files
ws = r'D:\Data\Users\jbellino\Project\stJohnsDeepening\model\xsec_a'
wdir = os.path.join(ws, r'fieldgen\reals')
for f in os.listdir(wdir):
    if f.endswith('.npy'):
        q.put_nowait((os.path.join(wdir, f), ws))

for _ in threads: q.put_nowait(None)  # signal no more files
for t in threads: t.join()  # wait for completion
Here is my way to maintain a minimum number of threads in memory. It is a combination of the threading and multiprocessing modules. It may be unusual compared to the techniques respected fellow members have explained above, but it may be worth considering. For the sake of explanation, I am taking a scenario of crawling a minimum of 5 websites at a time.
So here it is:
# importing dependencies.
from multiprocessing import Process
from threading import Thread
import threading

# Crawler function
def crawler(domain):
    # define crawler technique here.
    output.write(scrapeddata + "\n")
    pass
Next is the threadController function. This function controls the flow of threads into main memory. It keeps activating threads to maintain the threadNum "minimum" limit, i.e. 5. It also won't exit until all active threads (activeCount) have finished.
It maintains a minimum of threadNum (5) startProcess function threads (these threads will eventually start the Processes from processList while joining them with a timeout of 60 seconds). After starting threadController, there are 2 threads which are not included in the above limit of 5, i.e. the main thread and the threadController thread itself. That is why threading.activeCount() != 2 has been used.
def threadController():
    print "Thread count before child thread starts is:-", threading.activeCount(), len(processList)
    # starting first thread. This will make the activeCount=3
    Thread(target=startProcess).start()
    # loop while the process list is not empty OR active threads have not finished up.
    while len(processList) != 0 or threading.activeCount() != 2:
        if (threading.activeCount() < (threadNum + 2) and  # if count of active threads is less than the minimum AND
                len(processList) != 0):                    # processList is not empty
            Thread(target=startProcess).start()            # this line starts the startProcess function as a separate thread **
The startProcess function, as a separate thread, starts Processes from processList. The purpose of this function (** started as a different thread) is that it becomes a parent thread for the Processes. So when it joins them with a timeout of 60 seconds, this stops the startProcess thread from moving ahead, but it doesn't stop threadController. This way, threadController works as required.
def startProcess():
    pr = processList.pop(0)
    pr.start()
    pr.join(60.00)  # joining the process with a timeout of 60 seconds as a float.

if __name__ == '__main__':
    # a file holding a list of domains
    domains = open("Domains.txt", "r").read().split("\n")
    output = open("test.txt", "a")

    processList = []  # process list
    threadNum = 5     # number of thread-initiated processes to be run at one time

    # making the process list
    for r in range(0, len(domains), 1):
        domain = domains[r].strip()
        p = Process(target=crawler, args=(domain,))
        processList.append(p)  # making a list of performer processes.

    # starting the threadController as a separate thread.
    mt = Thread(target=threadController)
    mt.start()
    mt.join()  # won't let go next until the threadController thread finishes.

    output.close()
    print "Done"
Besides maintaining a minimum number of threads in memory, my aim was also to have something that could avoid stuck threads or processes in memory. I did this using the timeout function.
My apologies for any typing mistakes.
I hope this construction helps anyone in this world.
Regards,
Vikas Gautam

python interprocess querying/control

I have this Python based service daemon which is doing a lot of multiplexed IO (select).
From another script (also Python) I want to query this service daemon about status/information and/or control the processing (e.g. pause it, shut it down, change some parameters, etc).
What is the best way to send control messages ("from now on you process like this!") and query processed data ("what was the result of that?") using python?
I read somewhere that named pipes might work, but don't know that much about named pipes, especially in python - and whether there are any better alternatives.
Both the background service daemon AND the frontend will be programmed by me, so all options are open :)
I am using Linux.
Pipes and named pipes are a good solution for communicating between different processes.
Pipes work like a shared memory buffer, but with an interface that mimics a simple file on each of the two ends. One process writes data at one end of the pipe, and another reads that data at the other end.
Named pipes are similar, except that the pipe is actually associated with a real file on your computer.
More details at
http://www.softpanorama.org/Scripting/pipes.shtml
In Python, named pipe files are created with the os.mkfifo call:
os.mkfifo(filename)
The two processes then open this pipe as a file:
fifo_out = os.open(filename, os.O_WRONLY)
fifo_in = open(filename, 'r')
To write:
os.write(fifo_out, 'xxxx')
To read:
lines = fifo_in.readline()
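Putting those pieces together, here is a minimal end-to-end sketch with a forked child as the reader and the parent as the writer; the FIFO path and the messages are illustrative, and it assumes a Unix-like system such as the Linux box mentioned in the question:
import os

fifo_path = '/tmp/demo_fifo'  # illustrative path
if not os.path.exists(fifo_path):
    os.mkfifo(fifo_path)

pid = os.fork()
if pid == 0:
    # Child: reads from the pipe; open() blocks until a writer appears.
    with open(fifo_path, 'r') as fifo_in:
        for line in fifo_in:
            print('child got: %s' % line.strip())
    os._exit(0)
else:
    # Parent: writes to the pipe; open() blocks until the reader opens its end.
    with open(fifo_path, 'w') as fifo_out:
        fifo_out.write('hello\n')
        fifo_out.write('world\n')
    os.waitpid(pid, 0)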
Edit: Adding links from SO
Create a temporary FIFO (named pipe) in Python?
https://stackoverflow.com/search?q=python+named+pipes
You may want to read more on "IPC and Python"
http://www.freenetpages.co.uk/hp/alan.gauld/tutipc.htm
The best way to do IPC is to use a message Queue in Python, as below.
Server process, server.py (run this before running client.py and interact.py):
from multiprocessing.managers import BaseManager
import Queue

queue1 = Queue.Queue()
queue2 = Queue.Queue()

class QueueManager(BaseManager):
    pass

QueueManager.register('get_queue1', callable=lambda: queue1)
QueueManager.register('get_queue2', callable=lambda: queue2)

m = QueueManager(address=('', 50000), authkey='abracadabra')
s = m.get_server()
s.serve_forever()
The interactor, which handles the I/O, interact.py:
from multiprocessing.managers import BaseManager
import threading
import sys

class QueueManager(BaseManager):
    pass

QueueManager.register('get_queue1')
QueueManager.register('get_queue2')

m = QueueManager(address=('localhost', 50000), authkey='abracadabra')
m.connect()
queue1 = m.get_queue1()
queue2 = m.get_queue2()

def read():
    while True:
        sys.stdout.write(queue2.get())

def write():
    while True:
        queue1.put(sys.stdin.readline())

threads = []

threadr = threading.Thread(target=read)
threadr.start()
threads.append(threadr)

threadw = threading.Thread(target=write)
threadw.start()
threads.append(threadw)

for thread in threads:
    thread.join()
The client program Client.py
from multiprocessing.managers import BaseManager
import sys
import string
import os

class QueueManager(BaseManager):
    pass

QueueManager.register('get_queue1')
QueueManager.register('get_queue2')

m = QueueManager(address=('localhost', 50000), authkey='abracadabra')
m.connect()
queue1 = m.get_queue1()
queue2 = m.get_queue2()

class RedirectOutput:
    def __init__(self, stdout):
        self.stdout = stdout

    def write(self, s):
        queue2.put(s)

class RedirectInput:
    def __init__(self, stdin):
        self.stdin = stdin

    def readline(self):
        return queue1.get()

# redirect standard output and input
sys.stdout = RedirectOutput(sys.stdout)
sys.stdin = RedirectInput(sys.stdin)

# The test program which will take input and produce output
Text = raw_input("Enter Text:")
print "you have entered:", Text

def x():
    while True:
        x = raw_input("Enter 'exit' to end and some thing else to continue")
        print x
        if 'exit' in x:
            break

x()
This can be used to communicate between two processes over a network or on the same machine.
Remember that the interactor and the server process will not terminate until you kill them manually.
