(New to Python and OO - I apologize in advance if I'm being stupid here)
I'm trying to define a Python 3 class such that when an instance is created two subprocesses are also created. These subprocesses do some work in the background (sending and listening for UDP packets). The subprocesses also need to communicate with each other and with the instance (updating instance attributes based on what is received from UDP, among other things).
I am creating my subprocesses with os.fork because I don't understand how to use the subprocess module to send multiple file descriptors to child processes - maybe this is part of my problem.
The problem I am running into is how to kill the child processes when the instance is destroyed. My understanding is I shouldn't use destructors in Python because stuff should get cleaned up and garbage collected automatically by Python. In any case, the following code leaves the children running after it exits.
What is the right approach here?
import os
from time import sleep

class A:
    def __init__(self):
        sfp, pts = os.pipe() # senderFromParent, parentToSender
        pfs, stp = os.pipe() # parentFromSender, senderToParent
        pfl, ltp = os.pipe() # parentFromListener, listenerToParent
        sfl, lts = os.pipe() # senderFromListener, listenerToSender
        pid = os.fork()
        if pid:
            # parent
            os.close(sfp)
            os.close(stp)
            os.close(lts)
            os.close(ltp)
            os.close(sfl)
            self.pts = os.fdopen(pts, 'w') # allow creator of A inst to
            self.pfs = os.fdopen(pfs, 'r') # send and receive messages
            self.pfl = os.fdopen(pfl, 'r') # to/from sender and
        else:                              # listener processes
            # sender or listener
            os.close(pts)
            os.close(pfs)
            os.close(pfl)
            pid = os.fork()
            if pid:
                # sender
                os.close(ltp)
                os.close(lts)
                sender(self, sfp, stp, sfl)
            else:
                # listener
                os.close(stp)
                os.close(sfp)
                os.close(sfl)
                listener(self, ltp, lts)

def sender(a, sfp, stp, sfl):
    sfp = os.fdopen(sfp, 'r') # receive messages from parent
    stp = os.fdopen(stp, 'w') # send messages to parent
    sfl = os.fdopen(sfl, 'r') # receive messages from listener
    while True:
        # send UDP packets based on messages from parent and process
        # responses from listener (some responses passed back to parent)
        print("Sender alive")
        sleep(1)

def listener(a, ltp, lts):
    ltp = os.fdopen(ltp, 'w') # send messages to parent
    lts = os.fdopen(lts, 'w') # send messages to sender
    while True:
        # listen for and process incoming UDP packets, sending some
        # to sender and some to parent
        print("Listener alive")
        sleep(1)

a = A()
Running the above produces:
Sender alive
Listener alive
Sender alive
Listener alive
...
Actually, you should use destructors. Python objects have a __del__ method, which is called just before the object is garbage-collected.
In your case, you should define
def __del__(self):
...
within your class A, which sends the appropriate kill signals to your child processes. Don't forget to store the child PIDs in your parent process, of course.
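A minimal sketch of that idea, assuming the parent records both child PIDs (the attribute names self.sender_pid and self.listener_pid are invented for illustration; in the posted code the listener is forked from the sender, so its PID would have to be reported back to the parent somehow):
import os, signal

class A:
    # ... __init__ as above, additionally storing the PIDs of the
    # sender and listener children (hypothetical attributes)

    def __del__(self):
        for pid in (self.sender_pid, self.listener_pid):
            try:
                os.kill(pid, signal.SIGTERM)   # ask the child to exit
                os.waitpid(pid, 0)             # reap a direct child so it does not linger as a zombie
            except (ProcessLookupError, ChildProcessError):
                pass                           # child already gone, or not a direct child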
As suggested here, you can create a child process using the multiprocessing module with the daemon=True flag.
Example:
from multiprocessing import Process

def f(name):
    print('hello', name)

p = Process(target=f, args=('bob',))
p.daemon = True
p.start()
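The relevant property for this question is that daemonic children are terminated automatically when the parent process exits, so no explicit cleanup is needed (they cannot, however, spawn children of their own). A small self-contained sketch, with an invented background_worker standing in for the sender/listener loops:
from multiprocessing import Process
import time

def background_worker(name):
    # stand-in for the sender/listener loops from the question
    while True:
        print(name, "alive")
        time.sleep(1)

if __name__ == "__main__":
    sender = Process(target=background_worker, args=("sender",), daemon=True)
    listener = Process(target=background_worker, args=("listener",), daemon=True)
    sender.start()
    listener.start()
    time.sleep(3)
    # both daemonic children are terminated when the main process exits here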
There's no point trying to reinvent the wheel. subprocess does all you want and more, though multiprocessing will simplify things, so we'll use that.
You can use multiprocessing.Pipe to create connections and send messages back and forth between a pair of processes. You can make a pipe "duplex", so both ends can send and receive, if that's what you need. You can use multiprocessing.Manager to create a shared Namespace between processes (sharing state between the listener, sender and parent). There is a caveat with using manager-backed objects such as manager.list, manager.dict or manager.Namespace: any mutable object assigned to them will not have changes made to that object propagated until it is reassigned to the managed object.
e.g.
namespace.attr = {}
# change below not cascaded to other processes
namespace.attr["key"] = "value"
# force change to other processes
namespace.attr = namespace.attr
If you need to have more than one process write to the same attribute then you will need to use synchronisation to prevent concurrent modification, where one process wipes out changes made by another.
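A sketch of what that synchronisation might look like, using a manager Lock around the read-modify-write of the namespace attribute (the lock and attribute names are just illustrative):
from multiprocessing import Process, Manager

def bump(namespace, lock):
    # the read-modify-write must happen under the lock, otherwise two processes
    # can read the same dict, modify it independently, and one reassignment
    # will overwrite the other
    with lock:
        attr = namespace.attr
        attr["count"] = attr.get("count", 0) + 1
        namespace.attr = attr  # reassign to propagate the change

if __name__ == "__main__":
    manager = Manager()
    namespace = manager.Namespace()
    namespace.attr = {}
    lock = manager.Lock()
    procs = [Process(target=bump, args=(namespace, lock)) for _ in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print(namespace.attr)  # {'count': 4}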
Example code:
from multiprocessing import Process, Pipe, Manager

class Reader:
    def __init__(self, writer_conn, namespace):
        self.writer_conn = writer_conn
        self.namespace = namespace

    def read(self):
        self.namespace.msgs_recv = 0
        with self.writer_conn:
            try:
                while True:
                    obj = self.writer_conn.recv()
                    self.namespace.msgs_recv += 1
                    print("Reader got:", repr(obj))
            except EOFError:
                print("Reader has no more data to receive")

class Writer:
    def __init__(self, reader_conn, namespace):
        self.reader_conn = reader_conn
        self.namespace = namespace

    def write(self, msgs):
        self.namespace.msgs_sent = 0
        with self.reader_conn:
            for msg in msgs:
                self.reader_conn.send(msg)
                self.namespace.msgs_sent += 1

def create_child_processes(reader, writer, msgs):
    p_write = Process(target=Writer.write, args=(writer, msgs))
    p_write.start()

    # This is very important, otherwise the reader will hang after the writer
    # has finished. It must come after p_write.start() but before
    # p_read.start(). Look up how file descriptors are inherited by child
    # processes on Unix: any open fd to the write side of a pipe keeps the
    # read end from ever seeing EOF.
    writer.reader_conn.close()

    p_read = Process(target=Reader.read, args=(reader,))
    p_read.start()

    return p_read, p_write

def run_mp_pipe():
    manager = Manager()
    namespace = manager.Namespace()
    read_conn, write_conn = Pipe()

    reader = Reader(read_conn, namespace)
    writer = Writer(write_conn, namespace)

    p_read, p_write = create_child_processes(reader, writer,
        msgs=["hello", "world", {"key", "value"}])

    print("starting")
    p_write.join()
    p_read.join()
    print("done")

    print(namespace)
    assert namespace.msgs_sent == namespace.msgs_recv

if __name__ == "__main__":
    run_mp_pipe()
Output:
starting
Reader got: 'hello'
Reader got: 'world'
Reader got: {'key', 'value'}
Reader has no more data to receive
done
Namespace(msgs_recv=3, msgs_sent=3)
I have 4 different Python custom objects and an events queue. Each object has a method that allows it to retrieve an event from the shared events queue, process it if the type is the desired one, and then put a new event on the same events queue, allowing other processes to process it.
Here's an example.
import multiprocessing as mp

class CustomObject:

    def __init__(self, events_queue: mp.Queue) -> None:
        self.events_queue = events_queue

    def process_events_queue(self) -> None:
        event = self.events_queue.get()
        if type(event) == SpecificEventDataTypeForThisClass:
            # do something and create a new_event
            self.events_queue.put(new_event)
        else:
            self.events_queue.put(event)

    # there are other methods specific to each object
These 4 objects have specific tasks to do, but they all share this same structure. Since I need to "simulate" the production conditions, I want them all to run at the same time, independently from each other.
Here's just an example of what I want to do, if possible.
import multiprocessing as mp
import CustomObject

if __name__ == '__main__':
    events_queue = mp.Queue()
    data_provider = mp.Process(target=CustomObject, args=(events_queue,))
    portfolio = mp.Process(target=CustomObject, args=(events_queue,))
    engine = mp.Process(target=CustomObject, args=(events_queue,))
    broker = mp.Process(target=CustomObject, args=(events_queue,))

    while True:
        data_provider.process_events_queue()
        portfolio.process_events_queue()
        engine.process_events_queue()
        broker.process_events_queue()
My idea is to run each object in a separate process, allowing them to communicate with events shared through the events_queue. So my question is, how can I do that?
The problem is that obj = mp.Process(target=CustomObject, args=(events_queue,)) returns a Process instance and I can't access the CustomObject methods from it. Also, is there a smarter way to achieve what I want?
Processes require a function to run, which defines what the process is actually doing. Once this function exits (and there are no non-daemon threads) the process is done. This is similar to how Python itself always executes a __main__ script.
If you do mp.Process(target=CustomObject, args=(events_queue,)) that just tells the process to call CustomObject - which instantiates it once and then is done. This is not what you want, unless the class actually performs work when instantiated - which is a bad idea for other reasons.
Instead, you must define a main function or method that handles what you need: "communicate with events shared through the events_queue". This function should listen to the queue and take action depending on the events received.
A simple implementation looks like this:
import os, time
from multiprocessing import Queue, Process

class Worker:
    # separate input and output for simplicity
    def __init__(self, commands: Queue, results: Queue):
        self.commands = commands
        self.results = results

    # our main function to be run by a process
    def main(self):
        # each process should handle more than one command
        while True:
            value = self.commands.get()
            # pick a well-defined signal to detect "no more work"
            if value is None:
                self.results.put(None)
                break
            # do whatever needs doing
            result = self.do_stuff(value)
            print(os.getpid(), ':', self, 'got', value, 'put', result)
            time.sleep(0.2)  # pretend we do something
            # pass on more work if required
            self.results.put(result)

    # placeholder for what needs doing
    def do_stuff(self, value):
        raise NotImplementedError
This is a template for a class that just keeps on processing events. The do_stuff method must be overridden to define what actually happens.
class AddTwo(Worker):
    def do_stuff(self, value):
        return value + 2

class TimesThree(Worker):
    def do_stuff(self, value):
        return value * 3

class Printer(Worker):
    def do_stuff(self, value):
        print(value)
This already defines fully working process payloads: Process(target=TimesThree(in_queue, out_queue).main) schedules the main method in a process, listening for and responding to commands.
Running this mainly requires connecting the individual components:
if __name__ == '__main__':
    # bookkeeping of resources we create
    processes = []
    start_queue = Queue()

    # connect our workers via queues
    queue = start_queue
    for element in (AddTwo, TimesThree, Printer):
        instance = element(queue, Queue())
        # we run the main method in processes
        processes.append(Process(target=instance.main))
        queue = instance.results

    # start all processes
    for process in processes:
        process.start()

    # send input, but do not wait for output
    start_queue.put(1)
    start_queue.put(248124)
    start_queue.put(-256)
    # send shutdown signal
    start_queue.put(None)

    # wait for processes to shutdown
    for process in processes:
        process.join()
Note that you do not need classes for this. You can also compose functions for a similar effect, as long as everything is pickle-able:
import os, time
from multiprocessing import Queue, Process

def main(commands, results, do_stuff):
    while True:
        value = commands.get()
        if value is None:
            results.put(None)
            break
        result = do_stuff(value)
        print(os.getpid(), ':', do_stuff, 'got', value, 'put', result)
        time.sleep(0.2)
        results.put(result)

def times_two(value):
    return value * 2

if __name__ == '__main__':
    in_queue, out_queue = Queue(), Queue()
    worker = Process(target=main, args=(in_queue, out_queue, times_two))
    worker.start()
    for message in (1, 3, 5, None):
        in_queue.put(message)
    while True:
        reply = out_queue.get()
        if reply is None:
            break
        print('result:', reply)
I'm creating a multiprocessing.Queue in Python and a number of multiprocessing.Process workers that consume jobs from this Queue.
I would like to add a function call that is executed after every job, which checks if a specific task has succeeded. If so, I would like to empty the Queue and terminate execution.
My Process class is:
class Worker(multiprocessing.Process):

    def __init__(self, queue, check_success=None, directory=None, permit_nonzero=False):
        super(Worker, self).__init__()
        self.check_success = check_success
        self.directory = directory
        self.permit_nonzero = permit_nonzero
        self.queue = queue

    def run(self):
        for job in iter(self.queue.get, None):
            stdout = mbkit.dispatch.cexectools.cexec([job], directory=self.directory, permit_nonzero=self.permit_nonzero)
            with open(job.rsplit('.', 1)[0] + '.log', 'w') as f_out:
                f_out.write(stdout)
            if callable(self.check_success) and self.check_success(job):
                # Terminate all remaining jobs here
                pass
And my Queue is set up here:
class LocalJobServer(object):

    @staticmethod
    def sub(command, check_success=None, directory=None, nproc=1, permit_nonzero=False, time=None, *args, **kwargs):
        if check_success and not callable(check_success):
            msg = "check_success option requires a callable function/object: {0}".format(check_success)
            raise ValueError(msg)
        # Create a new queue
        queue = multiprocessing.Queue()
        # Create workers equivalent to the number of jobs
        workers = []
        for _ in range(nproc):
            wp = Worker(queue, check_success=check_success, directory=directory, permit_nonzero=permit_nonzero)
            wp.start()
            workers.append(wp)
        # Add each command to the queue
        for cmd in command:
            queue.put(cmd, timeout=time)
        # Stop workers from exiting without completion
        for _ in range(nproc):
            queue.put(None)
        for wp in workers:
            wp.join()
The function call mbkit.dispatch.cexectools.cexec() is a wrapper around subprocess.Popen and returns p.stdout.
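mbkit itself is not shown in the question; a rough sketch of what such a wrapper around subprocess.Popen might look like (not the real implementation, just an assumption for context):
import subprocess

def cexec(cmd, directory=None, permit_nonzero=False):
    # run the command, capture stdout, optionally tolerate non-zero exit codes
    p = subprocess.Popen(cmd, cwd=directory, stdout=subprocess.PIPE,
                         universal_newlines=True)
    stdout, _ = p.communicate()
    if p.returncode != 0 and not permit_nonzero:
        raise RuntimeError("command {0} failed with code {1}".format(cmd, p.returncode))
    return stdout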
In the Worker class, I've written the conditional to check if a job succeeded, and tried emptying the remaining jobs in the Queue using a while loop, i.e. my Worker.run() function looked like this:
def run(self):
    for job in iter(self.queue.get, None):
        stdout = mbkit.dispatch.cexectools.cexec([job], directory=self.directory, permit_nonzero=self.permit_nonzero)
        with open(job.rsplit('.', 1)[0] + '.log', 'w') as f_out:
            f_out.write(stdout)
        if callable(self.check_success) and self.check_success(job):
            break
    while not self.queue.empty():
        self.queue.get()
Although this works sometimes, it usually deadlocks and my only option is to Ctrl-C. I am aware that .empty() is unreliable, thus my question.
Any advice on how I can implement such an early termination functionality?
You do not have a deadlock here. It is just linked to the behavior of multiprocessing.Queue: the get method is blocking by default, so when you call get on an empty queue, the call stalls, waiting for the next element to be ready. Some of your workers will stall because when you use your while not self.queue.empty() loop to empty the queue, you remove all the None sentinels, and some of your workers then block on the empty Queue, like in this code:
from multiprocessing import Queue

q = Queue()
for e in iter(q.get, None):
    print(e)
To be notified when the queue is empty, you need to use a non-blocking call. You can for instance use q.get_nowait, or a timeout in q.get(timeout=1). Both raise a multiprocessing.queues.Empty exception when the queue is empty. So you should replace your Worker's for job in iter(...): loop with something like:
while not queue.empty():
    try:
        job = queue.get(timeout=.1)
    except multiprocessing.queues.Empty:
        continue
    # Do stuff with your job
This way the worker is never stuck at any point.
For the synchronization part, I would recommend using a synchronization primitive such as multiprocessing.Condition or multiprocessing.Event. This is cleaner than a Value, as they are designed for this purpose. Something like this should help:
def run(self):
    while not self.queue.empty():
        try:
            job = self.queue.get(timeout=.1)
        except multiprocessing.queues.Empty:
            continue
        if self.event.is_set():
            continue
        stdout = mbkit.dispatch.cexectools.cexec([job], directory=self.directory, permit_nonzero=self.permit_nonzero)
        with open(job.rsplit('.', 1)[0] + '.log', 'w') as f_out:
            f_out.write(stdout)
        if callable(self.check_success) and self.check_success(job):
            self.event.set()
    print("Worker {} terminated cleanly".format(self.name))
with event = multiprocessing.Event().
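That is, the Event is created once in LocalJobServer.sub and handed to every Worker, which stores it as self.event. A sketch, assuming Worker.__init__ is extended with an extra event parameter:
# in LocalJobServer.sub, before the workers are created
event = multiprocessing.Event()
workers = []
for _ in range(nproc):
    wp = Worker(queue, event, check_success=check_success,
                directory=directory, permit_nonzero=permit_nonzero)
    wp.start()
    workers.append(wp)

# and in Worker.__init__
#     self.event = event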
Note that it is also possible to use a multiprocessing.Pool to avoid dealing with the queue and the workers. But as you need a synchronization primitive, it might be a bit more complicated to set up. Something like this should work:
def worker(job, success, check_success=None, directory=None, permit_nonzero=False):
    if success.is_set():
        return False
    stdout = mbkit.dispatch.cexectools.cexec([job], directory=directory, permit_nonzero=permit_nonzero)
    with open(job.rsplit('.', 1)[0] + '.log', 'w') as f_out:
        f_out.write(stdout)
    if callable(check_success) and check_success(job):
        success.set()
    return True

# ......
# In the class LocalJobServer
# .....

def sub(command, check_success=None, directory=None, nproc=1, permit_nonzero=False):
    mgr = multiprocessing.Manager()
    success = mgr.Event()
    pool = multiprocessing.Pool(nproc)
    run_args = [(cmd, success, check_success, directory, permit_nonzero) for cmd in command]
    result = pool.starmap(worker, run_args)
    pool.close()
    pool.join()
Note here that I use a Manager, as you cannot pass a multiprocessing.Event directly as a task argument. You could also use the initializer and initargs arguments of the Pool to set up a global success event in each worker and avoid relying on the Manager, but it is slightly more complicated.
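For reference, that initializer/initargs variant would look roughly like this (the global name and overall structure are just illustrative):
success = None  # set in each pool worker by the initializer

def init_worker(shared_event):
    # runs once in every pool worker process
    global success
    success = shared_event

def worker(job, check_success=None, directory=None, permit_nonzero=False):
    if success.is_set():
        return False
    # ... run the job as in the Manager version above
    return True

def sub(command, check_success=None, directory=None, nproc=1, permit_nonzero=False):
    event = multiprocessing.Event()
    pool = multiprocessing.Pool(nproc, initializer=init_worker, initargs=(event,))
    run_args = [(cmd, check_success, directory, permit_nonzero) for cmd in command]
    pool.starmap(worker, run_args)
    pool.close()
    pool.join()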
This might not be the optimal solution, and any other suggestion is much appreciated, but I managed to solve the problem as follows:
class Worker(multiprocessing.Process):
    """Simple manual worker class to execute jobs in the queue"""

    def __init__(self, queue, success, check_success=None, directory=None, permit_nonzero=False):
        super(Worker, self).__init__()
        self.check_success = check_success
        self.directory = directory
        self.permit_nonzero = permit_nonzero
        self.success = success
        self.queue = queue

    def run(self):
        """Method representing the process's activity"""
        for job in iter(self.queue.get, None):
            if self.success.value:
                continue
            stdout = mbkit.dispatch.cexectools.cexec([job], directory=self.directory, permit_nonzero=self.permit_nonzero)
            with open(job.rsplit('.', 1)[0] + '.log', 'w') as f_out:
                f_out.write(stdout)
            if callable(self.check_success) and self.check_success(job):
                self.success.value = int(True)
            time.sleep(1)


class LocalJobServer(object):
    """A local server to execute jobs via the multiprocessing module"""

    @staticmethod
    def sub(command, check_success=None, directory=None, nproc=1, permit_nonzero=False, time=None, *args, **kwargs):
        if check_success and not callable(check_success):
            msg = "check_success option requires a callable function/object: {0}".format(check_success)
            raise ValueError(msg)
        # Create a new queue
        queue = multiprocessing.Queue()
        success = multiprocessing.Value('i', int(False))
        # Create workers equivalent to the number of jobs
        workers = []
        for _ in range(nproc):
            wp = Worker(queue, success, check_success=check_success, directory=directory, permit_nonzero=permit_nonzero)
            wp.start()
            workers.append(wp)
        # Add each command to the queue
        for cmd in command:
            queue.put(cmd)
        # Stop workers from exiting without completion
        for _ in range(nproc):
            queue.put(None)
        # Wait for the workers to finish
        for wp in workers:
            wp.join(time)
Basically I'm creating a Value and providing that to each Process. Once a job is marked as successful, this variable gets updated. Each Process checks in if self.success.value: continue whether a job has already succeeded and, if so, just iterates over the remaining jobs in the Queue until it is empty.
The time.sleep(1) call is required to account for potential syncing delays amongst the processes. This is certainly not the most efficient approach, but it works.
Why does this code
import multiprocessing
import time

class Bot(multiprocessing.Process):
    def __init__(self):
        self.val = 0
        multiprocessing.Process.__init__(self)

    def setVal(self):
        self.val = 99

    def run(self):
        while True:
            print 'IN: ', self.val
            time.sleep(2)

if __name__ == '__main__':
    bot = Bot()
    bot.start()
    bot.setVal()
    while True:
        print 'OUT: ', bot.val
        time.sleep(2)
give the following output?
OUT: 99
IN: 0
OUT: 99
IN: 0
OUT: 99
IN: 0
OUT: 99
IN: 0
OUT: 99
IN: 0
OUT: 99
IN: 0
...
As you may guess, I expect to get all 99s, IN and OUT. But I do not. Why? What am I missing?
Once you've called start() on your object, the stuff inside that object is running in a separate process, and using methods of that class to "communicate" with it is not really the best way. What you need to do is called inter-process communication (IPC for short) and there is special machinery for doing it correctly.
For Python's multiprocessing module there are two mechanisms for communicating between processes: Pipe and Queue. I would suggest looking into those (e.g. here).
To use the Pipe mechanism in your example, you might do it this way (just a quick illustration):
class Bot(multiprocessing.Process):
    def __init__(self, pipe):
        multiprocessing.Process.__init__(self)
        self.val = 0
        self.ipcPipe = pipe

    def run(self):
        while True:
            newData = self.ipcPipe.recv()
            self.val = newData[0]
            print 'IN: ', self.val
            self.ipcPipe.send([self.val])
            time.sleep(2)

if __name__ == '__main__':
    parent_conn, child_conn = multiprocessing.Pipe()
    bot = Bot(child_conn)
    bot.start()
    value = 0
    while True:
        value += 1
        parent_conn.send([value])
        outVal = parent_conn.recv()
        print 'OUT: ', outVal[0]
        time.sleep(2)
See what's been done here: We create parent and child "ends" of the Pipe, and give the child end to your object. Then from the parent process you use send() to communicate a new value to the object, and recv() to get an updated value back. Likewise inside your object (a separate process, remember) you conversely use send() and recv() on the pipe's other end to communicate with the parent process.
Also, I would recommend calling Process.__init__(self) in your class __init__ method before doing any other initialization. Since you're inheriting from Process it's a good idea to make sure all the process-y stuff under the hood gets initialized correctly before you do anything in your own class.
Hope this helps.
The problem is that once you start the second process, you are printing from 2 different processes.
The parent process has the original instance of bot, with the value then set to 99. The parent process is printing OUT which is why you get the value 99 printed.
The (new) subprocess starts with state copied from the bot object as it was when you called the multiprocessing start() method. Because of this, it has a val of 0. You never call setVal in the subprocess, so its value remains 0, and the IN prints show 0.
If you want to share state information like this between the parent process and the subprocess, have a read of this:
http://docs.python.org/2/library/multiprocessing.html#sharing-state-between-processes
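For example, with a multiprocessing.Value the parent and the child see the same underlying value; this is a small sketch in the spirit of the docs, not a drop-in replacement for the Bot class:
import multiprocessing, time

def run(val):
    # the child sees updates made by the parent because the Value lives in shared memory
    while True:
        print("IN: %d" % val.value)
        time.sleep(2)

if __name__ == '__main__':
    val = multiprocessing.Value('i', 0)
    p = multiprocessing.Process(target=run, args=(val,))
    p.daemon = True
    p.start()
    val.value = 99                     # visible inside the child as well
    print("OUT: %d" % val.value)
    time.sleep(6)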
bot = Bot() # creates a bot object in this process
bot.start() # creates another process and creates a bot object there
These are two different objects.
If you want to have a look at how to share state between two processes: http://docs.python.org/2/library/multiprocessing.html#namespace-objects
Or at the pipes also proposed by DMH:
http://docs.python.org/2/library/multiprocessing.html#sharing-state-between-processes
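A minimal sketch of the Namespace variant mentioned above (names are illustrative):
import multiprocessing, time

def run(ns):
    # the namespace proxy reflects changes made by the parent
    while True:
        print("IN: %d" % ns.val)
        time.sleep(2)

if __name__ == '__main__':
    manager = multiprocessing.Manager()
    ns = manager.Namespace()
    ns.val = 0
    p = multiprocessing.Process(target=run, args=(ns,))
    p.daemon = True
    p.start()
    ns.val = 99
    time.sleep(6)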
I have two threads, Reader and Writer.
The Writer gets data from the network and then sends it over a socket to some executable. When this is done, the Writer should block for up to 70 seconds, which I specify with Event.wait(askrate).
This should give the executable enough time to compute the result and submit the output. When the computation is finished, I use Event.set() to release the lock on the Writer
thread so that it can read the next data that is forwarded to the executable, and so on.
The problem I have is that the Writer thread still keeps reading data while the Reader thread is waiting for the result coming through the serial interface.
Anyone have an idea why this blocking mechanism is not working properly between these two threads?
import socket
from threading import Thread, Event

askrate = 70

s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect("/tmp/demo_socket")

class Reader(Thread):
    def __init__(self):
        Thread.__init__(self)
        self.daemon = True

    def run(self):
        while True:
            nonce = s.recv(4)
            if len(nonce) == 4:
                submitter = Submitter(writer.block, nonce)
                # submit result and release thread lock in Writer class
                golden.set()

class Writer(Thread):
    def __init__(self):
        Thread.__init__(self)
        self.daemon = True

    def run(self):
        while True:
            work = bc.getwork()
            self.block = work['data']
            self.midstate = work['midstate']
            payload = self.midstate.decode('hex') + self.block.decode('hex')
            s.send(payload)
            result = golden.wait(askrate)
            if result:
                golden.clear()

golden = Event()
reader = Reader()
writer = Writer()
reader.start()
writer.start()
I'm pretty sure that this is not how you are supposed to use AF_UNIX sockets. You are supposed to open the pseudo-file twice (from the same or different processes); then writes to one side appear as reads on the other side, and vice versa. In your code, you open the pseudo-file only once. Any write is probably blocking, waiting for another process to open the pseudo-file a second time.
In your case, you should use socket.socketpair(), which returns you two sockets at once, playing the role of the two ends. Use one end in each thread.
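For illustration, socketpair() gives you two already-connected endpoints; whatever one thread writes on one end, the other thread can read from the other end (a self-contained sketch, not wired into the Reader/Writer classes above):
import socket, threading

a, b = socket.socketpair()   # two connected endpoints

def listener():
    # receives whatever the other thread sends on the peer socket
    print("got: %r" % b.recv(16))

t = threading.Thread(target=listener)
t.start()
a.send(b"hello")
t.join()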
I have this Python based service daemon which is doing a lot of multiplexed IO (select).
From another script (also Python) I want to query this service daemon about status/information and/or control the processing (e.g. pause it, shut it down, change some parameters, etc).
What is the best way to send control messages ("from now on you process like this!") and query processed data ("what was the result of that?") using python?
I read somewhere that named pipes might work, but don't know that much about named pipes, especially in python - and whether there are any better alternatives.
Both the background service daemon AND the frontend will be programmed by me, so all options are open :)
I am using Linux.
Pipes and named pipes are a good solution to communicate between different processes.
A pipe works like a shared memory buffer, but has an interface that mimics a simple file on each of its two ends. One process writes data on one end of the pipe, and another reads that data on the other end.
A named pipe is similar, except that it is associated with a real file on your computer.
More details at
http://www.softpanorama.org/Scripting/pipes.shtml
In Python, a named pipe is created with the os.mkfifo call
os.mkfifo(filename)
In the child and the parent, open this pipe as a file
out = os.open(filename, os.O_WRONLY)
fin = open(filename, 'r')
To write
os.write(out, b'xxxx')
To read
line = fin.readline()
Edit: Adding links from SO
Create a temporary FIFO (named pipe) in Python?
https://stackoverflow.com/search?q=python+named+pipes
You may want to read more on "IPC and Python"
http://www.freenetpages.co.uk/hp/alan.gauld/tutipc.htm
The best way to do IPC is using a message queue in Python, as below.
Server process: server.py (run this before running client.py and interact.py)
from multiprocessing.managers import BaseManager
import Queue
queue1 = Queue.Queue()
queue2 = Queue.Queue()
class QueueManager(BaseManager): pass
QueueManager.register('get_queue1', callable=lambda:queue1)
QueueManager.register('get_queue2', callable=lambda:queue2)
m = QueueManager(address=('', 50000), authkey='abracadabra')
s = m.get_server()
s.serve_forever()
The interactor, which handles the I/O: interact.py
from multiprocessing.managers import BaseManager
import threading
import sys

class QueueManager(BaseManager): pass

QueueManager.register('get_queue1')
QueueManager.register('get_queue2')
m = QueueManager(address=('localhost', 50000), authkey='abracadabra')
m.connect()
queue1 = m.get_queue1()
queue2 = m.get_queue2()

def read():
    while True:
        sys.stdout.write(queue2.get())

def write():
    while True:
        queue1.put(sys.stdin.readline())

threads = []

threadr = threading.Thread(target=read)
threadr.start()
threads.append(threadr)

threadw = threading.Thread(target=write)
threadw.start()
threads.append(threadw)

for thread in threads:
    thread.join()
The client program: client.py
from multiprocessing.managers import BaseManager
import sys
import string
import os

class QueueManager(BaseManager): pass

QueueManager.register('get_queue1')
QueueManager.register('get_queue2')
m = QueueManager(address=('localhost', 50000), authkey='abracadabra')
m.connect()
queue1 = m.get_queue1()
queue2 = m.get_queue2()

class RedirectOutput:
    def __init__(self, stdout):
        self.stdout = stdout

    def write(self, s):
        queue2.put(s)

class RedirectInput:
    def __init__(self, stdin):
        self.stdin = stdin

    def readline(self):
        return queue1.get()

# redirect standard output
sys.stdout = RedirectOutput(sys.stdout)
sys.stdin = RedirectInput(sys.stdin)

# The test program which will take input and produce output
Text = raw_input("Enter Text:")
print "you have entered:", Text

def x():
    while True:
        x = raw_input("Enter 'exit' to end and some thing else to continue")
        print x
        if 'exit' in x:
            break

x()
This can be used to communicate between two processes over a network or on the same machine.
Remember that the interactor and server processes will not terminate until you kill them manually.