main0 doesn't hang but main1 hangs. Why? I thought wrapping the same logic in a class would be harmless.
The child process should simply send each message it receives back to the main process.
Python3 code:
from multiprocessing import Process, Pipe

def child(conn):
    print("child started")
    while True:
        msg = conn.recv()
        if msg == "quit":
            break
        print("child recv:" + msg)
        print("child sending:" + msg)
        conn.send(msg)
    conn.close()
    print("child ended")

def main0():
    parent_conn, child_conn = Pipe()
    p = Process(target=child, args=(child_conn,))
    p.start()
    parent_conn.send("ping")
    print(parent_conn.recv())
    parent_conn.send("quit")
    print("#parent ended#")
    p.join()
class Parent(object):
    def __init__(self):
        self.parent_conn = None
        self.child_conn = None
        self.p = None

    def start(self):
        self.parent_conn, self.child_conn = Pipe()
        self.p = Process(target=child, args=(self.child_conn,))
        self.p.start()  # <--- I initially missed this line
        print("started")

    def send(self, msg):
        print("try to send: " + msg)
        self.parent_conn.send(msg)
        return self.parent_conn.recv()

    def close(self):
        self.parent_conn.send("quit")
        self.p.join()

def main1():
    a = Parent()
    a.start()
    print(a.send("ping"))
    print(a.send("quit"))
    a.close()

if __name__ == '__main__':
    main0()  # doesn't hang
    main1()  # hangs
output:
~~~ main 0 ~~~
child started
child recv:ping
child sending:ping
ping
#parent ended#
child ended
~~~ main 1 ~~~
started <Process(Process-1, started)>
try to send: ping
waiting to recv
child started
child recv:ping
child sending:ping
ping
try to send: quit
waiting to recv
child ended
*still hangs ... after adding self.p.start()*
I had missed the self.p.start() line in the start method.
There is a second problem: the child breaks out of its loop on "quit" without sending anything back, so a.send("quit") blocks forever in recv().
The correct main1() should be:
def main1():
    a = Parent()
    a.start()
    print(a.send("ping"))
    a.close()
With those two fixes it no longer hangs.
I'm trying to run a "long-running" process as root (because I have to), in a thread, in Python, then kill it and access its output.
The process in question is "babeld", and when I launch it in a terminal it prints text on stdout. Yet when I run the following code, I get no access to stdout or stderr:
% ./example.py
Waiting for output
Pgid: 13445, pid: 13445
Stopping task
Permission Error!
Calling sudo kill 13445
B
None
None
End
The code:
#!/usr/bin/env python3
import subprocess
import threading
import time
import os

def main():
    task = TaskManager()
    task.launch()
    time.sleep(2)
    task.stop()
    print(task.stdout)
    print(task.stderr)

class TaskManager(threading.Thread):
    def __init__(self):
        super().__init__()
        self.start_event = threading.Event()
        self.stderr = None
        self.stdout = None
        self.pgid = None
        self.task = None
        self.start()

    def run(self):
        self.start_event.wait()
        self.task = subprocess.Popen(["sudo", "babeld", "-d", "2", "wlp2s0"],
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE,
                                     preexec_fn=os.setsid)
        self.pgid = os.getpgid(self.task.pid)
        print("Waiting for output")
        self.stdout, self.stderr = self.task.communicate()
        print("End")

    def launch(self):
        self.start_event.set()

    def stop(self):
        print("Pgid: %s, pid: %s" % (self.pgid, self.task.pid))
        try:
            print("Stopping task")
            self.task.terminate()
        except PermissionError:
            print("Permission Error!")
            print("Calling sudo kill %d" % self.pgid)
            subprocess.check_call(["sudo", "kill", str(self.pgid)])
        print("B")

if __name__ == '__main__':
    main()
How do I properly kill processes running as root while keeping access to their stdout and stderr?
Thanks,
The recipe is simple: do not use communicate(). You can replace your self.stdout and self.stderr attributes with the following getters:
@property
def stdout(self):
    return self.task.stdout.read()

@property
def stderr(self):
    return self.task.stderr.read()
Incidentally, this approach also lets you drop threading from the code entirely. Example:
#!/usr/bin/env python3
import subprocess
import time
import os

def main():
    task = TaskManager()
    task.launch()
    time.sleep(2)
    task.stop()
    print(task.stdout)
    print(task.stderr)

class TaskManager:
    def __init__(self):
        self.pgid = None
        self.task = None

    @property
    def stdout(self):
        return self.task.stdout.read()

    @property
    def stderr(self):
        return self.task.stderr.read()

    def launch(self):
        self.task = subprocess.Popen(["sudo", "babeld", "-d", "2", "wlp2s0"],
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE,
                                     preexec_fn=os.setsid)
        self.pgid = os.getpgid(self.task.pid)
        print("Waiting for output")

    def stop(self):
        print("Pgid: %s, pid: %s" % (self.pgid, self.task.pid))
        try:
            print("Stopping task")
            self.task.terminate()
        except PermissionError:
            print("Permission Error!")
            print("Calling sudo kill %d" % self.pgid)
            subprocess.check_call(["sudo", "kill", str(self.pgid)])
        print("B")

if __name__ == '__main__':
    main()
I have a parent process that runs a child process in a class. The child process takes much longer to complete. I want to make sure that the child process is not going to terminate when the parent process is terminated. How do I do that?
Here's a very simplified version of my code:
# myClass.py
from multiprocessing import Process
import time

class myClass(object):
    def __init__(self):
        print('setup the object')

    def parentProcess(self, idx):
        p = Process(target=self.childProcess)
        p.start()
        time.sleep(3)
        print('parent is done with ' + str(idx))

    def childProcess(self):
        print('do some childish stuff')
        time.sleep(8)
And this is how I run the parent process:
# main.py
from multiprocessing import Process
from myClass import myClass

myC = myClass()
for i in range(10):
    p = Process(target=myC.parentProcess, args=(i,))
    p.start()
    p.join()
Your program will not terminate until all the processes are done. Try this:
from multiprocessing import Process
import time

def foo():
    time.sleep(2)
    print("Now I am done")

if __name__ == "__main__":
    p = Process(target=foo)
    p.start()
    print("I am done.")
However, to control the execution of your processes, use join() to wait for a child process to end, and use two loops: one to start the processes and one to join them.
Try this:
from multiprocessing import Process
import time

class MyClass():
    def __init__(self, idx):
        self.idx = idx

    def start_parent(self):
        p = Process(target=self.child_func)
        p.start()
        time.sleep(1)
        print('parent is done, waiting for child', self.idx)
        p.join()
        print('parent exiting', self.idx)

    def child_func(self):
        print('child start', self.idx)
        time.sleep(2)
        print('child end', self.idx)

if __name__ == "__main__":
    parents = []
    for i in range(10):
        o = MyClass(i)
        p = Process(target=o.start_parent)
        parents.append(p)
        p.start()
    for p in parents:
        p.join()
    print("all done")
Or even better, subclass Process and implement run():
from multiprocessing import Process
import time

class ParentProcess(Process):
    def __init__(self, idx, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.idx = idx

    def run(self):
        print('parent start', self.idx)
        child = ChildProcess(self)
        child.start()
        time.sleep(1)
        print('waiting for child', self.idx)
        child.join()
        print('parent end', self.idx)

class ChildProcess(Process):
    def __init__(self, parent, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.parent = parent

    def run(self):
        print('child start', self.parent.idx)
        time.sleep(5)
        print('child end', self.parent.idx)

if __name__ == "__main__":
    parents = [ParentProcess(i) for i in range(10)]
    for p in parents:
        p.start()
    for p in parents:
        p.join()
    print("all done")
This is my current code. The main issue is that I use a Semaphore to coordinate the output of two processes, but the Semaphore does not seem to change globally: when the "producer" process raises the Semaphore to 2, the "consumer" process still sees it as zero, which makes it wait forever.
from multiprocessing import Process, Semaphore, Queue
import time
from random import random

buffer = Queue(10)
empty = Semaphore(2)
full = Semaphore(0)

class Consumer(Process):
    def run(self):
        global buffer, empty, full
        while True:
            time.sleep(4)
            print(full)
            full.acquire()
            buffer.get()
            print('Consumer get')
            time.sleep(1)
            empty.release()

class Producer(Process):
    def run(self):
        global buffer, empty, full
        while True:
            empty.acquire()
            print('Producer put ')
            time.sleep(1)
            full.release()
            buffer.put(1)
            print(full)

if __name__ == '__main__':
    p = Producer()
    c = Consumer()
    p.daemon = c.daemon = True
    p.start()
    c.start()
    p.join()
    c.join()
    print('Ended!')
and the output is
Producer put
<Semaphore(value=1)>
Producer put
<Semaphore(value=2)>
<Semaphore(value=0)>
I don't know what I should do to let the "consumer" process detect the change.
Each of your two processes has its own copy of both semaphores, because each child re-runs the whole module when it is started.
You must move the semaphore and queue definitions inside the if __name__ == '__main__': block and pass the instances to the Producer and Consumer constructors, so that both processes use the same three objects.
from multiprocessing import Process, Semaphore, Lock, Queue
import time
from random import random

class Consumer(Process):
    def __init__(self, empty, full, buffer):
        super(Consumer, self).__init__()
        self.empty = empty
        self.full = full
        self.buffer = buffer

    def run(self):
        while True:
            time.sleep(4)
            print("Consumer: {}".format(self.full), flush=True)
            print("Consumer: buf {}".format(self.buffer.qsize()), flush=True)
            self.full.acquire()
            self.buffer.get()
            print('Consumer get', flush=True)
            time.sleep(1)
            self.empty.release()

class Producer(Process):
    def __init__(self, empty, full, buffer):
        super(Producer, self).__init__()
        self.empty = empty
        self.full = full
        self.buffer = buffer

    def run(self):
        while True:
            self.empty.acquire()
            print('Producer put ', flush=True)
            self.buffer.put('a')  # <<< you forgot this in your code; if the queue is empty, get() blocks the consumer
            time.sleep(1)
            self.full.release()
            print(self.full, flush=True)

if __name__ == '__main__':
    buffer = Queue(10)
    empty = Semaphore(2)
    full = Semaphore(0)
    p = Producer(empty, full, buffer)
    c = Consumer(empty, full, buffer)
    p.daemon = c.daemon = True
    p.start()
    c.start()
    p.join()
    c.join()
    print('Ended!')
My code processes several parallel Perforce tasks while showing a progress bar and letting the user terminate the job whenever he wants. The problem is that when the user clicks the close button, the thread function is not killed, although the lock is released and the main UI thread is unblocked.
The p4.run_sync() call does not terminate when the Cancel button is clicked.
def P4SyncLibrary(args, que):
    syncType = args[0]
    view = args[1]
    p4 = P4CreateConnection(disable_tmp_cleanup=True)
    try:
        p4.run_sync(view)
    except P4Exception:
        for e in p4.errors:
            print "SyncError: - %s" % e
    p4.disconnect()
    que.put(None)

class CreateJob(QtGui.QDialog):
    def __init__(self, thread, args):
        QtGui.QDialog.__init__(self)
        self.ui = Ui_ProgressBar()
        self.ui.setupUi(self)
        self.ui.cancel.clicked.connect(self.closeEvent)
        self.ui.cancel.setIcon(QtGui.QIcon(QtGui.QPixmap("%s/delete.xpm" % resources)))
        self.threadControl = ThreadControl(thread=thread, args=args)
        self.connect(self.threadControl, QtCore.SIGNAL("__updateProgressBar(int)"), self.__updateProgressBar)
        self.threadControl.finished.connect(self.closeEvent)
        self.threadControl.start()

    @QtCore.pyqtSlot(int)
    def __updateProgressBar(self, val):
        self.ui.progressBar.setValue(val)
        self.setWindowTitle("Processing: {0}%".format(val))

    def closeEvent(self, QCloseEvent=None):
        if self.threadControl.isRunning():
            self.threadControl.stop()
            self.threadControl.wait()
        if QCloseEvent: QtGui.QDialog.closeEvent(self, QCloseEvent)
        else: self.close()

    def getResults(self):
        return self.threadControl.resultDict

class ThreadControl(QtCore.QThread):
    stopFlag = 0

    def __init__(self, thread=None, args=None):
        super(ThreadControl, self).__init__()
        self.args = args
        self.thread = thread
        self.resultDict = []

    def run(self):
        threads = {}
        queue = multiprocessing.Queue()
        for arg in self.args:
            process = multiprocessing.Process(target=self.thread, args=(arg, queue))
            process.start()
            threads[process] = 1  ## ACTIVE thread
        # WAIT TILL ALL PROCESSES COMPLETE
        completedThreads = 0
        total = len(threads.keys())
        while completedThreads != total:
            if self.stopFlag:
                for t in threads.keys():
                    if threads[t] == 1:
                        t.terminate()
                        t.join()
                        threads[t] = 0
                        completedThreads += 1
            else:
                for t in threads.keys():
                    if self.stopFlag: break  ## Process threads termination
                    elif threads[t] == 1 and not t.is_alive():
                        threads[t] = 0
                        completedThreads += 1
                        self.resultDict.append(queue.get())
                        self.emit(QtCore.SIGNAL('__updateProgressBar(int)'), (completedThreads * 100) / total)
            sleep(0.5)  ## Prevent CPU from overloading

    def stop(self):
        self.stopFlag = 1
A job is created using an instance of CreateJob:
CreateJob(thread=P4SyncLibrary, args=P4Libraries).exec_()
The only solution I could find is to pass the p4 object to the worker as an argument, so that the P4 server connection can be disconnected when the user cancels the job.
def P4SyncLibrary(p4, args, que):
    syncType = args[0]
    view = args[1]
    try:
        p4.run_sync(view)
    except P4Exception:
        for e in p4.errors:
            print "SyncError: - %s" % e
    que.put(None)

class ThreadControl(QtCore.QThread):
    ...
    def run(self):
        threads = {}
        queue = multiprocessing.Queue()
        for arg in self.args:
            connection = P4CreateConnection(disable_tmp_cleanup=True)
            if connection.connected():
                process = multiprocessing.Process(target=self.thread, args=(connection, arg, queue))
                process.start()
                threads[process] = {
                    'isAlive': True,
                    'connection': connection
                }
        # WAIT TILL ALL PROCESSES COMPLETE
        completedThreads = 0
        total = len(threads.keys())
        while completedThreads != total:
            if self._stop:
                for t in threads.keys():
                    if threads[t]['isAlive']:
                        threads[t]['connection'].disconnect()
                        t.terminate()
                        t.join()
                        threads[t]['isAlive'] = False
                        completedThreads += 1
            else:
                for t in threads.keys():
                    if self._stop: break  ## Process threads termination
                    elif threads[t]['isAlive'] and not t.is_alive():
                        threads[t]['connection'].disconnect()
                        threads[t]['isAlive'] = False
                        completedThreads += 1
                        self.results.append(queue.get())
                        self.emit(QtCore.SIGNAL('__updateProgressBar(int)'), (completedThreads * 100) / total)
            sleep(0.5)  ## Prevent CPU from overloading
I'm new to RabbitMQ and pika, and I'm having trouble stopping consuming.
Channel and queue setup:
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
channel.queue_declare(queue=new_task_id, durable=True, auto_delete=True)
Basically, consumer and producer are like this:
consumer:
def task(task_id):
    def callback(channel, method, properties, body):
        if body != "quit":
            print(body)
        else:
            print(body)
            channel.stop_consuming(task_id)

    channel.basic_consume(callback, queue=task_id, no_ack=True)
    channel.start_consuming()
    print("finish")
    return "finish"
producer:
proc = Popen(['app/sample.sh'], shell=True, stdout=PIPE)
while proc.returncode is None:  # running
    line = proc.stdout.readline()
    if line:
        channel.basic_publish(
            exchange='',
            routing_key=self.request.id,
            body=line
        )
    else:
        channel.basic_publish(
            exchange='',
            routing_key=self.request.id,
            body="quit"
        )
        break
The consumer task gave me this output:
# ... output from sample.sh, as expected
quit
�}q(UstatusqUSUCCESSqU tracebackqNUresultqNUtask_idqU
1419350416qUchildrenq]u.
However, "finish" never got printed, so I'm guessing channel.stop_consuming(task_id) didn't stop consuming. If so, what is the correct way to do it? Thank you.
I had the same problem. It seems to be caused by the fact that, internally, start_consuming calls self.connection.process_data_events(time_limit=None), and this time_limit=None makes it hang.
I managed to work around the problem by replacing the call to channel.start_consuming() with a hacked version of its implementation:
while channel._consumer_infos:
    channel.connection.process_data_events(time_limit=1)  # 1 second
I have a class whose channel and connection member variables are initialized by a separate thread. The consumer of the MyClient class calls the close() method, and then the connection and consumer are stopped!
class MyClient:
    def __init__(self, unique_client_code):
        self.Channel = None
        self.Conn: pika.BlockingConnection = None
        self.ClientThread = self.init_client_driver()

    def _close_callback(self):
        self.Channel.stop_consuming()
        self.Channel.close()
        self.Conn.close()

    def _client_driver_thread(self, tmout=None):
        print("Starting Driver Thread...")
        self.Conn = pika.BlockingConnection(pika.ConnectionParameters("localhost"))
        self.Channel = self.Conn.channel()

    def init_client_driver(self, tmout=None):
        kwargs = {'tmout': tmout}
        t = threading.Thread(target=self._client_driver_thread, kwargs=kwargs)
        t.daemon = True
        t.start()
        return t

    def close(self):
        self.Conn.add_callback_threadsafe(self._close_callback)
        self.ClientThread.join()