Why does the main process code not execute after the join() method? I think the line print('mater gone') should execute after the two sub-threads have finished running, but it actually doesn't.
My steps are like this:
# cc.py
import time, multiprocessing, signal, threading, random, time, queue

class Master(multiprocessing.Process):
    def __init__(self):
        super(Master,self).__init__();
        signal.signal(signal.SIGTERM, self.handler);
        self.live = {'stat':True};

    def handler(self, signum, frame):
        print('signal:',signum);
        self.live['stat'] = 0;

    def run(self):
        print('PID:',self.pid);
        cond = threading.Condition(threading.Lock());
        q = queue.Queue();
        sender = Sender(cond, self.live, q);
        geter = Geter(cond, self.live, q);
        sender.start();
        geter.start();
        while threading.activeCount()-1:
            time.sleep(2);
            print('checking live', threading.activeCount());
        print('mater gone');
        sender.join();
        geter.join();

class Sender(threading.Thread):
    def __init__(self, cond, live, queue):
        super(Sender, self).__init__(name='sender');
        self.cond = cond;
        self.queue = queue;
        self.live = live

    def run(self):
        cond = self.cond;
        while self.live['stat']:
            cond.acquire();
            i = random.randint(0,100);
            self.queue.put(i,False);
            if not self.queue.full():
                print('sender add:',i);
            cond.notify();
            cond.release();
            time.sleep(random.randint(1,3));
        print('sender done')

class Geter(threading.Thread):
    def __init__(self, cond, live, queue):
        super(Geter, self).__init__(name='geter');
        self.cond = cond;
        self.queue = queue;
        self.live = live

    def run(self):
        cond = self.cond;
        while self.live['stat']:
            cond.acquire();
            if not self.queue.empty():
                i = self.queue.get();
                print('geter get:',i);
            cond.wait(3);
            cond.release();
            time.sleep(random.randint(1,3));
        print('geter done')

if __name__ == '__main__':
    master = Master();
    master.start();
--session1
C:\PrivateData>python cc.py
The output looks like this:
PID: 14320
sender add: 75
geter get: 75
checking live 3
sender add: 67
checking live 3
sender add: 79
geter get: 67
checking live 3
checking live 3
sender add: 69
--session2
>>> import os,signal,time
>>> os.kill(14320,signal.SIGTERM)
When I send the os.kill command in session2, the output in session1 stops immediately without printing 'mater gone'. I think the main process shouldn't end until the sub-threads have stopped. Am I doing something wrong?
It's quite easy to send or receive data through threads using the Queue module when doing one thing at a time, but I haven't figured out how to send something to a thread and then properly wait for a return value.
In the example below, I was expecting to send something to the thread to be processed and then harvest the result, but the t.queue.get() in the main function receives what was just sent above instead of waiting for the thread to return. How can I get around this?
#!/usr/bin/env python3
from threading import Thread
from queue import Queue

class MyThread(Thread):
    queue:Queue

    def __init__(self, *args, **kwargs):
        super().__init__()
        self.queue = Queue()
        self.daemon = True

    # receives a name, then prints "Hello, name!"
    def run(self):
        while True:
            val = self.queue.get()
            if not val:
                break
            self.queue.put(f'Hello, {val}!')

def main():
    t = MyThread()
    t.start()
    # sends string to thread
    t.queue.put('Jurandir')
    # expects to receive "Hello, Jurandir!",
    # but "Jurandir" is immediately returned
    ret = t.queue.get()
    print(ret)

if __name__ == '__main__':
    main()
The thing is that you are getting the alleged result from the queue immediately, before the worker has added its result. You can split it into an "input queue" and a "results queue", and then wait in the main thread until there is some output in the results queue.
#!/usr/bin/env python3
from threading import Thread, Lock
from queue import Queue

class MyThread(Thread):
    def __init__(self, *args, **kwargs):
        super().__init__()
        self.input_queue = Queue()
        self.results_queue = Queue()
        self.daemon = True

    # receives a name, then prints "Hello, name!"
    def run(self):
        while True:
            val = self.input_queue.get()
            if not val:
                break
            self.results_queue.put(f'Hello, {val}!')

def main():
    t = MyThread()
    t.start()
    # sends string to thread
    t.input_queue.put('Jurandir')
    ret = t.results_queue.get()
    while ret is None:
        ret = t.results_queue.get()
    print(ret)

if __name__ == '__main__':
    main()
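A side note on the pattern above: queue.Queue.get() blocks by default until an item is available, so the single results_queue.get() call already waits for the worker to respond; the extra while ret is None loop is only a defensive guard in case a None ever lands in the results queue.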
I am dealing with the following problem:
I've implemented a dummy 'Thing' class that sleeps for 10 seconds and logs a message ('foo'). This class is instantiated in a worker function for a process Pool, and the 'foo' method that implements the logic described above is called.
What I want to achieve is custom signal handling: as long as the processes haven't terminated, if CTRL+C (SIGINT) is sent, each process will log the signal and immediately terminate.
Half of the logic is working: while each process is sleeping, on SIGINT they'll be interrupted and the Pool will be closed.
Problem: if ALL the processes end successfully and SIGINT is then sent, the message will be logged but the Pool won't be closed.
Code:
import logging
import signal
import os
import time
from multiprocessing import Pool, current_process

logger = logging.getLogger('test')

SIGNAL_NAMES = dict((k, v) for v, k in reversed(sorted(signal.__dict__.items()))
                    if v.startswith('SIG') and not v.startswith('SIG_'))

class Thing(object):
    def __init__(self, my_id):
        self.my_id = my_id
        self.logger = logging.getLogger(str(my_id))

    def foo(self):
        time.sleep(10)
        self.logger.info('[%s] Foo after 10 secs!', self.my_id)

class Daemon(object):
    def __init__(self, no_processes, max_count):
        signal.signal(signal.SIGINT, self.stop)
        self.done = False
        self.count = 0
        self.max_count = max_count
        self.pool = Pool(no_processes, initializer=self.pool_initializer)

    def stop(self, signum, _):
        """ Stop function for Daemon """
        sig = SIGNAL_NAMES.get(signum) or signum
        logger.info('[Daemon] Stopping (received signal %s', sig)
        self.done = True

    def _generate_ids(self):
        """ Generator function of the IDs for the Processes Pool """
        while not self.done:
            if self.count < self.max_count:
                my_id = "ID-{}".format(self.count)
                logger.info('[Daemon] Generated ID %s', my_id)
                time.sleep(3)
                yield my_id
                self.count += 1
            time.sleep(1)

    def run(self):
        """ Main daemon run function """
        pid = os.getpid()
        logger.info('[Daemon] Started running on PID %s', str(pid))
        my_ids = self._generate_ids()
        for res in self.pool.imap_unordered(run_thing, my_ids):
            logger.info("[Daemon] Finished %s", res or '')
        logger.info('[Daemon] Closing & waiting processes to terminate')
        self.pool.close()
        self.pool.join()

    def pool_initializer(self):
        """ Pool initializer function """
        signal.signal(signal.SIGINT, self.worker_signal_handler)
    @staticmethod
    def worker_signal_handler(signum, _):
        """ Signal handler for the Process worker """
        sig = SIGNAL_NAMES.get(signum) or signum
        cp = current_process()
        logger.info("[%s] Received in worker %s signal %s", WORKER_THING_ID or '', str(cp), sig)
        global WORKER_EXITING
        WORKER_EXITING = True

WORKER_EXITING = False
WORKER_THING_ID = None

def run_thing(arg):
    """ Worker function for processes """
    if WORKER_EXITING:
        return

    global WORKER_THING_ID
    WORKER_THING_ID = arg
    run_exception = None
    logger.info('[%s] START Thing foo-ing', arg)
    logging.getLogger('Thing-{}'.format(arg)).setLevel(logging.INFO)
    try:
        thing = Thing(arg)
        thing.foo()
    except Exception as e:
        run_exception = e
    finally:
        WORKER_THING_ID = None

    logger.info('[%s] STOP Thing foo-ing', arg)
    if run_exception:
        logger.error('[%s] EXCEPTION on Thing foo-ing: %s', arg, run_exception)
    return arg

if __name__ == '__main__':
    logging.basicConfig()
    logger.setLevel(logging.INFO)
    daemon = Daemon(4, 3)
    daemon.run()
Your problem is the logic in the function _generate_ids(). The function never ends, so pool.imap_unordered() never finishes by itself; it can only be interrupted by CTRL-C.
Change it to something like this:
def _generate_ids(self):
    """ Generator function of the IDs for the Processes Pool """
    for i in range(self.max_count):
        time.sleep(3)
        my_id = "ID-{}".format(self.count)
        logger.info('[Daemon] Generated ID %s', my_id)
        if self.done:
            break
        self.count += 1
        yield my_id
And the processes end by themselves normally.
I'm working on a toy multiprocessing problem, and event signalling is not working as expected. The multiprocessing documentation refers to the threading documentation for the detailed description of Event(), and the methods described there are precisely what I'm trying to use. I want worker processes (subclassed from multiprocessing.Process), spawned by a parent class, to wait for a start signal from that parent class, do their thing, and then terminate. What seems to be happening, however, is that the first process, once running, blocks any others. What's going on here, and how do I fix it?
from multiprocessing import Process, Event, cpu_count
from time import time, sleep

class Worker(Process):
    def __init__(self, my_id, caller):
        Process.__init__(self)
        self.caller = caller
        self.my_id = my_id

    def run(self):
        print("%i started"%self.my_id)
        self.caller.start_flag.wait()
        print("%i sleeping"%self.my_id)
        sleep(2000)

class ParentProcess(object):
    def __init__(self, num_procs):
        self.procs = []
        self.start_flag = Event()
        for i in range(num_procs):
            self.procs.append(Worker(i, self))

    def run(self):
        for proc in self.procs:
            proc.run()
        self.start_flag.set()
        for proc in self.procs:
            proc.join()
            print("%i done"%proc.my_id)

if __name__ == '__main__':
    cpus = cpu_count()
    world = ParentProcess(cpus)
    start = time()
    world.run()
    end = time()
    runtime = end - start
    print("Runtime: %3.6f"%runtime)
This only outputs "0 started" and then hangs. It seems the Event.wait() is blocking all the other threads, even the caller. The documentation implies this should not happen.
Here is a working version of the code. When you subclass Process, you implement the run method to define what should run in that process, but when you actually want the process to start you should call its start method (proc.start()). Calling run() directly just executes the method in the calling process like any ordinary method call, which is why the first worker's wait() blocked everything.
from multiprocessing import Process, Event
from time import time, sleep

class Worker(Process):
    def __init__(self, my_id, caller):
        Process.__init__(self)
        self.caller = caller
        self.my_id = my_id

    def run(self):
        print("%i started"%self.my_id)
        self.caller.start_flag.wait()
        print("%i sleeping"%self.my_id)
        sleep(5)

class ParentProcess(object):
    def __init__(self, num_procs):
        self.procs = []
        self.start_flag = Event()
        for i in range(num_procs):
            self.procs.append(Worker(i, self))

    def run(self):
        for proc in self.procs:
            proc.start()
        self.start_flag.set()
        for proc in self.procs:
            proc.join()
            print("%i done"%proc.my_id)

if __name__ == '__main__':
    cpus = 4
    world = ParentProcess(cpus)
    start = time()
    world.run()
    end = time()
    runtime = end - start
    print(runtime)
Outputs:
0 started
1 started
2 started
2 sleeping
0 sleeping
1 sleeping
3 started
3 sleeping
0 done
1 done
2 done
3 done
5.01037812233
I have the following code:
class MyThread(Thread):
    def __init__(self, command, template, env, build_flavor, logger):
        Thread.__init__(self)
        self.command = command
        self.template = template
        self.env = env
        self.build_flavor = build_flavor
        self.logger = logger

    def run(self):
        self.logger.info('Running (%s)...this may take several minutes. Please be patient' % self.build_flavor)
        run_command(self.command, self.template, self.env)
        self.logger.info('Complete (%s)' % self.build_flavor)
        return
And then in another class, when I create the actual threads:
if self.build_type == 'default':
    threads = []
    for t in self.template:
        modify_template(t)
        build_flavor = self.getmatch(t)
        thread = MyThread(packer, t, self.new_env, build_flavor, self.logger)
        thread.setName(build_flavor)
        thread.start()
        threads.append(thread)

    for thread in threads:
        thread.join()

    vmware_create()
    openstack_create()
Unfortunately, after the threads are .join()'d, I'm calling vmware_create() and openstack_create() serially. I'd like to be able to execute each of those after its respective thread completes, so that I'm not waiting for both threads to finish before starting one of the *_create() functions, and then waiting for the first to complete before executing the second.
i.e. right now vmware_create() will execute only after BOTH threads are finished, and only once vmware_create() is done will openstack_create() begin. I'd like to wait for the respective threads to complete and then execute the _create() function of whichever thread completed first, all the while waiting for the second thread to finish, and then, once that's done, immediately execute its _create() function for true parallelization.
I haven't been able to figure out how to do this and need a little help.
Functions are objects. Just hand them to the thread:
class MyThread(Thread):
    def __init__(self, command, template, env, build_flavor, logger, func=None):
        Thread.__init__(self)
        self.command = command
        self.template = template
        self.env = env
        self.build_flavor = build_flavor
        self.logger = logger
        self.func = func

    def run(self):
        self.logger.info('Running (%s)...this may take several minutes. Please be patient' % self.build_flavor)
        run_command(self.command, self.template, self.env)
        self.logger.info('Complete (%s)' % self.build_flavor)
        # call func if it is there
        if self.func:
            self.func()
        return
Now, supply the first two threads with a function to call:
if self.build_type == 'default':
    threads = []
    funcs = {0: vmware_create, 1: openstack_create}
    for i, t in enumerate(self.template):
        modify_template(t)
        build_flavor = self.getmatch(t)
        func = funcs.get(i, None)
        thread = MyThread(packer, t, self.new_env, build_flavor,
                          self.logger, func=func)
        thread.setName(build_flavor)
        thread.start()
        threads.append(thread)

    for thread in threads:
        thread.join()
Of course, you can add them to any other threads.
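If you would rather not modify the thread class, a similar effect can be had with concurrent.futures, where add_done_callback runs each follow-up as soon as its own build finishes. This is only a sketch for the two-template case, reusing run_command, packer and the *_create() functions from the question; build() and schedule_builds() are illustrative helper names, not part of the original code.

from concurrent.futures import ThreadPoolExecutor

def build(command, template, env, create_func):
    # the long-running build step from the question
    run_command(command, template, env)
    # hand the follow-up function back so the callback can invoke it
    return create_func

def schedule_builds(templates, env):
    creates = [vmware_create, openstack_create]
    with ThreadPoolExecutor(max_workers=len(creates)) as pool:
        for template, create_func in zip(templates, creates):
            future = pool.submit(build, packer, template, env, create_func)
            # fires as soon as this particular build is done,
            # without waiting for the other one
            future.add_done_callback(lambda f: f.result()())

The with block still waits for both builds before returning, but each _create() runs the moment its own build completes, which is the behaviour asked for above.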
I created two threads, each running a different function.
What I tried to achieve is that if the first thread ends, then the second should also end (I tried achieving this using a global variable).
Once both threads have ended, the same procedure should repeat.
The script is not working as expected.
I am using Linux - CentOS and Python 2.7.
#!/usr/bin/python
import threading
import time
import subprocess
import datetime
import os
import thread

command= "strace -o /root/Desktop/a.txt -c ./server"
final_dir = "/root/Desktop/"
exitflag = 0

# Define a function for the thread
def print_time(*args):
    os.chdir(final_dir)
    print "IN first thread"
    proc = subprocess.Popen(command,shell=True,stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    proc.wait(70)
    exitflag=1

def print_time1(*args):
    print "In second thread"
    global exitflag
    while exitflag:
        thread.exit()
        #proc = subprocess.Popen(command1,shell=True,stdout=subprocess.PIPE, sterr=subprocess.PIPE)

# Create two threads as follows
while (1):
    t1=threading.Thread(target=print_time)
    t1.start()
    t2=threading.Thread(target=print_time1)
    t2=start()
    time.sleep(80)
    z = t1.isAlive()
    z1 = t2.isAlive()
    if z:
        z.exit()
    if z1:
        z1.exit()
    threading.Thread(target=print_time1).start()
    threading.Thread(target=print_time1).start()
    print "In try"
Where am I going wrong?
You could create an object to share state, and have the dependent thread check that state. Something like:
import threading
import time
import datetime

class Worker1( threading.Thread ):
    def __init__(self, state):
        super(Worker1, self).__init__()
        self.state = state

    def run(self):
        print_time_helper("Worker1 Start")
        time.sleep(4)
        print_time_helper("Worker1 End")
        self.state.keepOnRunning = False

class Worker2( threading.Thread ):
    def __init__(self, state):
        super(Worker2, self).__init__()
        self.state = state

    def run(self):
        while self.state.keepOnRunning:
            print_time_helper("Worker2")
            time.sleep(1)

class State( object ):
    def __init__(self):
        self.keepOnRunning = True

def main():
    state = State()
    thread1 = Worker1(state)
    thread2 = Worker2(state)
    thread1.start()
    thread2.start()
    thread1.join()
    thread2.join()

def print_time_helper(name):
    print "{0}: {1}".format(name, datetime.datetime.now().time().strftime("%S"))
which will output something like this (the numbers show the seconds of the current time):
Worker1 Start: 39
Worker2: 39
Worker2: 40
Worker2: 41
Worker2: 42
Worker1 End: 43
However, this is a bit simplistic for most situations. You might be better off using message queues - this is a good intro.
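For completeness, here is a minimal sketch of the message-queue idea, written for Python 2.7 like the rest of this question. The worker1/worker2 names and the 'stop' message are illustrative: the first thread posts a message on a Queue when it finishes, and the second thread checks the queue between units of work instead of polling a shared flag.

import threading
import time
import Queue

def worker1(msg_queue):
    time.sleep(4)              # stands in for the real work (e.g. the subprocess)
    msg_queue.put('stop')      # tell the other thread we are done

def worker2(msg_queue):
    while True:
        try:
            # wait up to a second for a message, otherwise do a unit of work
            if msg_queue.get(timeout=1) == 'stop':
                break
        except Queue.Empty:
            print "Worker2 tick"

if __name__ == '__main__':
    q = Queue.Queue()
    t1 = threading.Thread(target=worker1, args=(q,))
    t2 = threading.Thread(target=worker2, args=(q,))
    t1.start()
    t2.start()
    t1.join()
    t2.join()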
Use a threading.Event instead of an int and wait for it to be set.
Also, your logic in print_time1 appears to be wrong: the while loop will never run, since exitflag is initially 0, and even if it were 1 it would just exit immediately. It is not actually waiting on anything.
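A minimal sketch of that Event-based approach, again assuming Python 2.7 as in the question, with the subprocess details replaced by a sleep (the function names mirror the question; the timings are illustrative):

import threading
import time

done = threading.Event()

def print_time():
    time.sleep(5)          # stands in for proc.wait(...) in the question
    done.set()             # signal the second thread that the first has finished

def print_time1():
    # wait(timeout=1) blocks for up to a second and returns True once the event is set
    while not done.wait(timeout=1):
        print "In second thread, still waiting"
    print "Second thread exiting"

t1 = threading.Thread(target=print_time)
t2 = threading.Thread(target=print_time1)
t1.start()
t2.start()
t1.join()
t2.join()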