This code:
import multiprocessing as mp
from threading import Thread
import subprocess
import time


class WorkerProcess(mp.Process):
    def run(self):
        # Simulate long running task
        self.subprocess = subprocess.Popen(['python', '-c', 'import time; time.sleep(1000)'])
        self.code = self.subprocess.wait()


class ControlThread(Thread):
    def run(self):
        jobs = []
        for _ in range(2):
            job = WorkerProcess()
            jobs.append(job)
            job.start()

        # wait for a while and then kill jobs
        time.sleep(2)
        for job in jobs:
            job.terminate()


if __name__ == "__main__":
    controller = ControlThread()
    controller.start()
When I terminate the spawned WorkerProcess instances, they die just fine; however, the subprocess python -c 'import time; time.sleep(1000)' runs until completion. This is well documented in the official docs, but how do I kill the child processes of a killed process?
A possible solution might be:
Wrap the WorkerProcess.run() method in a try/except block that catches SIGTERM and terminates the subprocess call. But I am not sure how to catch the SIGTERM in the WorkerProcess.
I also tried setting signal.signal(signal.SIGINT, handler) in the WorkerProcess, but I am getting ValueError, because it is only allowed to be set in the main thread.
What do I do now?
EDIT: As @svalorzen pointed out in the comments, this doesn't really work, since the reference to self.subprocess is lost.
Finally came to a clean, acceptable solution. Since mp.Process.terminate is a method, we can override it.
class WorkerProcess(mp.Process):
    def run(self):
        # Simulate long running task
        self.subprocess = subprocess.Popen(['python', '-c', 'import time; time.sleep(1000)'])
        self.code = self.subprocess.wait()

    # HERE
    def terminate(self):
        self.subprocess.terminate()
        super(WorkerProcess, self).terminate()
You can use queues to send messages to your subprocesses and ask them nicely to terminate their children before exiting themselves. You can't use signals anywhere but in the main thread, so signals are not suitable for this.
Curiously, when I modify the code like this, even if I interrupt it with Ctrl+C, the subprocesses die as well. This may be an OS-related thing, though.
import multiprocessing as mp
from threading import Thread
import subprocess
import time
from Queue import Empty


class WorkerProcess(mp.Process):
    def __init__(self, que):
        super(WorkerProcess, self).__init__()
        self.queue = que

    def run(self):
        # Simulate long running task
        self.subprocess = subprocess.Popen(['python', '-c', 'import time; time.sleep(1000)'])
        while True:
            a = self.subprocess.poll()
            if a is None:
                time.sleep(1)
                try:
                    if self.queue.get(0) == "exit":
                        print "kill"
                        self.subprocess.kill()
                        self.subprocess.wait()
                        break
                    else:
                        pass
                except Empty:
                    pass
                print "run"
            else:
                print "exiting"
                break


class ControlThread(Thread):
    def run(self):
        jobs = []
        queues = []
        for _ in range(2):
            q = mp.Queue()
            job = WorkerProcess(q)
            queues.append(q)
            jobs.append(job)
            job.start()

        # wait for a while and then kill jobs
        time.sleep(5)
        for q in queues:
            q.put("exit")
        time.sleep(30)


if __name__ == "__main__":
    controller = ControlThread()
    controller.start()
Hope this helps.
Hannu
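Another option, not from the answers above, is a Unix-only sketch: start the subprocess in its own process group (start_new_session=True) so the worker can signal the whole group on its way out, taking any grandchildren with it.

import os
import signal
import subprocess

# Sketch (Unix-only): put the child in its own session/process group so that
# everything it spawns can be killed together with one os.killpg() call.
child = subprocess.Popen(
    ['python', '-c', 'import time; time.sleep(1000)'],
    start_new_session=True,  # child becomes the leader of a new process group
)

# ... later, when the worker is asked to shut down:
try:
    os.killpg(os.getpgid(child.pid), signal.SIGTERM)  # signal the whole group
except ProcessLookupError:
    pass  # the group is already gone
child.wait()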
Related
I'm trying to code a kind of task manager in Python. It's based on a job queue; the main thread is in charge of adding jobs to this queue. I have made this class to handle the queued jobs, limit the number of concurrent processes, and handle the output of the finished processes.
Here comes the problem: in the _check_jobs function I don't get the updated returncode value of each process, regardless of its status (running, finished...). job.returncode is always None, so I can't run the if statement and remove jobs from the processing jobs list.
I know it can be done with process.communicate() or process.wait() but I don't want to block the thread that launches the processes. Is there any other way to do it, maybe using a ProcessPoolExecutor? The queue can be hit by processes at any time and I need to be able to handle them.
Thank you all for your time and support :)
from queue import Queue
import subprocess
from threading import Thread
from time import sleep


class JobQueueManager(Queue):
    def __init__(self, maxsize: int):
        super().__init__(maxsize)
        self.processing_jobs = []
        self.process = None
        self.jobs_launcher = Thread(target=self._worker_job)
        self.processing_jobs_checker = Thread(target=self._check_jobs_status)
        self.jobs_launcher.start()
        self.processing_jobs_checker.start()

    def _worker_job(self):
        while True:
            # Run at max 3 jobs concurrently
            if self.not_empty and len(self.processing_jobs) < 3:
                # Get job from queue
                job = self.get()
                # Execute a task without blocking the thread
                self.process = subprocess.Popen(job)
                self.processing_jobs.append(self.process)
                # useful if queue.join() is used to block the queue
                self.task_done()
            else:
                print("Waiting 4s for jobs")
                sleep(4)

    def _check_jobs_status(self):
        while True:
            # Check if jobs are finished
            for job in self.processing_jobs:
                # Successfully completed
                if job.returncode == 0:
                    self.processing_jobs.remove(job)
            # Wait 4 seconds and repeat
            sleep(4)


def main():
    q = JobQueueManager(100)
    task = ["stress", "--cpu", "1", "--timeout", "20"]
    for i in range(10):  # put 10 tasks in the queue
        q.put(task)
    q.join()  # block until all tasks are done


if __name__ == "__main__":
    main()
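As a side note (not part of the original post): Popen.returncode is only set once the process has been reaped by poll(), wait(), or communicate(). A minimal sketch of a non-blocking check using poll():

# Minimal sketch: poll() is non-blocking, returns None while the child is
# still running, and sets returncode once it has exited.
import subprocess
import time

proc = subprocess.Popen(["sleep", "2"])
while proc.poll() is None:
    print("still running...")
    time.sleep(1)
print("finished with return code", proc.returncode)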
I'll answer myself: I have come up with a working solution. The JobExecutor class manages the pool of processes in a custom way, and the watch_completed_tasks function watches for and handles the output of tasks when they are done. This way everything is done with only two threads, and the main thread is not blocked when submitting processes.
import subprocess
from threading import Timer
from concurrent.futures import ProcessPoolExecutor, as_completed
import logging


def launch_job(job):
    process = subprocess.Popen(job, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    print(f"launching {process.pid}")
    return [process.pid, process.stdout.read(), process.stderr.read()]


class JobExecutor(ProcessPoolExecutor):
    def __init__(self, max_workers: int):
        super().__init__(max_workers)
        self.futures = []
        self.watch_completed_tasks()

    def submit(self, command):
        future = super().submit(launch_job, command)
        self.futures.append(future)
        return future

    def watch_completed_tasks(self):
        # Manage tasks completion
        for completed_task in as_completed(self.futures):
            print(f"FINISHED task with PID {completed_task.result()[0]}")
            self.futures.remove(completed_task)
        # call this function every 5 seconds
        timer_thread = Timer(5.0, self.watch_completed_tasks)
        timer_thread.setName("TasksWatcher")
        timer_thread.start()


def main():
    executor = JobExecutor(max_workers=5)
    for i in range(10):
        task = ["stress",
                "--cpu", "1",
                "--timeout", str(i + 5)]
        executor.submit(task)


if __name__ == "__main__":
    main()
I want to kill a thread in Python. This thread can be running a blocking operation, and join() can't terminate it.
Similar to this:
from threading import Thread
import time


def block():
    while True:
        print("running")
        time.sleep(1)


if __name__ == "__main__":
    thread = Thread(target=block)
    thread.start()
    # kill thread
    # do other stuff
My problem is that the real blocking operation is in another module that is not mine, so there is no place where I can break out of it with a running variable.
The thread will be killed when exiting the main process if you set it up as a daemon:
from threading import Thread
import sys
import time


def block():
    while True:
        print("running")
        time.sleep(1)


if __name__ == "__main__":
    thread = Thread(target=block, daemon=True)
    thread.start()
    sys.exit(0)
Otherwise just set a flag. I'm using a bad example here (you should use some synchronization primitive, not just a plain variable):
from threading import Thread
import time

RUNNING = True


def block():
    global RUNNING
    while RUNNING:
        print("running")
        time.sleep(1)


if __name__ == "__main__":
    thread = Thread(target=block, daemon=True)
    thread.start()
    RUNNING = False  # thread will stop, not killed until next loop iteration
    # ... continue your stuff here
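As a sketch of the synchronization hinted at above (my illustration, not part of the original answer), a threading.Event makes the same flag idea explicit and avoids the bare global:

from threading import Thread, Event
import time

stop_event = Event()


def block(stop):
    # Loop until the main thread asks us to stop.
    while not stop.is_set():
        print("running")
        time.sleep(1)


if __name__ == "__main__":
    thread = Thread(target=block, args=(stop_event,), daemon=True)
    thread.start()
    time.sleep(3)      # do other stuff
    stop_event.set()   # ask the thread to stop at its next check
    thread.join()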
Use a running variable:
from threading import Thread
import time

running = True


def block():
    global running
    while running:
        print("running")
        time.sleep(1)


if __name__ == "__main__":
    thread = Thread(target=block)
    thread.start()
    running = False
    # do other stuff
I would prefer to wrap it all in a class, but this should work (untested though).
EDIT
There is a way to asynchronously raise an exception in a separate thread (which could be caught by a try/except block), but it's a dirty, dirty hack: https://gist.github.com/liuw/2407154
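For illustration only, here is a sketch of the kind of hack that gist describes, using the CPython-internal PyThreadState_SetAsyncExc through ctypes; this is not a supported API and it won't interrupt a thread that is blocked inside C code:

# Dirty-hack sketch: inject an exception into another thread via a CPython
# internal. The exception is raised at the next bytecode boundary, so a thread
# stuck in a C call (socket read, long sleep) won't notice until it returns.
import ctypes
import threading
import time


def worker():
    try:
        while True:
            time.sleep(0.1)
    except KeyboardInterrupt:
        print("worker: got injected exception, cleaning up")


t = threading.Thread(target=worker)
t.start()
time.sleep(1)

# Ask CPython to raise KeyboardInterrupt in the worker thread.
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
    ctypes.c_long(t.ident), ctypes.py_object(KeyboardInterrupt))
if res != 1:
    # 0: no such thread; >1: something went wrong, so undo the request.
    ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(t.ident), None)
t.join()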
Original post
"I want to kill a thread in python." you can't. Threads are only killed when they're daemons when there are no more non-daemonic threads running from the parent process. Any thread can be asked nicely to terminate itself using standard inter-thread communication methods, but you state that you don't have any chance to interrupt the function you want to kill. This leaves processes.
Processes have more overhead, and are more difficult to pass data to and from, but they do support being killed by sending SIGTERM or SIGKILL.
from multiprocessing import Process, Queue
from time import sleep


def workfunction(*args, **kwargs):  # any arguments you send to a child process must be picklable by python's pickle module
    sleep(args[0])  # really long computation you might want to kill
    return 'results'  # anything you want to get back from a child process must be picklable by python's pickle module


class daemon_worker(Process):
    def __init__(self, target_func, *args, **kwargs):
        self.return_queue = Queue()
        self.target_func = target_func
        self.args = args
        self.kwargs = kwargs
        super().__init__(daemon=True)
        self.start()

    def run(self):  # called by self.start()
        self.return_queue.put(self.target_func(*self.args, **self.kwargs))

    def get_result(self):  # raises queue.Empty if no result is ready
        return self.return_queue.get()


if __name__ == '__main__':
    # start some work that takes 1 sec:
    worker1 = daemon_worker(workfunction, 1)
    worker1.join(3)  # wait up to 3 sec for the worker to complete
    if not worker1.is_alive():  # if we didn't hit 3 sec timeout
        print('worker1 got: {}'.format(worker1.get_result()))
    else:
        print('worker1 still running')
        worker1.terminate()
        print('killing worker1')
        sleep(.1)  # calling worker.is_alive() immediately might incur a race condition where it may or may not have shut down yet.
        print('worker1 is alive: {}'.format(worker1.is_alive()))

    # start some work that takes 100 sec:
    worker2 = daemon_worker(workfunction, 100)
    worker2.join(3)  # wait up to 3 sec for the worker to complete
    if not worker2.is_alive():  # if we didn't hit 3 sec timeout
        print('worker2 got: {}'.format(worker2.get_result()))
    else:
        print('worker2 still running')
        worker2.terminate()
        print('killing worker2')
        sleep(.1)  # calling worker.is_alive() immediately might incur a race condition where it may or may not have shut down yet.
        print('worker2 is alive: {}'.format(worker2.is_alive()))
I am testing Python threading with the following script:
import threading


class FirstThread (threading.Thread):
    def run (self):
        while True:
            print 'first'


class SecondThread (threading.Thread):
    def run (self):
        while True:
            print 'second'


FirstThread().start()
SecondThread().start()
This is running in Python 2.7 on Kubuntu 11.10. Ctrl+C will not kill it. I also tried adding a handler for system signals, but that did not help:
import signal
import sys


def signal_handler(signal, frame):
    sys.exit(0)


signal.signal(signal.SIGINT, signal_handler)
To kill the process I have to kill it by PID after sending the program to the background with Ctrl+Z, which isn't being ignored. Why is Ctrl+C being ignored so persistently? How can I resolve this?
Ctrl+C terminates the main thread, but because your threads aren't in daemon mode, they keep running, and that keeps the process alive. We can make them daemons:
f = FirstThread()
f.daemon = True
f.start()
s = SecondThread()
s.daemon = True
s.start()
But then there's another problem - once the main thread has started your threads, there's nothing else for it to do. So it exits, and the threads are destroyed instantly. So let's keep the main thread alive:
import time
while True:
    time.sleep(1)
Now it will keep printing 'first' and 'second' until you hit Ctrl+C.
Edit: as commenters have pointed out, the daemon threads may not get a chance to clean up things like temporary files. If you need that, then catch the KeyboardInterrupt on the main thread and have it co-ordinate cleanup and shutdown. But in many cases, letting daemon threads die suddenly is probably good enough.
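A minimal sketch of that coordinated shutdown (my illustration, Python 3 syntax, assuming the workers check a shared Event): catch KeyboardInterrupt in the main thread, set the event, and join the workers so they can clean up before the process exits.

import threading
import time

stop = threading.Event()


def worker(name):
    while not stop.is_set():
        print(name)
        time.sleep(1)
    print(name, "cleaning up")  # e.g. remove temporary files here


threads = [threading.Thread(target=worker, args=(n,)) for n in ("first", "second")]
for t in threads:
    t.start()

try:
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    stop.set()        # tell the workers to finish their current loop iteration
    for t in threads:
        t.join()      # wait for their cleanup to complete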
KeyboardInterrupt and signals are only seen by the main thread of the process... Have a look at Ctrl-c i.e. KeyboardInterrupt to kill threads in Python.
I think it's best to call join() on your threads when you expect them to die. I've taken the liberty of changing your loops so they can end (you can add whatever cleanup is required there as well). The variable die is checked on each pass, and when it's True the program exits.
import threading
import time


class MyThread (threading.Thread):
    die = False

    def __init__(self, name):
        threading.Thread.__init__(self)
        self.name = name

    def run (self):
        while not self.die:
            time.sleep(1)
            print (self.name)

    def join(self):
        self.die = True
        super().join()


if __name__ == '__main__':
    f = MyThread('first')
    f.start()
    s = MyThread('second')
    s.start()
    try:
        while True:
            time.sleep(2)
    except KeyboardInterrupt:
        f.join()
        s.join()
An improved version of @Thomas K's answer:
Define a helper function is_any_thread_alive() according to this gist, which lets main() terminate automatically.
Example code:
import threading
import time


def job1():
    ...


def job2():
    ...


def is_any_thread_alive(threads):
    return True in [t.is_alive() for t in threads]


if __name__ == "__main__":
    ...
    t1 = threading.Thread(target=job1, daemon=True)
    t2 = threading.Thread(target=job2, daemon=True)
    t1.start()
    t2.start()

    while is_any_thread_alive([t1, t2]):
        time.sleep(0)
One simple 'gotcha' to beware of: are you sure CAPS LOCK isn't on?
I was running a Python script in the Thonny IDE on a Pi4. With CAPS LOCK on, Ctrl+Shift+C is passed to the keyboard buffer, not Ctrl+C.
Below is the code which demonstrates the problem. Please note that this is only an example; I am using the same logic in a more complicated application, where I can't use sleep, because the amount of time it takes for the process to modify the variable depends on the speed of the internet connection.
from multiprocessing import Process

code = False


def func():
    global code
    code = True


pro = Process(target=func)
pro.start()
while code == False:
    pass
pro.terminate()
pro.join()
print('Done!')
On running this, nothing appears on the screen. When I terminate the program by pressing Ctrl+C, the stack trace shows that the while loop was being executed.
Python has a few concurrency libraries: threading, multiprocessing and asyncio (and more).
multiprocessing is a library that uses subprocesses to bypass the GIL, which prevents Python threads from running CPU-intensive tasks concurrently. To share variables between different multiprocessing.Processes, create them via a multiprocessing.Manager() instance. For example:
import multiprocessing
import time


def func(event):
    print("> func()")
    time.sleep(1)
    print("setting event")
    event.set()
    time.sleep(1)
    print("< func()")


def main():
    print("In main()")
    manager = multiprocessing.Manager()
    event = manager.Event()
    p = multiprocessing.Process(target=func, args=(event,))
    p.start()
    while not event.is_set():
        print("waiting...")
        time.sleep(0.2)
    print("OK! joining func()...")
    p.join()
    print('Done!')


if __name__ == "__main__":
    main()
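A related sketch (my addition, not from the answer above): for a single boolean flag like code in the question, a multiprocessing.Value also works and avoids starting a Manager process.

# Sketch: share one boolean between parent and child with multiprocessing.Value.
import multiprocessing
import time


def func(code):
    time.sleep(1)
    code.value = True   # visible to the parent, unlike a plain module-level global


if __name__ == "__main__":
    code = multiprocessing.Value('b', False)  # 'b' = signed char, used as a bool
    p = multiprocessing.Process(target=func, args=(code,))
    p.start()
    while not code.value:
        time.sleep(0.1)
    p.join()
    print('Done!')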
Does
import multiprocessing
import schedule


def worker():
    # do some stuff


def sched(argv):
    schedule.every(0.01).minutes.do(worker)
    while True:
        schedule.run_pending()


processs = []
..
..
p = multiprocessing.Process(target=sched, args)
..
..
processs.append(p)

for p in processs:
    p.terminate()
gracefully kill a list of processes?
If not, what is the simplest way to do it?
The goal is to reload the configuration file into memory, so I would like to kill all child processes and create new ones instead; the latter will read the new config file.
Edit: Added more code to explain that I am running a while True loop.
Edit: This is the new code after @dano's suggestion:
def get_config(self):
    from ConfigParser import SafeConfigParser
    ..
    return argv

def sched(self, args, event):
    # schedule instruction:
    schedule.every(0.01).minutes.do(self.worker, args)
    while not event.is_set():
        schedule.run_pending()

def dispatch_processs(self, conf):
    processs = []
    event = multiprocessing.Event()

    for conf in self.get_config():
        process = multiprocessing.Process(target=self.sched, args=(i for i in conf), kwargs={'event': event})
        processs.append((process, event))
    return processs

def start_process(self, process):
    process.start()

def gracefull_process(self, process):
    process.join()

def main(self):
    while True:
        processs = self.dispatch_processs(self.get_config())
        print("%s processes running " % len(processs))

        for process, event in processs:
            self.start_process(process)
            time.sleep(1)
            event.set()
            self.gracefull_process(process)
The good thing about the code is that I can edit the config file and the process will reload its config as well.
The problem is that only the first process runs and the others are ignored.
Edit: This saved my life. Working with while True in schedule() is not a good idea, so I set up refresh_time instead:
def sched(self, args, event):
    schedule.every(0.01).minutes.do(self.worker, args)
    for i in range(refresh_time):
        schedule.run_pending()
        time.sleep(1)

def start_processs(self, processs):
    for p, event in processs:
        if not p.is_alive():
            p.start()
    time.sleep(1)
    event.set()
    self.gracefull_processs(processs)

def gracefull_processs(self, processs):
    for p, event in processs:
        p.join()
    processs = self.dispatch_processs(self.get_config())
    self.start_processs(processs)

def main(self):
    while True:
        processs = self.dispatch_processs(self.get_config())
        self.start_processs(processs)
        break
    print("Reloading function main")
    self.main()
If you don't mind only aborting after worker has completed all of its work, it's very simple to add a multiprocessing.Event to handle exiting gracefully:
import multiprocessing
import schedule


def worker():
    # do some stuff


def sched(argv, event=None):
    schedule.every(0.01).minutes.do(worker)
    while not event.is_set():  # Run until we're told to shut down.
        schedule.run_pending()


processes = []
..
..
event = multiprocessing.Event()
p = multiprocessing.Process(target=sched, args, kwargs={'event': event})
..
..
processes.append((p, event))

# Tell all processes to shut down
for _, event in processes:
    event.set()

# Now actually wait for them to shut down
for p, _ in processes:
    p.join()
A: No, both .terminate() & SIG_* methods are rather brutal
To arrange a graceful end of any process, as described in your post, there should be some "soft-signalling" layer that allows both ends to send and receive application-level signals without depending on the O/S interpretation of them (the O/S knows nothing about your application-level context or the state of the respective work unit that is currently being processed).
You may want to read about such a soft-signalling approach in the links referred to from https://stackoverflow.com/a/25373416/3666197
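A minimal sketch of such a soft-signalling layer (my illustration, not the linked answer's implementation): a control Queue carries the application-level message, and the worker decides when it is safe to stop.

# Sketch: application-level "soft signal" via a control queue, so the worker
# finishes its current unit of work before exiting.
import multiprocessing
import queue
import time


def worker(ctrl):
    while True:
        time.sleep(0.5)   # stand-in for one unit of real work
        try:
            if ctrl.get_nowait() == "SOFT_STOP":   # check for a soft signal
                print("worker: finishing up cleanly")
                break
        except queue.Empty:
            pass


if __name__ == "__main__":
    ctrl = multiprocessing.Queue()
    p = multiprocessing.Process(target=worker, args=(ctrl,))
    p.start()
    time.sleep(2)
    ctrl.put("SOFT_STOP")   # ask nicely instead of terminate()/SIGTERM
    p.join()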
No, it doesn't kill a process gracefully according to your own definition, unless you take some additional steps.
Assuming you're using a Unix system (since you mentioned scp), terminate() sends a SIGTERM signal to the child process. You can catch this signal in the child process and act accordingly (wait for scp to finish):
import signal


def on_terminate(signum, stack):
    wait_for_current_scp_operation()


signal.signal(signal.SIGTERM, on_terminate)
Here's a tutorial about handling and sending signals
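To make the pattern concrete, here is a sketch with a hypothetical do_scp_transfer() standing in for the real work; the handler just flips a flag, and the child exits after finishing its current transfer.

import signal
import time
from multiprocessing import Process

shutdown_requested = False


def on_terminate(signum, stack):
    global shutdown_requested
    shutdown_requested = True   # note the request; don't exit mid-transfer


def do_scp_transfer():
    # Hypothetical placeholder for the real scp call.
    time.sleep(1)


def child_main():
    signal.signal(signal.SIGTERM, on_terminate)
    while not shutdown_requested:
        do_scp_transfer()   # the current transfer still runs to completion
    print("child: shutting down cleanly")


if __name__ == "__main__":
    p = Process(target=child_main)
    p.start()
    time.sleep(3)
    p.terminate()   # sends SIGTERM; the child finishes its current transfer first
    p.join()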