I need to debug a child process spawned by multiprocessing.Process(). The pdb debugger seems to be unaware of forking and unable to attach to already-running processes.
Are there any smarter python debuggers which can be attached to a subprocess?
I've been searching for a simple solution to this problem and came up with this:
import sys
import pdb

class ForkedPdb(pdb.Pdb):
    """A Pdb subclass that may be used
    from a forked multiprocessing child
    """
    def interaction(self, *args, **kwargs):
        _stdin = sys.stdin
        try:
            sys.stdin = open('/dev/stdin')
            pdb.Pdb.interaction(self, *args, **kwargs)
        finally:
            sys.stdin = _stdin
Use it the same way you might use the classic Pdb:
ForkedPdb().set_trace()
Winpdb is pretty much the definition of a smarter Python debugger. It explicitly supports going down a fork; I'm not sure it works nicely with multiprocessing.Process(), but it's worth a try.
For a list of candidates to check for support of your use case, see the list of Python Debuggers in the wiki.
This is an elaboration of Romuald's answer which restores the original stdin using its file descriptor. This keeps readline working inside the debugger. In addition, pdb's special handling of KeyboardInterrupt is disabled so that it does not interfere with the multiprocessing SIGINT handler.
import os
import pdb
import sys

class ForkablePdb(pdb.Pdb):

    _original_stdin_fd = sys.stdin.fileno()
    _original_stdin = None

    def __init__(self):
        pdb.Pdb.__init__(self, nosigint=True)

    def _cmdloop(self):
        current_stdin = sys.stdin
        try:
            if not self._original_stdin:
                self._original_stdin = os.fdopen(self._original_stdin_fd)
            sys.stdin = self._original_stdin
            self.cmdloop()
        finally:
            sys.stdin = current_stdin
Building upon @memplex's idea, I had to modify it to get it to work with joblib by setting sys.stdin in the constructor as well as passing the stdin file descriptor along via joblib.
import os
import pdb
import signal
import sys
import joblib

_original_stdin_fd = None

class ForkablePdb(pdb.Pdb):

    _original_stdin = None
    _original_pid = os.getpid()

    def __init__(self):
        pdb.Pdb.__init__(self)
        # remember the current stdin so _cmdloop can always restore it
        self.current_stdin = sys.stdin
        if self._original_pid != os.getpid():
            if _original_stdin_fd is None:
                raise Exception("Must set ForkablePdb._original_stdin_fd to stdin fileno")
            if not self._original_stdin:
                self._original_stdin = os.fdopen(_original_stdin_fd)
            sys.stdin = self._original_stdin

    def _cmdloop(self):
        try:
            self.cmdloop()
        finally:
            sys.stdin = self.current_stdin

def handle_pdb(sig, frame):
    ForkablePdb().set_trace(frame)

def test(i, fileno):
    global _original_stdin_fd
    _original_stdin_fd = fileno
    while True:
        pass

if __name__ == '__main__':
    print "PID: %d" % os.getpid()
    signal.signal(signal.SIGUSR2, handle_pdb)
    ForkablePdb().set_trace()
    fileno = sys.stdin.fileno()
    joblib.Parallel(n_jobs=2)(joblib.delayed(test)(i, fileno) for i in range(10))
remote-pdb can be used to debug sub-processes. After installation, put the following lines in the code you need to debug:
import remote_pdb
remote_pdb.set_trace()
remote-pdb will print a port number which will accept a telnet connection for debugging that specific process. There are some caveats around worker launch order, where stdout goes when using various frontends, etc. To ensure a specific port is used (must be free and accessible to the current user), use the following instead:
from remote_pdb import RemotePdb
RemotePdb('127.0.0.1', 4444).set_trace()
remote-pdb may also be launched via the built-in breakpoint() function in Python 3.7+.
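A rough sketch of that (my own illustration, based on remote-pdb's documented use of PEP 553; your_script.py is a placeholder): point the PYTHONBREAKPOINT environment variable at remote_pdb.set_trace, so breakpoint() calls, including those in child processes that inherit the environment, open a remote-pdb session.

import os
import subprocess
import sys

# Run the target script with breakpoint() routed to remote_pdb
# (assumption: remote-pdb is installed in the same environment).
env = dict(os.environ, PYTHONBREAKPOINT="remote_pdb.set_trace")
subprocess.run([sys.executable, "your_script.py"], env=env)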
Just use PuDB, which gives you an awesome TUI (a GUI in the terminal) and supports multiprocessing as follows:
from pudb import forked; forked.set_trace()
An idea I had was to create "dummy" classes to fake the implementation of the methods you are using from multiprocessing:
from multiprocessing import Pool

class DummyPool():
    @staticmethod
    def apply_async(func, args, kwds):
        return DummyApplyResult(func(*args, **kwds))

    def close(self): pass
    def join(self): pass

class DummyApplyResult():
    def __init__(self, result):
        self.result = result

    def get(self):
        return self.result

def foo(a, b, switch):
    # set trace when DummyPool is used
    # import ipdb; ipdb.set_trace()
    if switch:
        return b - a
    else:
        return a - b

if __name__ == '__main__':
    # xml = etree.parse('C:/Users/anmendoza/Downloads/jim - 8.1/running-config.xml')  # left over from the answerer's own script; not needed for this example
    pool = DummyPool()  # switch between Pool() and DummyPool() here
    results = []
    results.append(pool.apply_async(foo, args=(1, 100), kwds={'switch': True}))
    pool.close()
    pool.join()
    results[0].get()
Here is the version of ForkedPdb (Romuald's solution) which will work for both Windows and *nix based systems.
import sys
import pdb

# win32console (pywin32) is only available on Windows, so guard the import
# and the helper class that uses it.
if sys.platform == "win32":
    import win32console

    class MyHandle():
        def __init__(self):
            self.screenBuffer = win32console.GetStdHandle(win32console.STD_INPUT_HANDLE)

        def readline(self):
            return self.screenBuffer.ReadConsole(1000)

class ForkedPdb(pdb.Pdb):
    def interaction(self, *args, **kwargs):
        _stdin = sys.stdin
        try:
            if sys.platform == "win32":
                sys.stdin = MyHandle()
            else:
                sys.stdin = open('/dev/stdin')
            pdb.Pdb.interaction(self, *args, **kwargs)
        finally:
            sys.stdin = _stdin
The problem here is that Python always connects sys.stdin in the child process to os.devnull to avoid contention for the stream. But this means that when the debugger (or a simple input()) tries to connect to stdin to get input from the user, it immediately reaches end-of-file and reports an error.
One solution, at least if you don't expect multiple debuggers to run at the same time, is to reopen stdin in the child process. That can be done by setting sys.stdin to open(0), which always opens the active terminal. This is in fact what the ForkedPdb solution does, but it can be done more simply and in an OS-independent manner like this:
import multiprocessing, sys

def main():
    process = multiprocessing.Process(target=worker)
    process.start()
    process.join()

def worker():
    # Python automatically closes sys.stdin for the subprocess, so we reopen
    # stdin. This enables pdb to connect to the terminal and accept commands.
    # See https://stackoverflow.com/a/30149635/3830997.
    sys.stdin = open(0)  # or os.fdopen(0)
    print("Hello from the subprocess.")
    breakpoint()  # or import pdb; pdb.set_trace()
    print("Exited from breakpoint in the subprocess.")

if __name__ == '__main__':
    main()
If you are on a supported platform, try DTrace. Most of the BSD / Solaris / OS X family support DTrace.
Here is an intro by the author. You can use DTrace to debug just about anything.
Here is an SO post on learning DTrace.
Related
Right now, I'm using subprocess to run a long-running job in the background. For multiple reasons (PyInstaller + AWS CLI) I can't use subprocess anymore.
Is there an easy way to achieve the same thing as below? That is, running a long-running Python function in a multiprocessing pool (or something else) and doing real-time processing of its stdout/stderr?
import subprocess

process = subprocess.Popen(
    ["python", "long-job.py"],
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE,
    shell=True,
)
while True:
    out = process.stdout.read(2000).decode()
    if not out:
        err = process.stderr.read().decode()
    else:
        err = ""
    if (out == "" or err == "") and process.poll() is not None:
        break
    live_stdout_process(out)
Thanks
Getting this to work cross-platform is messy: for a start, the Windows implementation of non-blocking pipes is not user friendly or portable.
One option is to just have your application read its command-line arguments and conditionally execute a file; then you get to use subprocess, since you will be launching yourself with different arguments.
But to keep it to multiprocessing:
the output must be logged to queues instead of pipes.
you need the child to execute a Python file; this can be done using runpy to execute the file as __main__.
this runpy function should run under a multiprocessing child, and this child must first redirect its stdout and stderr in the initializer.
when an error happens, your main application must catch it; but if it is too busy reading the output it won't be able to wait for the error, so a child thread has to start the multiprocess worker and wait for the error.
the main process has to create the queues, launch the child thread, and read the output.
putting it all together:
import multiprocessing
from multiprocessing import Queue
import sys
import concurrent.futures
import threading
import traceback
import runpy
import time

class StdoutQueueWrapper:
    def __init__(self, queue: Queue):
        self._queue = queue

    def write(self, text):
        self._queue.put(text)

    def flush(self):
        pass

def function_to_run():
    # runpy.run_path("long-job.py", run_name="__main__")  # run long-job.py
    print("hello")  # print something
    raise ValueError  # error out

def initializer(stdout_queue: Queue, stderr_queue: Queue):
    sys.stdout = StdoutQueueWrapper(stdout_queue)
    sys.stderr = StdoutQueueWrapper(stderr_queue)

def thread_function(child_stdout_queue, child_stderr_queue):
    with concurrent.futures.ProcessPoolExecutor(1, initializer=initializer,
                                                initargs=(child_stdout_queue, child_stderr_queue)) as pool:
        result = pool.submit(function_to_run)
        try:
            result.result()
        except Exception as e:
            child_stderr_queue.put(traceback.format_exc())

if __name__ == "__main__":
    child_stdout_queue = multiprocessing.Queue()
    child_stderr_queue = multiprocessing.Queue()

    child_thread = threading.Thread(target=thread_function,
                                    args=(child_stdout_queue, child_stderr_queue),
                                    daemon=True)
    child_thread.start()

    while True:
        while not child_stdout_queue.empty():
            var = child_stdout_queue.get()
            print(var, end='')
        while not child_stderr_queue.empty():
            var = child_stderr_queue.get()
            print(var, end='')
        if not child_thread.is_alive():
            break
        time.sleep(0.01)  # check output every 0.01 seconds
Note that a direct consequence of running under multiprocessing is that if the child runs into a segmentation fault or some other unrecoverable error, the parent will also die; hence running yourself under subprocess might be a better option if segfaults are expected.
I'd like to run a command and store the results in Redis as it is run.
Although the command for the demo is ls /etc, in real life I'd like to use this for long running processes.
I've written out some demo code to show the idea.
Unfortunately, when run, this code insists on a fileno and doesn't work, even if I simulate one. How can I accomplish this?
import subprocess
import redis

class RedisFile:
    def __init__(self, key):
        self.key = key
        self.redis = redis.StrictRedis()
        print("inited RedisFile with key:", key)

    def write(self, value):
        self.redis.append(self.key, value)

def main():
    out = RedisFile("out")
    error = RedisFile("error")

    proc = subprocess.Popen(["ls", "/etc"],
                            stdout=out,
                            stderr=error,
                            bufsize=0
                            )

main()
You should replace sys.stdout with a custom stdout. One example looks like this:
from unittest.mock import patch

with patch('sys.stdout', your_stdout), patch('sys.stderr', your_stderr):
    do_something()
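As a rough sketch of that idea applied to the RedisFile class from the question (my own illustration, assuming RedisFile is defined in the same file): patching sys.stdout only redirects output written by Python code in this process; a child started with subprocess.Popen still needs a real file descriptor, so its output has to be read from a pipe and forwarded by hand.

import subprocess
from unittest.mock import patch

out = RedisFile("out")  # RedisFile as defined in the question above

# Python-level writes land in Redis while the patch is active.
with patch('sys.stdout', out):
    print("this line is appended to the 'out' key")

# A spawned child needs a real file descriptor, so read from a pipe
# and forward the bytes to the same writer yourself.
proc = subprocess.Popen(["ls", "/etc"], stdout=subprocess.PIPE)
for line in proc.stdout:
    out.write(line)
proc.wait()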
Are there any built-in ways to have different threads have different destinations for print() and similar?
I'm exploring the creation of an interactive Python environment, so I can't just use print() from module spamegg. It has to be the globally available one with no arguments.
You can replace sys.stdout with an object that checks the current thread and writes to the appropriate file:
import sys, threading

class CustomOutput(object):
    def __init__(self):
        # the "softspace" slot is used internally by Python's print
        # to keep track of whether to prepend space to the
        # printed expression
        self.softspace = 0
        self._old_stdout = None

    def activate(self):
        self._old_stdout = sys.stdout
        sys.stdout = self

    def deactivate(self):
        sys.stdout = self._old_stdout
        self._old_stdout = None

    def write(self, s):
        # actually write to an open file obtained from an attribute
        # on the current thread
        threading.current_thread().open_file.write(s)

    def writelines(self, seq):
        for s in seq:
            self.write(s)

    def close(self):
        pass

    def flush(self):
        pass

    def isatty(self):
        return False
It is possible to do what you're asking, although it's complicated and clunky and possibly not portable, and I don't think it's what you want to do.
Your objection to just using spamegg.print is:
I'm exploring the creation of an interactive Python environment, so I can't just use print() from module spamegg. It has to be the globally available one with no arguments.
But the solution to that is easy: Just use print from module spamegg in your code, and from spamegg import print in the interactive interpreter. That's all there is to it.
For that matter, there's no good reason this even needs to be called print in the first place. If all of your code used some other output function with a different name, you could do the same thing in the interactive interpreter.
But how does that let each thread have a different destination?
The easy way to do that is to just look up the destination in a threading.local().
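A minimal sketch of that easy way (my own illustration; the spamegg module and its contents are hypothetical): the module-level print looks up the calling thread's destination in a threading.local and falls back to the real stdout.

# spamegg.py (hypothetical)
import builtins
import sys
import threading

_dest = threading.local()

def set_output(fileobj):
    # register the print destination for the calling thread
    _dest.file = fileobj

def print(*args, **kwargs):
    # drop-in replacement: writes to the calling thread's destination
    kwargs.setdefault('file', getattr(_dest, 'file', sys.stdout))
    builtins.print(*args, **kwargs)

Your own code does from spamegg import print, set_output, and the interactive interpreter does the same import, exactly as described above.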
But if you really want to do both parts of this the hard way, you can.
To do the global print the hard way, you can either have spamegg replace the builtin print instead of just giving you a way to shadow it, or have it replace sys.stdout, so the builtin print with default arguments will print somewhere else.
import builtins

_real_print = builtins.print

def _print(*args, **kwargs):
    kwargs.setdefault('file', my_output_destination)
    _real_print(*args, **kwargs)

builtins.print = _print
import io
import sys

class MyStdOut(io.TextIOBase):
    # ... __init__, write, etc.

sys.stdout = MyStdOut()
That still requires having MyStdOut use a thread-local target.
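For example, here is one way such a MyStdOut could look (my own filling-in of the elided methods; the thread-local lookup is the part that matters):

import io
import sys
import threading

class MyStdOut(io.TextIOBase):
    _local = threading.local()

    @classmethod
    def set_target(cls, fileobj):
        # each thread registers where its output should go
        cls._local.target = fileobj

    def write(self, s):
        # fall back to the real stdout if the thread never set a target
        target = getattr(self._local, 'target', sys.__stdout__)
        return target.write(s)

sys.stdout = MyStdOut()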
Alternatively, you can compile or wrap each thread function in its own custom globals environment that replaces __builtins__ and/or sys from the default, allowing you to give a different one to each thread from the start. For example:
from functools import partial
from threading import Thread
from types import FunctionType

class MyThread(Thread):
    def __init__(self, group=None, target=None, *args, **kwargs):
        if target:
            g = target.__globals__.copy()
            g['__builtins__'] = g['__builtins__'].copy()
            output = make_output_for_new_thread()
            g['__builtins__']['print'] = partial(print, file=output)
            target = FunctionType(target.__code__, g, target.__name__,
                                  target.__defaults__, target.__closure__)
        super().__init__(group, target, *args, **kwargs)
I might have a solution for you, but it's quite a bit more complicated than just print.
class ClusteredLogging(object):
    '''
    Class gathers all logs performed inside with statement and flush
    it to mainHandler on exit at once.

    Good for multithreaded applications that has to log several
    lines at once.
    '''
    def __init__(self, mainHandler, formatter):
        self.mainHandler = mainHandler
        self.formatter = formatter
        self.buffer = StringIO()
        self.handler = logging.StreamHandler(self.buffer)
        self.handler.setFormatter(formatter)

    def __enter__(self):
        rootLogger = logging.getLogger()
        rootLogger.addHandler(self.handler)

    def __exit__(self, t, value, tb):
        rootLogger = logging.getLogger()
        rootLogger.removeHandler(self.handler)
        self.handler.flush()
        self.buffer.flush()
        rootLogger.addHandler(self.mainHandler)
        logging.info(self.buffer.getvalue().strip())
        rootLogger.removeHandler(self.mainHandler)
Using this, you can create a log handler for each thread and configure them to store logs in different locations.
Keep in mind that this was developed with a slightly different goal in mind (see comments), but you can adapt it by taking the handler-juggling feature of ClusteredLogging as a starting point.
And some test code:
import concurrent.futures
try:
    from StringIO import StringIO
except ImportError:
    from io import StringIO
import logging
import sys

# put ClusteredLogging here

if __name__ == "__main__":
    formatter = logging.Formatter('%(asctime)s %(levelname)8s\t%(message)s')
    onlyMessageFormatter = logging.Formatter("%(message)s")

    mainHandler = logging.StreamHandler(sys.stdout)
    mainHandler.setFormatter(onlyMessageFormatter)
    mainHandler.setLevel(logging.DEBUG)

    rootLogger = logging.getLogger()
    rootLogger.setLevel(logging.DEBUG)

    def logSomethingLong(label):
        with ClusteredLogging(mainHandler, formatter):
            for i in range(15):
                logging.info(label + " " + str(i))

    labels = ("TEST", "EXPERIMENT", "TRIAL")
    executor = concurrent.futures.ProcessPoolExecutor()
    futures = [executor.submit(logSomethingLong, label) for label in labels]
    concurrent.futures.wait(futures)
I have a test harness (written in Python) that needs to shut down the program under test (written in C) by sending it ^C. On Unix,
proc.send_signal(signal.SIGINT)
works perfectly. On Windows, that throws an error ("signal 2 is not supported" or something like that). I am using Python 2.7 for Windows, so I have the impression that I should be able to do instead
proc.send_signal(signal.CTRL_C_EVENT)
but this doesn't do anything at all. What do I have to do? This is the code that creates the subprocess:
# Windows needs an extra argument passed to subprocess.Popen,
# but the constant isn't defined on Unix.
try: kwargs['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP
except AttributeError: pass
proc = subprocess.Popen(argv,
                        stdin=open(os.path.devnull, "r"),
                        stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE,
                        **kwargs)
There is a solution using a wrapper (as described in the link Vinay provided), which is started in a new console window with the Windows start command.
Code of the wrapper:
#wrapper.py
import subprocess, time, signal, sys, os

def signal_handler(signal, frame):
    time.sleep(1)
    print 'Ctrl+C received in wrapper.py'

signal.signal(signal.SIGINT, signal_handler)
print "wrapper.py started"
subprocess.Popen("python demo.py")
time.sleep(3)  # Replace with your IPC code here, which waits on a fire CTRL-C request
os.kill(signal.CTRL_C_EVENT, 0)
Code of the program catching CTRL-C:
#demo.py
import signal, sys, time

def signal_handler(signal, frame):
    print 'Ctrl+C received in demo.py'
    time.sleep(1)
    sys.exit(0)

signal.signal(signal.SIGINT, signal_handler)
print 'demo.py started'
#signal.pause()  # does not work under Windows
while(True):
    time.sleep(1)
Launch the wrapper like e.g.:
PythonPrompt> import subprocess
PythonPrompt> subprocess.Popen("start python wrapper.py", shell=True)
You need to add some IPC code which allows you to control the wrapper firing the os.kill(signal.CTRL_C_EVENT, 0) command. I used sockets for this purpose in my application.
Explanation:
Background information
send_signal(CTRL_C_EVENT) does not work because CTRL_C_EVENT is only for os.kill. [REF1]
os.kill(CTRL_C_EVENT) sends the signal to all processes running in the current cmd window [REF2]
Popen(..., creationflags=CREATE_NEW_PROCESS_GROUP) does not work because CTRL_C_EVENT is ignored for process groups. [REF2]
This is a bug in the python documentation [REF3]
Implemented solution
Let your program run in a different cmd window with the Windows shell command start.
Add a CTRL-C request wrapper between your control application and the application which should get the CTRL-C signal. The wrapper runs in the same cmd window as the application which should get the CTRL-C signal.
The wrapper shuts down itself and the program which should get the CTRL-C signal by sending all processes in the cmd window the CTRL_C_EVENT.
The control program should be able to request the wrapper to send the CTRL-C signal. This might be implemented through IPC means, e.g. sockets.
Helpful posts were:
I had to remove the http in front of the links because I'm a new user and am not allowed to post more than two links.
http://social.msdn.microsoft.com/Forums/en-US/windowsgeneraldevelopmentissues/thread/dc9586ab-1ee8-41aa-a775-cf4828ac1239/#6589714f-12a7-447e-b214-27372f31ca11
Can I send a ctrl-C (SIGINT) to an application on Windows?
Sending SIGINT to a subprocess of python
http://bugs.python.org/issue9524
http://ss64.com/nt/start.html
http://objectmix.com/python/387639-sending-cntrl-c.html#post1443948
Update: IPC-based CTRL-C wrapper
Here you can find a self-written Python module providing a CTRL-C wrapper including socket-based IPC.
The syntax is quite similar to the subprocess module.
Usage:
>>> import winctrlc
>>> p1 = winctrlc.Popen("python demo.py")
>>> p2 = winctrlc.Popen("python demo.py")
>>> p3 = winctrlc.Popen("python demo.py")
>>> p2.send_ctrl_c()
>>> p1.send_ctrl_c()
>>> p3.send_ctrl_c()
Code
import socket
import subprocess
import time
import random
import signal, os, sys

class Popen:
    _port = random.randint(10000, 50000)
    _connection = ''

    def _start_ctrl_c_wrapper(self, cmd):
        cmd_str = "start \"\" python winctrlc.py " + "\"" + cmd + "\"" + " " + str(self._port)
        subprocess.Popen(cmd_str, shell=True)

    def _create_connection(self):
        self._connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._connection.connect(('localhost', self._port))

    def send_ctrl_c(self):
        self._connection.send(Wrapper.TERMINATION_REQ)
        self._connection.close()

    def __init__(self, cmd):
        self._start_ctrl_c_wrapper(cmd)
        self._create_connection()

class Wrapper:
    TERMINATION_REQ = "Terminate with CTRL-C"

    def _create_connection(self, port):
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind(('localhost', port))
        s.listen(1)
        conn, addr = s.accept()
        return conn

    def _wait_on_ctrl_c_request(self, conn):
        while True:
            data = conn.recv(1024)
            if data == self.TERMINATION_REQ:
                ctrl_c_received = True
                break
            else:
                ctrl_c_received = False
        return ctrl_c_received

    def _cleanup_and_fire_ctrl_c(self, conn):
        conn.close()
        os.kill(signal.CTRL_C_EVENT, 0)

    def _signal_handler(self, signal, frame):
        time.sleep(1)
        sys.exit(0)

    def __init__(self, cmd, port):
        signal.signal(signal.SIGINT, self._signal_handler)
        subprocess.Popen(cmd)
        conn = self._create_connection(port)
        ctrl_c_req_received = self._wait_on_ctrl_c_request(conn)
        if ctrl_c_req_received:
            self._cleanup_and_fire_ctrl_c(conn)
        else:
            sys.exit(0)

if __name__ == "__main__":
    command_string = sys.argv[1]
    port_no = int(sys.argv[2])
    Wrapper(command_string, port_no)
New answer:
When you create the process, use the flag CREATE_NEW_PROCESS_GROUP. And then you can send CTRL_BREAK to the child process. The default behavior is the same as CTRL_C, except that it won't affect the calling process.
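A minimal sketch of that approach (my own illustration; child.py is a placeholder script that handles the break signal):

import signal
import subprocess
import time

# Windows-only: put the child in its own process group so the event does not hit us.
proc = subprocess.Popen(
    ["python", "child.py"],
    creationflags=subprocess.CREATE_NEW_PROCESS_GROUP,
)
time.sleep(5)
# CTRL_BREAK_EVENT reaches the child's process group but not the calling process.
proc.send_signal(signal.CTRL_BREAK_EVENT)
proc.wait()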
Old answer:
My solution also involves a wrapper script, but it does not need IPC, so it is far simpler to use.
The wrapper script first detaches itself from any existing console, then attaches to the target console, then fires the Ctrl-C event.
import ctypes
import sys
kernel = ctypes.windll.kernel32
pid = int(sys.argv[1])
kernel.FreeConsole()
kernel.AttachConsole(pid)
kernel.SetConsoleCtrlHandler(None, 1)
kernel.GenerateConsoleCtrlEvent(0, 0)
sys.exit(0)
The initial process must be launched in a separate console so that the Ctrl-C event will not leak. Example:
p = subprocess.Popen(['some_command'], creationflags=subprocess.CREATE_NEW_CONSOLE)
# Do something else
subprocess.check_call([sys.executable, 'ctrl_c.py', str(p.pid)]) # Send Ctrl-C
where I named the wrapper script as ctrl_c.py.
Try calling the GenerateConsoleCtrlEvent function using ctypes. As you are creating a new process group, the process group ID should be the same as the pid. So, something like
import ctypes
ctypes.windll.kernel32.GenerateConsoleCtrlEvent(0, proc.pid) # 0 => Ctrl-C
should work.
Update: You're right, I missed that part of the detail. Here's a post which suggests a possible solution, though it's a bit kludgy. More details are in this answer.
Here is a fully working example which doesn't need any modification in the target script.
This overrides the sitecustomize module, so it might not be suitable for every scenario. However, in this case you could use a *.pth file in site-packages to execute code at subprocess startup (see https://nedbatchelder.com/blog/201001/running_code_at_python_startup.html).
Edit: This only works out of the box for subprocesses started from Python. Other processes have to manually call SetConsoleCtrlHandler(NULL, FALSE).
main.py
import os
import signal
import subprocess
import sys
import time

def main():
    env = os.environ.copy()
    env['PYTHONPATH'] = '%s%s%s' % ('custom-site', os.pathsep,
                                    env.get('PYTHONPATH', ''))
    proc = subprocess.Popen(
        [sys.executable, 'sub.py'],
        env=env,
        creationflags=subprocess.CREATE_NEW_PROCESS_GROUP,
    )
    time.sleep(1)
    proc.send_signal(signal.CTRL_C_EVENT)
    proc.wait()

if __name__ == '__main__':
    main()
custom-site\sitecustomize.py
import ctypes
import sys

kernel32 = ctypes.WinDLL('kernel32', use_last_error=True)

if not kernel32.SetConsoleCtrlHandler(None, False):
    print('SetConsoleCtrlHandler Error: ', ctypes.get_last_error(),
          file=sys.stderr)
sub.py
import atexit
import time

def cleanup():
    print('cleanup')

atexit.register(cleanup)

while True:
    time.sleep(1)
I have a single-file solution with the following advantages:
- No external libraries (other than ctypes).
- Doesn't require the process to be opened in a specific way.
The solution is adapted from this Stack Overflow post, but I think it's much more elegant in Python.
import os
import signal
import subprocess
import sys
import time

# Terminates a Windows console app sending Ctrl-C
def terminateConsole(processId: int, timeout: int = None) -> bool:
    currentFilePath = os.path.abspath(__file__)

    # Call the below code in a separate process. This is necessary due to the FreeConsole call.
    try:
        code = subprocess.call('{} {} {}'.format(sys.executable, currentFilePath, processId), timeout=timeout)
        if code == 0: return True
    except subprocess.TimeoutExpired:
        pass

    # Backup plan
    subprocess.call('taskkill /F /PID {}'.format(processId))

if __name__ == '__main__':
    pid = int(sys.argv[1])

    import ctypes
    kernel = ctypes.windll.kernel32

    r = kernel.FreeConsole()
    if r == 0: exit(-1)
    r = kernel.AttachConsole(pid)
    if r == 0: exit(-1)
    r = kernel.SetConsoleCtrlHandler(None, True)
    if r == 0: exit(-1)
    r = kernel.GenerateConsoleCtrlEvent(0, 0)
    if r == 0: exit(-1)
    r = kernel.FreeConsole()
    if r == 0: exit(-1)

    # use tasklist to wait while the process is still alive.
    while True:
        time.sleep(1)
        # We pass in stdin as PIPE because there currently is no Console, and stdin is currently invalid.
        searchOutput: bytes = subprocess.check_output('tasklist /FI "PID eq {}"'.format(pid), stdin=subprocess.PIPE)
        if str(pid) not in searchOutput.decode():
            break

    # The following two commands are not needed since we're about to close this script.
    # You can leave them here if you want to do more console operations.
    r = kernel.SetConsoleCtrlHandler(None, False)
    if r == 0: exit(-1)
    r = kernel.AllocConsole()
    if r == 0: exit(-1)

    exit(0)
For those interested in a "quick fix", I've made a console-ctrl package based on Siyuan Ren's answer to make it even easier to use.
Simply run pip install console-ctrl, and in your code:
import console_ctrl
import subprocess
# Start some command IN A SEPARATE CONSOLE
p = subprocess.Popen(['some_command'], creationflags=subprocess.CREATE_NEW_CONSOLE)
# ...
# Stop the target process
console_ctrl.send_ctrl_c(p.pid)
I have been trying this, but for some reason Ctrl+Break works and Ctrl+C does not. So using os.kill(signal.CTRL_C_EVENT, 0) fails, but doing os.kill(signal.CTRL_C_EVENT, 1) works. I am told this has something to do with the process creator being the only one that can pass a Ctrl+C. Does that make sense?
To clarify: while running fio manually in a command window, it appears to run as expected. Using CTRL + BREAK breaks without storing the log, as expected, and CTRL + C finishes writing to the file, also as expected. The problem appears to be in the signal for the CTRL_C_EVENT.
It almost appears to be a bug in Python, but it may rather be a bug in Windows. Also, one other thing: I had a Cygwin version running and sending the Ctrl+C in Python there worked as well, but then again we aren't really running native Windows there.
example:
import subprocess, time, signal, sys, os

command = '"C:\\Program Files\\fio\\fio.exe" --rw=randrw --bs=1M --numjobs=8 --iodepth=64 --direct=1 ' \
          '--sync=0 --ioengine=windowsaio --name=test --loops=10000 ' \
          '--size=99901800 --rwmixwrite=100 --do_verify=0 --filename=I\\:\\test ' \
          '--thread --output=C:\\output.txt'

def signal_handler(signal, frame):
    time.sleep(1)
    print 'Ctrl+C received in wrapper.py'

signal.signal(signal.SIGINT, signal_handler)
print 'command Starting'
subprocess.Popen(command)
print 'command started'
time.sleep(15)
print 'Timeout Completed'
os.kill(signal.CTRL_C_EVENT, 0)
(This was supposed to be a comment under Siyuan Ren's answer but I don't have enough rep so here's a slightly longer version.)
If you don't want to create any helper scripts you can use:
p = subprocess.Popen(['some_command'], creationflags=subprocess.CREATE_NEW_CONSOLE)

# Do something else

subprocess.run([
    sys.executable,
    "-c",
    "import ctypes, sys;"
    "kernel = ctypes.windll.kernel32;"
    "pid = int(sys.argv[1]);"
    "kernel.FreeConsole();"
    "kernel.AttachConsole(pid);"
    "kernel.SetConsoleCtrlHandler(None, 1);"
    "kernel.GenerateConsoleCtrlEvent(0, 0);"
    "sys.exit(0)",
    str(p.pid)
])  # Send Ctrl-C
But it won't work if you use PyInstaller - sys.executable points to your executable, not the Python interpreter. To solve that issue I've created a tiny utility for Windows: https://github.com/anadius/ctrlc
Now you can send the Ctrl+C event with:
subprocess.run(["ctrlc", str(p.pid)])
I have this Python based service daemon which is doing a lot of multiplexed IO (select).
From another script (also Python) I want to query this service daemon about status/information and/or control the processing (e.g. pause it, shut it down, change some parameters, etc).
What is the best way to send control messages ("from now on you process like this!") and query processed data ("what was the result of that?") using python?
I read somewhere that named pipes might work, but don't know that much about named pipes, especially in python - and whether there are any better alternatives.
Both the background service daemon AND the frontend will be programmed by me, so all options are open :)
I am using Linux.
Pipes and named pipes are a good solution for communicating between different processes.
Pipes work like a shared memory buffer, but with an interface that mimics a simple file on each of the two ends. One process writes data at one end of the pipe, and another reads that data at the other end.
Named pipes are similar to the above, except that the pipe is actually associated with a real file on your computer.
More details at
http://www.softpanorama.org/Scripting/pipes.shtml
In Python, named pipe files are created with the os.mkfifo call:
os.mkfifo(filename)
In the child and the parent, open this pipe as a file:
out = os.open(filename, os.O_WRONLY)
inp = open(filename, 'r')  # 'in' is a reserved word in Python, so pick another name
To write:
os.write(out, 'xxxx')
To read:
lines = inp.readline()
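To make that concrete, here is a minimal sketch (my own illustration; the FIFO path is arbitrary) of a daemon loop reading commands that a frontend writes into the named pipe:

import os

FIFO = '/tmp/control.fifo'  # arbitrary path for this sketch
if not os.path.exists(FIFO):
    os.mkfifo(FIFO)

# Daemon side: opening the FIFO for reading blocks until a writer connects.
with open(FIFO, 'r') as commands:
    for line in commands:
        command = line.strip()
        print('daemon received command: ' + command)
        if command == 'shutdown':
            break

# Frontend side (run from another process):
# with open(FIFO, 'w') as commands:
#     commands.write('shutdown\n')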
Edit: Adding links from SO
Create a temporary FIFO (named pipe) in Python?
https://stackoverflow.com/search?q=python+named+pipes
You may want to read more on "IPC and Python"
http://www.freenetpages.co.uk/hp/alan.gauld/tutipc.htm
The best way to do IPC is to use a message queue in Python, as below.
Server process, server.py (run this before running client.py and interact.py):
from multiprocessing.managers import BaseManager
import Queue

queue1 = Queue.Queue()
queue2 = Queue.Queue()

class QueueManager(BaseManager): pass

QueueManager.register('get_queue1', callable=lambda: queue1)
QueueManager.register('get_queue2', callable=lambda: queue2)

m = QueueManager(address=('', 50000), authkey='abracadabra')
s = m.get_server()
s.serve_forever()
The inter-actor, which handles the I/O, interact.py:
from multiprocessing.managers import BaseManager
import threading
import sys

class QueueManager(BaseManager): pass

QueueManager.register('get_queue1')
QueueManager.register('get_queue2')

m = QueueManager(address=('localhost', 50000), authkey='abracadabra')
m.connect()

queue1 = m.get_queue1()
queue2 = m.get_queue2()

def read():
    while True:
        sys.stdout.write(queue2.get())

def write():
    while True:
        queue1.put(sys.stdin.readline())

threads = []

threadr = threading.Thread(target=read)
threadr.start()
threads.append(threadr)

threadw = threading.Thread(target=write)
threadw.start()
threads.append(threadw)

for thread in threads:
    thread.join()
The client program Client.py
from multiprocessing.managers import BaseManager
import sys
import string
import os

class QueueManager(BaseManager): pass

QueueManager.register('get_queue1')
QueueManager.register('get_queue2')

m = QueueManager(address=('localhost', 50000), authkey='abracadabra')
m.connect()

queue1 = m.get_queue1()
queue2 = m.get_queue2()

class RedirectOutput:
    def __init__(self, stdout):
        self.stdout = stdout

    def write(self, s):
        queue2.put(s)

class RedirectInput:
    def __init__(self, stdin):
        self.stdin = stdin

    def readline(self):
        return queue1.get()

# redirect standard output
sys.stdout = RedirectOutput(sys.stdout)
sys.stdin = RedirectInput(sys.stdin)

# The test program which will take input and produce output
Text = raw_input("Enter Text:")
print "you have entered:", Text

def x():
    while True:
        x = raw_input("Enter 'exit' to end and some thing else to continue")
        print x
        if 'exit' in x:
            break

x()
This can be used to communicate between two processes on a network or on the same machine.
Remember that the inter-actor and the server process will not terminate until you manually kill them.