writing the console output to a file in python [duplicate] - python

How do I redirect stdout to an arbitrary file in Python?
When a long-running Python script (e.g., a web application) is started from within an ssh session and backgrounded, and the ssh session is then closed, the application will raise IOError and fail the moment it tries to write to stdout. I needed to find a way to make the application and its modules output to a file rather than stdout to prevent failure due to IOError. Currently, I employ nohup to redirect output to a file, and that gets the job done, but I was wondering if there was a way to do it without using nohup, out of curiosity.
I have already tried sys.stdout = open('somefile', 'w'), but this does not seem to prevent some external modules from still outputting to the terminal (or maybe the sys.stdout = ... line did not fire at all). I know it should work, based on simpler scripts I've tested it on, but I haven't had time to test it on a web application yet.

If you want to do the redirection within the Python script, setting sys.stdout to a file object does the trick:
# for python3
import sys

with open('file', 'w') as sys.stdout:
    print('test')
A far more common method is to use shell redirection when executing (same on Windows and Linux):
$ python3 foo.py > file

There is a contextlib.redirect_stdout() function in Python 3.4+:
from contextlib import redirect_stdout

with open('help.txt', 'w') as f:
    with redirect_stdout(f):
        print('it now prints to `help.txt`')
It is similar to:
import sys
from contextlib import contextmanager

@contextmanager
def redirect_stdout(new_target):
    old_target, sys.stdout = sys.stdout, new_target  # replace sys.stdout
    try:
        yield new_target  # run some code with the replaced stdout
    finally:
        sys.stdout = old_target  # restore to the previous value
that can be used on earlier Python versions. This latter version is not reusable, though it can be made so if desired (a rough sketch of one way follows).
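As a hedged aside (not part of the original answer), one way to make it reusable is a small class-based context manager that re-reads sys.stdout on every entry; the class name is made up:
import sys

class RedirectStdout:
    # Reusable: each __enter__ saves whatever stdout currently is.
    def __init__(self, new_target):
        self.new_target = new_target

    def __enter__(self):
        self.old_target = sys.stdout
        sys.stdout = self.new_target
        return self.new_target

    def __exit__(self, exc_type, exc_value, traceback):
        sys.stdout = self.old_target  # restore even if an exception occurred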
Neither version redirects stdout at the file descriptor level, e.g.:
import os
import sys
from contextlib import redirect_stdout

stdout_fd = sys.stdout.fileno()
with open('output.txt', 'w') as f, redirect_stdout(f):
    print('redirected to a file')
    os.write(stdout_fd, b'not redirected')
    os.system('echo this also is not redirected')
b'not redirected' and 'echo this also is not redirected' are not redirected to the output.txt file.
To redirect at the file descriptor level, os.dup2() could be used:
import os
import sys
from contextlib import contextmanager

def fileno(file_or_fd):
    fd = getattr(file_or_fd, 'fileno', lambda: file_or_fd)()
    if not isinstance(fd, int):
        raise ValueError("Expected a file (`.fileno()`) or a file descriptor")
    return fd

@contextmanager
def stdout_redirected(to=os.devnull, stdout=None):
    if stdout is None:
        stdout = sys.stdout

    stdout_fd = fileno(stdout)
    # copy stdout_fd before it is overwritten
    # NOTE: `copied` is inheritable on Windows when duplicating a standard stream
    with os.fdopen(os.dup(stdout_fd), 'wb') as copied:
        stdout.flush()  # flush library buffers that dup2 knows nothing about
        try:
            os.dup2(fileno(to), stdout_fd)  # $ exec >&to
        except ValueError:  # filename
            with open(to, 'wb') as to_file:
                os.dup2(to_file.fileno(), stdout_fd)  # $ exec > to
        try:
            yield stdout  # allow code to be run with the redirected stdout
        finally:
            # restore stdout to its previous value
            # NOTE: dup2 makes stdout_fd inheritable unconditionally
            stdout.flush()
            os.dup2(copied.fileno(), stdout_fd)  # $ exec >&copied
The same example works now if stdout_redirected() is used instead of redirect_stdout():
import os
import sys

stdout_fd = sys.stdout.fileno()
with open('output.txt', 'w') as f, stdout_redirected(f):
    print('redirected to a file')
    os.write(stdout_fd, b'it is redirected now\n')
    os.system('echo this is also redirected')
print('this goes back to stdout')
The output that previously was printed on stdout now goes to output.txt as long as the stdout_redirected() context manager is active.
Note: stdout.flush() does not flush C stdio buffers on Python 3, where I/O is implemented directly on read()/write() system calls. To flush all open C stdio output streams, you could call libc.fflush(None) explicitly if some C extension uses stdio-based I/O:
try:
    import ctypes
    from ctypes.util import find_library
except ImportError:
    libc = None
else:
    try:
        libc = ctypes.cdll.msvcrt  # Windows
    except OSError:
        libc = ctypes.cdll.LoadLibrary(find_library('c'))

def flush(stream):
    try:
        libc.fflush(None)
        stream.flush()
    except (AttributeError, ValueError, IOError):
        pass  # unsupported
You could use the stdout parameter to redirect other streams, not only sys.stdout, e.g., to merge sys.stderr and sys.stdout:
def merged_stderr_stdout():  # $ exec 2>&1
    return stdout_redirected(to=sys.stdout, stdout=sys.stderr)
Example:
from __future__ import print_function
import sys

with merged_stderr_stdout():
    print('this is printed on stdout')
    print('this is also printed on stdout', file=sys.stderr)
Note: stdout_redirected() mixes buffered I/O (sys.stdout usually) and unbuffered I/O (operations on file descriptors directly). Beware, there could be buffering issues.
To answer your edit: you could use python-daemon to daemonize your script and use the logging module (as @erikb85 suggested) instead of print statements and merely redirecting stdout for your long-running Python script that you currently run using nohup.
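For illustration only (not from the original answer), a minimal logging setup that sends output to a file instead of stdout; the file name, format, and logger name are made up:
import logging

logging.basicConfig(
    filename='app.log',                  # hypothetical log file
    level=logging.INFO,
    format='%(asctime)s %(levelname)s %(name)s: %(message)s',
)
logger = logging.getLogger('myapp')      # hypothetical logger name
logger.info('started')                   # goes to app.log instead of stdout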

You can try this too, which I find much better:
import sys

class Logger(object):
    def __init__(self, filename="Default.log"):
        self.terminal = sys.stdout
        self.log = open(filename, "a")

    def write(self, message):
        self.terminal.write(message)
        self.log.write(message)

sys.stdout = Logger("yourlogfilename.txt")
print "Hello world !"  # this should be saved in yourlogfilename.txt

The other answers didn't cover the case where you want forked processes to share your new stdout.
To do that:
from os import open, close, dup, O_WRONLY
old = dup(1)
close(1)
open("file", O_WRONLY) # should open on 1
# ... do stuff and then restore
close(1)
dup(old) # should dup to 1
close(old) # get rid of left overs

Quoted from PEP 343 -- The "with" Statement (added import statement):
Redirect stdout temporarily:
import sys
from contextlib import contextmanager

@contextmanager
def stdout_redirected(new_stdout):
    save_stdout = sys.stdout
    sys.stdout = new_stdout
    try:
        yield None
    finally:
        sys.stdout = save_stdout
Used as follows:
with open(filename, "w") as f:
    with stdout_redirected(f):
        print "Hello world"
This isn't thread-safe, of course, but neither is doing this same dance manually. In single-threaded programs (for example in scripts) it is a popular way of doing things.

import sys
sys.stdout = open('stdout.txt', 'w')

Here is a variation of Yuda Prawira's answer:
implement flush() and all the file attributes
write it as a contextmanager
capture stderr also
import contextlib, sys

@contextlib.contextmanager
def log_print(file):
    # capture all outputs to a log file while still printing it
    class Logger:
        def __init__(self, file):
            self.terminal = sys.stdout
            self.log = file

        def write(self, message):
            self.terminal.write(message)
            self.log.write(message)

        def __getattr__(self, attr):
            return getattr(self.terminal, attr)

    logger = Logger(file)

    _stdout = sys.stdout
    _stderr = sys.stderr
    sys.stdout = logger
    sys.stderr = logger
    try:
        yield logger.log
    finally:
        sys.stdout = _stdout
        sys.stderr = _stderr

with log_print(open('mylogfile.log', 'w')):
    print('hello world')
    print('hello world on stderr', file=sys.stderr)

# you can capture the output to a string with:
# with log_print(io.StringIO()) as log:
#     ....
#     print('[captured output]', log.getvalue())

You need a terminal multiplexer like either tmux or GNU screen
I'm surprised that a small comment by Ryan Amos on the original question is the only mention of a solution far preferable to all the others on offer, no matter how clever the Python trickery may be and how many upvotes they've received. Further to Ryan's comment, tmux is a nice alternative to GNU screen.
But the principle is the same: if you ever find yourself wanting to leave a terminal job running while you log out, head to the cafe for a sandwich, pop to the bathroom, go home (etc.), and then later reconnect to your terminal session from anywhere or any computer as though you'd never been away, terminal multiplexers are the answer. Think of them as VNC or remote desktop for terminal sessions. Anything else is a workaround. As a bonus, when the boss and/or partner comes in and you inadvertently ctrl-w / cmd-w your terminal window instead of your browser window with its dodgy content, you won't have lost the last 18 hours' worth of processing!

Based on this answer: https://stackoverflow.com/a/5916874/1060344, here is another way I figured out, which I use in one of my projects. Whatever you replace sys.stderr or sys.stdout with, you have to make sure that the replacement complies with the file interface, especially if you are doing this because stderr/stdout are used by some other library that is not under your control. That library may be using other methods of the file object.
Check out this way where I still let everything go to stderr/stdout (or any file for that matter) and also send the message to a log file using Python's logging facility (but you can really do anything with this):
import logging

class FileToLogInterface(file):
    '''
    Interface to make sure that every time anything is written to stderr, it is
    also forwarded to a file.
    '''

    def __init__(self, *args, **kwargs):
        if 'cfg' not in kwargs:
            raise TypeError('argument cfg is required.')
        else:
            if not isinstance(kwargs['cfg'], config.Config):
                raise TypeError(
                    'argument cfg should be a valid '
                    'PostSegmentation configuration object i.e. '
                    'postsegmentation.config.Config')
        self._cfg = kwargs['cfg']
        kwargs.pop('cfg')
        self._logger = logging.getLogger('access_log')
        super(FileToLogInterface, self).__init__(*args, **kwargs)

    def write(self, msg):
        super(FileToLogInterface, self).write(msg)
        self._logger.info(msg)

Programs written in other languages (e.g. C) have to do special magic (called double-forking) expressly to detach from the terminal (and to prevent zombie processes). So, I think the best solution is to emulate them.
A plus of re-executing your program is, you can choose redirections on the command-line, e.g. /usr/bin/python mycoolscript.py 2>&1 1>/dev/null
See this post for more info: What is the reason for performing a double fork when creating a daemon?

I know this question is answered (using python abc.py > output.log 2>&1), but I still have to say:
When writing your program, don't write to stdout. Always use logging to output whatever you want. That will give you a lot of freedom later when you want to redirect, filter, or rotate the output files.
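For illustration only (not from the original answer), a minimal sketch of what that freedom looks like with the standard logging module; the logger name, file name, and rotation sizes are made up:
import logging
from logging.handlers import RotatingFileHandler

logger = logging.getLogger('myapp')            # hypothetical logger name
logger.setLevel(logging.INFO)

# Rotate the log file at roughly 1 MB, keeping three old copies around.
handler = RotatingFileHandler('myapp.log', maxBytes=1000000, backupCount=3)
handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
logger.addHandler(handler)

logger.info('this goes to myapp.log, not stdout')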

As mentioned by @jfs, most solutions will not properly handle some types of stdout output, such as that from C extensions. There is a module on PyPI that takes care of all this, called wurlitzer. You just need its sys_pipes context manager. It's as easy as:
from contextlib import redirect_stdout
import os
from wurlitzer import sys_pipes

log = open("test.log", "a")
with redirect_stdout(log), sys_pipes():
    print("print statement")
    os.system("echo echo call")

Based on previous answers on this post, I wrote this class for myself as a more compact and flexible way of redirecting the output of pieces of code (here just to a list) and ensuring that stdout is restored afterwards.
class out_to_lt():
    def __init__(self, lt):
        if type(lt) == list:
            self.lt = lt
        else:
            raise Exception("Need to pass a list")

    def __enter__(self):
        import sys
        self._sys = sys
        self._stdout = sys.stdout
        sys.stdout = self
        return self

    def write(self, txt):
        self.lt.append(txt)

    def __exit__(self, type, value, traceback):
        self._sys.stdout = self._stdout
Used as:
lt = []
with out_to_lt(lt) as o:
    print("Test 123\n\n")
    print(help(str))
Update: I just found a scenario where I had to add two extra methods, but it was easy to adapt:
class out_to_lt():
    ...
    def isatty(self):
        return True  # True: running in a real terminal; False: being piped, redirected, or run from cron
    def flush(self):
        pass

There are other versions using context managers, but nothing this simple. I actually just googled to double-check that it would work and was surprised not to see it, so for other people looking for a quick solution that is safe and applies only to the code within the context block, here it is:
import sys
with open('test_file', 'w') as sys.stdout:
    print('Testing 1 2 3')
Tested like so:
$ cat redirect_stdout.py
import sys
with open('test_file', 'w') as sys.stdout:
    print('Testing 1 2 3')
$ python redirect_stdout.py
$ cat test_file
Testing 1 2 3

Related


How to check output of a sub process but also hide it? [duplicate]

NB. I have seen Log output of multiprocessing.Process - unfortunately, it doesn't answer this question.
I am creating a child process (on Windows) via multiprocessing. I want all of the child process's stdout and stderr output to be redirected to a log file, rather than appearing at the console. The only suggestion I have seen is for the child process to set sys.stdout to a file. However, this does not effectively redirect all stdout output, due to the behaviour of stdout redirection on Windows.
To illustrate the problem, build a Windows DLL with the following code
#include <iostream>

extern "C"
{
    __declspec(dllexport) void writeToStdOut()
    {
        std::cout << "Writing to STDOUT from test DLL" << std::endl;
    }
}
Then create and run a python script like the following, which imports this DLL and calls the function:
from ctypes import *
import sys
print
print "Writing to STDOUT from python, before redirect"
print
sys.stdout = open("stdout_redirect_log.txt", "w")
print "Writing to STDOUT from python, after redirect"
testdll = CDLL("Release/stdout_test.dll")
testdll.writeToStdOut()
In order to see the same behaviour as me, it is probably necessary for the DLL to be built against a different C runtime than the one Python uses. In my case, Python is built with Visual Studio 2010, but my DLL is built with VS 2005.
The behaviour I see is that the console shows:
> stdout_test.py
Writing to STDOUT from python, before redirect
Writing to STDOUT from test DLL
While the file stdout_redirect_log.txt ends up containing:
Writing to STDOUT from python, after redirect
In other words, setting sys.stdout failed to redirect the stdout output generated by the DLL. This is unsurprising given the nature of the underlying APIs for stdout redirection in Windows. I have encountered this problem at the native/C++ level before and never found a way to reliably redirect stdout from within a process. It has to be done externally.
This is actually the very reason I am launching a child process - it's so that I can connect externally to its pipes and thus guarantee that I am intercepting all of its output. I can definitely do this by launching the process manually with pywin32, but I would very much like to be able to use the facilities of multiprocessing, in particular the ability to communicate with the child process via a multiprocessing Pipe object, in order to get progress updates. The question is whether there is any way to both use multiprocessing for its IPC facilities and to reliably redirect all of the child's stdout and stderr output to a file.
UPDATE: Looking at the source code for multiprocessing.Process, it has a static member, _Popen, which looks like it can be used to override the class used to create the process. If it's set to None (the default), it uses multiprocessing.forking._Popen, but it looks like by saying
multiprocessing.Process._Popen = MyPopenClass
I could override the process creation. However, although I could derive this from multiprocessing.forking._Popen, it looks like I would have to copy a bunch of internal stuff into my implementation, which sounds flaky and not very future-proof. If that's the only choice I think I'd probably plump for doing the whole thing manually with pywin32 instead.
The solution you suggest is a good one: create your processes manually such that you have explicit access to their stdout/stderr file handles. You can then create a socket to communicate with the sub-process and use multiprocessing.connection over that socket (multiprocessing.Pipe creates the same type of connection object, so this should give you all the same IPC functionality).
Here's a two-file example.
master.py:
import multiprocessing.connection
import subprocess
import socket
import sys, os

## Listen for connection from remote process (and find free port number)
port = 10000
while True:
    try:
        l = multiprocessing.connection.Listener(('localhost', int(port)), authkey="secret")
        break
    except socket.error as ex:
        if ex.errno != 98:
            raise
        port += 1  ## if errno==98, then port is not available.

proc = subprocess.Popen((sys.executable, "subproc.py", str(port)), stdout=subprocess.PIPE, stderr=subprocess.PIPE)

## open connection for remote process
conn = l.accept()
conn.send([1, "asd", None])

print(proc.stdout.readline())
subproc.py:
import multiprocessing.connection
import subprocess
import sys, os, time

port = int(sys.argv[1])
conn = multiprocessing.connection.Client(('localhost', port), authkey="secret")
while True:
    try:
        obj = conn.recv()
        print("received: %s\n" % str(obj))
        sys.stdout.flush()
    except EOFError:  ## connection closed
        break
You may also want to see the first answer to this question to get non-blocking reads from the subprocess.
I don't think you have a better option than redirecting the subprocess to a file, as you mentioned in your comment.
The way console stdin/out/err work on Windows is that each process, when it's born, has its std handles defined. You can change them with SetStdHandle. When you modify Python's sys.stdout, you only modify where Python prints out stuff, not where other DLLs are printing stuff. Part of the CRT in your DLL is using GetStdHandle to find out where to print to. If you want, you can do whatever piping you want in the Windows API in your DLL or in your Python script with pywin32. Though I do think it'll be simpler with subprocess.
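For illustration only (not from the original answer), a minimal subprocess sketch along those lines; the child script name and log file are made up:
import subprocess
import sys

# Attach both of the child's standard streams to a log file, so output coming
# from C runtimes and DLLs inside the child is captured too.
with open('child.log', 'w') as log:
    proc = subprocess.Popen(
        [sys.executable, 'child_script.py'],   # hypothetical child script
        stdout=log,
        stderr=subprocess.STDOUT,
    )
    proc.wait()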
Alternatively (and I know this might be slightly off-topic, but it helped in my case with the same problem), this can be resolved with screen on Linux:
screen -L -Logfile './logfile_%Y-%m-%d.log' python my_multiproc_script.py
This way there is no need to implement all the master-child communication.
I assume I'm off base and missing something, but for what it's worth here is what came to mind when I read your question.
If you can intercept all of the stdout and stderr (I got that impression from your question), then why not add or wrap that capture functionality around each of your processes? Then send what is captured through a queue to a consumer that can do whatever you want with all of the outputs?
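For what it's worth, a minimal sketch of that idea (not from the original answer; the class and file names are made up): the child replaces sys.stdout with an object that pushes everything onto a multiprocessing.Queue, and the parent drains the queue into a log file.
import io
import sys
from multiprocessing import Process, Queue

class QueueWriter(io.TextIOBase):
    # File-like object that forwards every write to a multiprocessing.Queue.
    def __init__(self, queue):
        self.queue = queue

    def write(self, text):
        self.queue.put(text)
        return len(text)

def worker(queue):
    # Inside the child: replace stdout/stderr before doing any work.
    sys.stdout = sys.stderr = QueueWriter(queue)
    print('hello from the child')

if __name__ == '__main__':
    q = Queue()
    p = Process(target=worker, args=(q,))
    p.start()
    p.join()
    with open('child_output.log', 'w') as log:   # hypothetical consumer
        while not q.empty():                     # good enough for a sketch
            log.write(q.get())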
In my situation I changed sys.stdout.write to write to a PySide QTextEdit. I couldn't read from sys.stdout and I didn't know how to change sys.stdout to be readable. I created two Pipes. One for stdout and the other for stderr. In the separate process I redirect sys.stdout and sys.stderr to the child connection of the multiprocessing pipe. On the main process I created two threads to read the stdout and stderr parent pipe and redirect the pipe data to sys.stdout and sys.stderr.
import sys
import contextlib
import threading
import multiprocessing as mp
import multiprocessing.queues
from queue import Empty
import time


class PipeProcess(mp.Process):
    """Process to pipe the output of the sub process and redirect it to this sys.stdout and sys.stderr.

    Note:
        The use_queue = True argument will pass data between processes using Queues instead of Pipes. Queues will
        give you the full output and read all of the data from the Queue. A pipe is more efficient, but may not
        redirect all of the output back to the main process.
    """
    def __init__(self, group=None, target=None, name=None, args=tuple(), kwargs={}, *_, daemon=None,
                 use_pipe=None, use_queue=None):
        self.read_out_th = None
        self.read_err_th = None
        self.pipe_target = target
        self.pipe_alive = mp.Event()

        if use_pipe or (use_pipe is None and not use_queue):  # Default
            self.parent_stdout, self.child_stdout = mp.Pipe(False)
            self.parent_stderr, self.child_stderr = mp.Pipe(False)
        else:
            self.parent_stdout = self.child_stdout = mp.Queue()
            self.parent_stderr = self.child_stderr = mp.Queue()

        args = (self.child_stdout, self.child_stderr, target) + tuple(args)
        target = self.run_pipe_out_target

        super(PipeProcess, self).__init__(group=group, target=target, name=name, args=args, kwargs=kwargs,
                                          daemon=daemon)

    def start(self):
        """Start the multiprocess and reading thread."""
        self.pipe_alive.set()
        super(PipeProcess, self).start()

        self.read_out_th = threading.Thread(target=self.read_pipe_out,
                                            args=(self.pipe_alive, self.parent_stdout, sys.stdout))
        self.read_err_th = threading.Thread(target=self.read_pipe_out,
                                            args=(self.pipe_alive, self.parent_stderr, sys.stderr))
        self.read_out_th.daemon = True
        self.read_err_th.daemon = True
        self.read_out_th.start()
        self.read_err_th.start()

    @classmethod
    def run_pipe_out_target(cls, pipe_stdout, pipe_stderr, pipe_target, *args, **kwargs):
        """The real multiprocessing target to redirect stdout and stderr to a pipe or queue."""
        sys.stdout.write = cls.redirect_write(pipe_stdout)  # , sys.__stdout__)  # Is redirected in main process
        sys.stderr.write = cls.redirect_write(pipe_stderr)  # , sys.__stderr__)  # Is redirected in main process

        pipe_target(*args, **kwargs)

    @staticmethod
    def redirect_write(child, out=None):
        """Create a function to write out a pipe and write out an additional out."""
        if isinstance(child, mp.queues.Queue):
            send = child.put
        else:
            send = child.send_bytes  # No need to pickle with child_conn.send(data)

        def write(data, *args):
            try:
                if isinstance(data, str):
                    data = data.encode('utf-8')
                send(data)
                if out is not None:
                    out.write(data)
            except:
                pass
        return write

    @classmethod
    def read_pipe_out(cls, pipe_alive, pipe_out, out):
        if isinstance(pipe_out, mp.queues.Queue):
            # Queue has better functionality to get all of the data
            def recv():
                return pipe_out.get(timeout=0.5)

            def is_alive():
                return pipe_alive.is_set() or pipe_out.qsize() > 0
        else:
            # Pipe is more efficient
            recv = pipe_out.recv_bytes  # No need to unpickle with data = pipe_out.recv()
            is_alive = pipe_alive.is_set

        # Loop through reading and redirecting data
        while is_alive():
            try:
                data = recv()
                if isinstance(data, bytes):
                    data = data.decode('utf-8')
                out.write(data)
            except EOFError:
                break
            except Empty:
                pass
            except:
                pass

    def join(self, *args):
        # Wait for process to finish (unless a timeout was given)
        super(PipeProcess, self).join(*args)

        # Trigger to stop the threads
        self.pipe_alive.clear()

        # Pipe must close to prevent blocking and waiting on recv forever
        if not isinstance(self.parent_stdout, mp.queues.Queue):
            with contextlib.suppress():
                self.parent_stdout.close()
            with contextlib.suppress():
                self.parent_stderr.close()

        # Close the pipes and threads
        with contextlib.suppress():
            self.read_out_th.join()
        with contextlib.suppress():
            self.read_err_th.join()


def run_long_print():
    for i in range(1000):
        print(i)
        print(i, file=sys.stderr)

    print('finished')


if __name__ == '__main__':
    # Example test write (My case was a QTextEdit)
    out = open('stdout.log', 'w')
    err = open('stderr.log', 'w')

    # Overwrite the write function and not the actual stdout object to prove this works
    sys.stdout.write = out.write
    sys.stderr.write = err.write

    # Create a process that uses pipes to read multiprocess output back into sys.stdout.write
    proc = PipeProcess(target=run_long_print, use_queue=True)  # If use_pipe=True Pipe may not write out all values
    # proc.daemon = True  # If daemon and use_queue Not all output may be redirected to stdout

    proc.start()
    # time.sleep(5)  # Not needed unless use_pipe or daemon and all of stdout/stderr is desired

    # Close the process
    proc.join()  # For some odd reason this blocks forever when use_queue=False

    # Close the output files for this test
    out.close()
    err.close()
Here is a simple and straightforward way of capturing stdout for a multiprocessing.Process:
import app
import io
import sys
from multiprocessing import Process

def run_app(some_param):
    sys.stdout = io.TextIOWrapper(open(sys.stdout.fileno(), 'wb', 0), write_through=True)
    app.run()

app_process = Process(target=run_app, args=('some_param',))
app_process.start()
# Use app_process.terminate() for python <= 3.7.
app_process.kill()

Python 3: Unbuffered vs Buffered Streams

I have been using the following snippet to silence (redirect output from) C code called in my Python script:
from ctypes import CDLL, c_void_p
import os
import sys

# Code
class silence(object):
    def __init__(self, stdout=os.devnull):
        self.outfile = stdout

    def __enter__(self):
        # Flush
        sys.__stdout__.flush()

        # Save
        self.saved_stream = sys.stdout
        self.fd = sys.stdout.fileno()
        self.saved_fd = os.dup(self.fd)

        # Open the redirect
        self.new_stream = open(self.outfile, 'wb', 0)
        self.new_fd = self.new_stream.fileno()

        # Replace
        os.dup2(self.new_fd, self.fd)

    def __exit__(self, *args):
        # Flush
        self.saved_stream.flush()

        # Restore
        os.dup2(self.saved_fd, self.fd)
        sys.stdout = self.saved_stream

        # Clean up
        self.new_stream.close()
        os.close(self.saved_fd)

# Test case
libc = CDLL('libc.so.6')

# Silence!
with silence():
    libc.printf(b'Hello from C in silence\n')
The idea is to redirect the fd associated with stdout and replace it with one associated with an open null device. Unfortunately, it does not work as expected under Python 3:
$ python2.7 test.py
$ python3.3 -u test.py
$ python3.3 test.py
Hello from C in silence
Under Python 2.7, and under Python 3.3 with unbuffered output, it does work. I am unsure what the underlying cause is, however. Even if stdout is buffered, the call to self.saved_stream.flush() should end up calling fflush(stdout) at the C level (flushing the output to the null device).
What part of the Python 3 I/O model am I misunderstanding?
I'm not 100% sure I understand the Py3 I/O model either, but adding
sys.stdout = os.fdopen(self.fd, 'wb', 0)
right after your assignment to self.fd fixes it for me in Python 3.4 (I was able to reproduce the problem in 3.4 before I added this statement).
I'm not entirely sure what's going on either, but on my system there are two ways to fix this:
Replace the call to self.saved_stream.flush() in __exit__ with libc.fflush(None).
Call libc.printf with any string before calling silence(), for example:
libc = CDLL('/bin/cygwin1.dll')
libc.printf(b'')
Also, only with the second way do the outputs of Python's print and libc.printf remain synchronized after the with silence(): block.

Capture stdout from a script?

suppose there is a script doing something like this:
# module writer.py
import sys

def write():
    sys.stdout.write("foobar")
Now suppose I want to capture the output of the write function and store it in a variable for further processing. The naive solution was:
# module mymodule.py
from writer import write
out = write()
print out.upper()
But this doesn't work. I came up with another solution and it works, but please let me know if there is a better way to solve the problem. Thanks
import sys
from cStringIO import StringIO
# setup the environment
backup = sys.stdout
# ####
sys.stdout = StringIO() # capture output
write()
out = sys.stdout.getvalue() # release output
# ####
sys.stdout.close() # close the stream
sys.stdout = backup # restore original stdout
print out.upper() # post processing
For future visitors: Python 3.4 contextlib provides for this directly (see Python contextlib help) via the redirect_stdout context manager:
from contextlib import redirect_stdout
import io

f = io.StringIO()
with redirect_stdout(f):
    help(pow)
s = f.getvalue()
Setting stdout is a reasonable way to do it. Another is to run it as another process:
import subprocess
proc = subprocess.Popen(["python", "-c", "import writer; writer.write()"], stdout=subprocess.PIPE)
out = proc.communicate()[0]
print out.upper()
Here is a context manager version of your code. It yields a list of two values; the first is stdout, the second is stderr.
import contextlib

@contextlib.contextmanager
def capture():
    import sys
    from cStringIO import StringIO
    oldout, olderr = sys.stdout, sys.stderr
    try:
        out = [StringIO(), StringIO()]
        sys.stdout, sys.stderr = out
        yield out
    finally:
        sys.stdout, sys.stderr = oldout, olderr
        out[0] = out[0].getvalue()
        out[1] = out[1].getvalue()

with capture() as out:
    print 'hi'
Starting with Python 3 you can also use sys.stdout.buffer.write() to write (already) encoded byte strings to stdout (see stdout in Python 3).
When you do that, the simple StringIO approach doesn't work because neither sys.stdout.encoding nor sys.stdout.buffer would be available.
Starting with Python 2.6 you can use the TextIOBase API, which includes the missing attributes:
import sys
from io import TextIOWrapper, BytesIO
# setup the environment
old_stdout = sys.stdout
sys.stdout = TextIOWrapper(BytesIO(), sys.stdout.encoding)
# do some writing (indirectly)
write("blub")
# get output
sys.stdout.seek(0) # jump to the start
out = sys.stdout.read() # read output
# restore stdout
sys.stdout.close()
sys.stdout = old_stdout
# do stuff with the output
print(out.upper())
This solution works for Python 2 >= 2.6 and Python 3.
Please note that our sys.stdout.write() only accepts unicode strings and sys.stdout.buffer.write() only accepts byte strings.
This might not be the case for old code, but is often the case for code that is built to run on Python 2 and 3 without changes.
If you need to support code that sends byte strings to stdout directly without using stdout.buffer, you can use this variation:
class StdoutBuffer(TextIOWrapper):
    def write(self, string):
        try:
            return super(StdoutBuffer, self).write(string)
        except TypeError:
            # redirect encoded byte strings directly to buffer
            return super(StdoutBuffer, self).buffer.write(string)
You don't have to set the encoding of the buffer to sys.stdout.encoding, but this helps when using this method for testing/comparing script output.
Or maybe use functionality that is already there...
from IPython.utils.capture import capture_output

with capture_output() as c:
    print('some output')

c()
print c.stdout
This is the decorator counterpart of my original code.
writer.py remains the same:
import sys

def write():
    sys.stdout.write("foobar")
mymodule.py gets slightly modified:
from writer import write as _write
from decorators import capture

@capture
def write():
    return _write()

out = write()
# out post processing...
And here is the decorator:
def capture(f):
    """
    Decorator to capture standard output
    """
    def captured(*args, **kwargs):
        import sys
        from cStringIO import StringIO
        # setup the environment
        backup = sys.stdout

        try:
            sys.stdout = StringIO()      # capture output
            f(*args, **kwargs)
            out = sys.stdout.getvalue()  # release output
        finally:
            sys.stdout.close()   # close the stream
            sys.stdout = backup  # restore original stdout

        return out  # captured output wrapped in a string
    return captured
Here's a context manager taking inspiration from @JonnyJD's answer, supporting writing bytes to buffer attributes but also taking advantage of sys's dunder-io references for further simplification.
import io
import sys
import contextlib

@contextlib.contextmanager
def capture_output():
    output = {}
    try:
        # Redirect
        sys.stdout = io.TextIOWrapper(io.BytesIO(), sys.stdout.encoding)
        sys.stderr = io.TextIOWrapper(io.BytesIO(), sys.stderr.encoding)
        yield output
    finally:
        # Read
        sys.stdout.seek(0)
        sys.stderr.seek(0)
        output['stdout'] = sys.stdout.read()
        output['stderr'] = sys.stderr.read()
        sys.stdout.close()
        sys.stderr.close()

        # Restore
        sys.stdout = sys.__stdout__
        sys.stderr = sys.__stderr__

with capture_output() as output:
    print('foo')
    sys.stderr.buffer.write(b'bar')

print('stdout: {stdout}'.format(stdout=output['stdout']))
print('stderr: {stderr}'.format(stderr=output['stderr']))
Output is:
stdout: foo
stderr: bar
The question here (the example of how to redirect output, not the tee part) uses os.dup2 to redirect a stream at the OS level. That is nice because it will apply to commands that you spawn from your program as well.
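For illustration only (not from that question), a minimal sketch of the os.dup2 approach; the file name is made up, and a fuller context-manager version appears earlier on this page:
import os
import sys

with open('captured.txt', 'w') as f:
    saved_fd = os.dup(1)          # keep a copy of the real stdout fd
    sys.stdout.flush()
    os.dup2(f.fileno(), 1)        # fd 1 now points at the file
    try:
        print('from Python')
        os.system('echo from a child process')  # captured as well
    finally:
        sys.stdout.flush()
        os.dup2(saved_fd, 1)      # restore the original stdout
        os.close(saved_fd)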
I think you should look at these four objects:
from test.test_support import captured_stdout, captured_output, \
captured_stderr, captured_stdin
Example:
from writer import write

with captured_stdout() as stdout:
    write()

print stdout.getvalue().upper()
UPD: As Eric said in a comment, one shouldn't use them directly, so I copied and pasted the code.
# Code from test.test_support:
import contextlib
import sys

@contextlib.contextmanager
def captured_output(stream_name):
    """Return a context manager used by captured_stdout and captured_stdin
    that temporarily replaces the sys stream *stream_name* with a StringIO."""
    import StringIO
    orig_stdout = getattr(sys, stream_name)
    setattr(sys, stream_name, StringIO.StringIO())
    try:
        yield getattr(sys, stream_name)
    finally:
        setattr(sys, stream_name, orig_stdout)

def captured_stdout():
    """Capture the output of sys.stdout:

       with captured_stdout() as s:
           print "hello"
       self.assertEqual(s.getvalue(), "hello")
    """
    return captured_output("stdout")

def captured_stderr():
    return captured_output("stderr")

def captured_stdin():
    return captured_output("stdin")
I like the contextmanager solution; however, if you need the buffer stored with the open file and fileno support, you could do something like this:
import six
from six.moves import StringIO

class FileWriteStore(object):
    def __init__(self, file_):
        self.__file__ = file_
        self.__buff__ = StringIO()

    def __getattribute__(self, name):
        if name in {
                "write", "writelines", "get_file_value", "__file__",
                "__buff__"}:
            return super(FileWriteStore, self).__getattribute__(name)
        return self.__file__.__getattribute__(name)

    def write(self, text):
        if isinstance(text, six.string_types):
            try:
                self.__buff__.write(text)
            except:
                pass
        self.__file__.write(text)

    def writelines(self, lines):
        try:
            self.__buff__.writelines(lines)
        except:
            pass
        self.__file__.writelines(lines)

    def get_file_value(self):
        return self.__buff__.getvalue()
use
import sys
sys.stdout = FileWriteStore(sys.stdout)
print "test"
buffer = sys.stdout.get_file_value()
# you don't want to print the buffer while still storing
# else it will double in size every print
sys.stdout = sys.stdout.__file__
print buffer
Another way when third party code has already copied a reference to sys.stdout is to temporarily replace the write() method itself:
from types import MethodType
...
f = io.StringIO()
def new_write(self, data):
f.write(data)
old_write = sys.stdout.write
sys.stdout.write = MethodType(new_write, sys.stdout)
error = command.run(args)
sys.stdout.write = old_write
output = f.getvalue()

Suppressing output of module calling outside library

I have an annoying problem when using machine learning library PyML. PyML uses libsvm to train the SVM classifier. The problem is that libsvm outputs some text to standard output. But because that is outside of Python I cannot intercept it. I tried using methods described in problem Silence the stdout of a function in Python without trashing sys.stdout and restoring each function call but none of those help.
Is there any way to do this? Modifying PyML is not an option.
Open /dev/null for writing, use os.dup() to copy stdout, and use os.dup2() to copy your open /dev/null to stdout. Use os.dup2() to copy your copied stdout back to the real stdout after.
import os
import sys

devnull = open('/dev/null', 'w')
oldstdout_fno = os.dup(sys.stdout.fileno())
os.dup2(devnull.fileno(), 1)
makesomenoise()
os.dup2(oldstdout_fno, 1)
Dave Smith gave a wonderful answer to that on his blog. Basically, it wraps Ignacio's answer nicely:
import os
import sys
from contextlib import contextmanager

@contextmanager
def suppress_stdout():
    with open(os.devnull, "w") as devnull:
        old_stdout = sys.stdout
        sys.stdout = devnull
        try:
            yield
        finally:
            sys.stdout = old_stdout
Now, you can surround any function that garbles unwanted noise into stdout like this:
print "You can see this"
with suppress_stdout():
print "You cannot see this"
print "And you can see this again"
For Python 3 you can use:
from contextlib import contextmanager
import os
import sys

@contextmanager
def suppress_stdout():
    with open(os.devnull, "w") as devnull:
        old_stdout = sys.stdout
        sys.stdout = devnull
        try:
            yield
        finally:
            sys.stdout = old_stdout
I had the same problem and fixed it like that:
import sys
from cStringIO import StringIO

def wrapped_svm_predict(*args):
    """Run :func:`svm_predict` with no *stdout* output."""
    so, sys.stdout = sys.stdout, StringIO()
    ret = svm_predict(*args)
    sys.stdout = so
    return ret
I had a similar problem with portaudio/PyAudio initialization. I started with Reid's answer, which worked. Although I needed to redirect stderr instead. So, here is an updated, cross-platform version that redirects both:
import sys, os

# hide diagnostic output
with open(os.devnull, 'w') as devnull:
    # suppress stdout
    orig_stdout_fno = os.dup(sys.stdout.fileno())
    os.dup2(devnull.fileno(), 1)
    # suppress stderr
    orig_stderr_fno = os.dup(sys.stderr.fileno())
    os.dup2(devnull.fileno(), 2)

    print('*** stdout should be hidden! ****')
    print('*** stderr should be too! ****', file=sys.stderr)

    os.dup2(orig_stdout_fno, 1)  # restore stdout
    os.dup2(orig_stderr_fno, 2)  # restore stderr

print('done.')
Should be easy to comment out a part you don't need.
Edit: These might help, don't have time to look at the moment:
https://docs.python.org/3/library/contextlib.html#contextlib.redirect_stdout
