I have a logger that has a RotatingFileHandler.
I want to redirect all stdout and stderr output to the logger.
How can I do that?
Not enough rep to comment, but I wanted to add the version of this that worked for me in case others are in a similar situation.
class LoggerWriter:
    def __init__(self, level):
        # self.level is really a logging method such as log.debug(message),
        # at least in my case
        self.level = level

    def write(self, message):
        # this check reduces the number of empty lines that end up
        # in the logger
        if message != '\n':
            self.level(message)

    def flush(self):
        # provide a flush method so things can be flushed when the
        # system wants to; nothing is buffered here, so it is a no-op
        pass
and this would look something like:
log = logging.getLogger('foobar')
sys.stdout = LoggerWriter(log.debug)
sys.stderr = LoggerWriter(log.warning)
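For this to produce any output, the logger needs at least one handler attached (the RotatingFileHandler from the question, for example). A minimal, hypothetical setup to try it end to end might be:

import logging
import logging.handlers
import sys

log = logging.getLogger('foobar')
log.setLevel(logging.DEBUG)
# any handler works here; the file name and sizes are just placeholders
log.addHandler(logging.handlers.RotatingFileHandler('out.log', maxBytes=1000000, backupCount=3))

sys.stdout = LoggerWriter(log.debug)
sys.stderr = LoggerWriter(log.warning)
print('hello')  # ends up in out.log as a DEBUG record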
UPDATE for Python 3:
This version includes a dummy flush method, which prevents an error when something expects the stream to have one (Python 2 was fine with just linebuf = '').
Note that your output (and log level) will look different if it is logged from an interactive interpreter session rather than run from a file. Running from a file produces the expected behavior (and the output shown below).
We still eliminate extra newlines, which the other solutions do not.
class StreamToLogger(object):
    """
    Fake file-like stream object that redirects writes to a logger instance.
    """
    def __init__(self, logger, level):
        self.logger = logger
        self.level = level
        self.linebuf = ''

    def write(self, buf):
        for line in buf.rstrip().splitlines():
            self.logger.log(self.level, line.rstrip())

    def flush(self):
        pass
Then test with something like:
import sys
import logging
# assuming the StreamToLogger class above is defined in this file
# (or imported from wherever you saved it)

logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s:%(levelname)s:%(name)s:%(message)s',
    filename='out.log',
    filemode='a'
)

log = logging.getLogger('foobar')
sys.stdout = StreamToLogger(log, logging.INFO)
sys.stderr = StreamToLogger(log, logging.ERROR)

print('Test to standard out')
raise Exception('Test to standard error')
See below for old Python 2.x answer and the example output:
All of the prior answers seem to have problems adding extra newlines where they aren't needed. The solution that works best for me is from http://www.electricmonk.nl/log/2011/08/14/redirect-stdout-and-stderr-to-a-logger-in-python/, where he demonstrates how to send both stdout and stderr to the logger:
import logging
import sys

class StreamToLogger(object):
    """
    Fake file-like stream object that redirects writes to a logger instance.
    """
    def __init__(self, logger, log_level=logging.INFO):
        self.logger = logger
        self.log_level = log_level
        self.linebuf = ''

    def write(self, buf):
        for line in buf.rstrip().splitlines():
            self.logger.log(self.log_level, line.rstrip())

logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s:%(levelname)s:%(name)s:%(message)s',
    filename="out.log",
    filemode='a'
)

stdout_logger = logging.getLogger('STDOUT')
sl = StreamToLogger(stdout_logger, logging.INFO)
sys.stdout = sl

stderr_logger = logging.getLogger('STDERR')
sl = StreamToLogger(stderr_logger, logging.ERROR)
sys.stderr = sl

print "Test to standard out"
raise Exception('Test to standard error')
The output looks like:
2011-08-14 14:46:20,573:INFO:STDOUT:Test to standard out
2011-08-14 14:46:20,573:ERROR:STDERR:Traceback (most recent call last):
2011-08-14 14:46:20,574:ERROR:STDERR: File "redirect.py", line 33, in <module>
2011-08-14 14:46:20,574:ERROR:STDERR:raise Exception('Test to standard error')
2011-08-14 14:46:20,574:ERROR:STDERR:Exception
2011-08-14 14:46:20,574:ERROR:STDERR::
2011-08-14 14:46:20,574:ERROR:STDERR:Test to standard error
Note that self.linebuf = '' is where the flush is being handled, rather than implementing a flush function.
If it's an all-Python system (i.e. no C libraries writing to fds directly, as Ignacio Vazquez-Abrams asked about) then you might be able to use an approach as suggested here:
class LoggerWriter:
    def __init__(self, logger, level):
        self.logger = logger
        self.level = level

    def write(self, message):
        if message != '\n':
            self.logger.log(self.level, message)
and then set sys.stdout and sys.stderr to LoggerWriter instances.
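For example, a minimal sketch of that (the logger name, file name and format here are just placeholders):

import logging
import sys

logging.basicConfig(filename='app.log', level=logging.DEBUG,
                    format='%(asctime)s:%(levelname)s:%(name)s:%(message)s')
log = logging.getLogger('demo')

sys.stdout = LoggerWriter(log, logging.INFO)
sys.stderr = LoggerWriter(log, logging.ERROR)
print('Test to standard out')  # logged at INFO, one record per write call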
You can use the redirect_stdout context manager:
import logging
import sys
from contextlib import redirect_stdout

logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logging.write = lambda msg: logging.info(msg) if msg != '\n' else None

with redirect_stdout(logging):
    print('Test')
or like this:
import logging
from contextlib import redirect_stdout

logger = logging.getLogger('Meow')
logger.setLevel(logging.INFO)

formatter = logging.Formatter(
    fmt='[{name}] {asctime} {levelname}: {message}',
    datefmt='%m/%d/%Y %H:%M:%S',
    style='{'
)

ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
ch.setFormatter(formatter)
logger.addHandler(ch)

logger.write = lambda msg: logger.info(msg) if msg != '\n' else None

with redirect_stdout(logger):
    print('Test')
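contextlib also has a matching redirect_stderr, so the same trick can cover both streams. A sketch combining them with the logger configured above (note that with a single write method both streams end up at the same level):

import sys
from contextlib import redirect_stdout, redirect_stderr

with redirect_stdout(logger), redirect_stderr(logger):
    print('Test')                    # goes through logger.write -> logger.info
    sys.stderr.write('also Test\n')  # ditto, so it is logged at INFO as well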
Output Redirection Done Right!
The Problem
logger.log and the other functions (.info/.error/etc.) output each call as a separate line, i.e. implicitly add (formatting and) a newline to it.
sys.stderr.write, on the other hand, just writes its literal input to the stream, including partial lines. For example, the output "ZeroDivisionError: division by zero" is actually 4(!) separate calls to sys.stderr.write:
sys.stderr.write('ZeroDivisionError')
sys.stderr.write(': ')
sys.stderr.write('division by zero')
sys.stderr.write('\n')
The 4 most upvoted approaches (1, 2, 3, 4) thus result in extra newlines -- simply put "1/0" into your program and you will get the following:
2021-02-17 13:10:40,814 - ERROR - ZeroDivisionError
2021-02-17 13:10:40,814 - ERROR - :
2021-02-17 13:10:40,814 - ERROR - division by zero
The Solution
Store the intermediate writes in a buffer. The reason I am using a list as a buffer rather than a string is to avoid Shlemiel the painter's algorithm. TL;DR: it is O(n) instead of potentially O(n^2).
class LoggerWriter:
    def __init__(self, logfct):
        self.logfct = logfct
        self.buf = []

    def write(self, msg):
        if msg.endswith('\n'):
            self.buf.append(msg.removesuffix('\n'))
            self.logfct(''.join(self.buf))
            self.buf = []
        else:
            self.buf.append(msg)

    def flush(self):
        pass

# To access the original stdout/stderr, use sys.__stdout__/sys.__stderr__
sys.stdout = LoggerWriter(logger.info)
sys.stderr = LoggerWriter(logger.error)
2021-02-17 13:15:22,956 - ERROR - ZeroDivisionError: division by zero
For versions below Python 3.9, you could replace msg.removesuffix('\n') with either msg.rstrip('\n') or msg[:-1].
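A hypothetical end-to-end use of this buffered writer, including putting the real streams back afterwards:

import logging
import sys

logging.basicConfig(filename='app.log', level=logging.DEBUG,
                    format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger('demo')

sys.stdout = LoggerWriter(logger.info)
sys.stderr = LoggerWriter(logger.error)
try:
    print('multi', 'part', 'line')  # several write() calls, buffered into one log record
finally:
    sys.stdout = sys.__stdout__     # restore the real streams
    sys.stderr = sys.__stderr__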
As an evolution of Cameron Gagnon's response, I've improved the LoggerWriter class to:
class LoggerWriter(object):
    def __init__(self, writer):
        self._writer = writer
        self._msg = ''

    def write(self, message):
        self._msg = self._msg + message
        while '\n' in self._msg:
            pos = self._msg.find('\n')
            self._writer(self._msg[:pos])
            self._msg = self._msg[pos+1:]

    def flush(self):
        if self._msg != '':
            self._writer(self._msg)
            self._msg = ''
Now unhandled exceptions look nicer:
2018-07-31 13:20:37,482 - ERROR - Traceback (most recent call last):
2018-07-31 13:20:37,483 - ERROR - File "mf32.py", line 317, in <module>
2018-07-31 13:20:37,485 - ERROR - main()
2018-07-31 13:20:37,486 - ERROR - File "mf32.py", line 289, in main
2018-07-31 13:20:37,488 - ERROR - int('')
2018-07-31 13:20:37,489 - ERROR - ValueError: invalid literal for int() with base 10: ''
With flush added to Vinay Sajip's answer:
class LoggerWriter:
    def __init__(self, logger, level):
        self.logger = logger
        self.level = level

    def write(self, message):
        if message != '\n':
            self.logger.log(self.level, message)

    def flush(self):
        pass
Quick but Fragile One-Liner
sys.stdout.write = logger.info
sys.stderr.write = logger.error
This simply assigns the logger functions to the stdout/stderr .write attributes, which means any write call will invoke the logger functions instead.
The downside of this approach is that the .write calls and the logger functions each typically add a newline, so you will end up with extra lines in your log file, which may or may not be a problem depending on your use case.
Another pitfall is that if your logger itself writes to stderr, you get infinite recursion (a stack overflow error), so only log to a file with this approach.
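If you do go this route, a sketch of a setup that avoids the recursion by logging to a file only (names are placeholders):

import logging
import sys

logger = logging.getLogger('demo')
logger.setLevel(logging.INFO)
logger.addHandler(logging.FileHandler('app.log'))  # file only, no StreamHandler

sys.stdout.write = logger.info   # every print/write call becomes its own log record
sys.stderr.write = logger.error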
Solving the problem where StreamHandler causes infinite recursion
My logger was causing infinite recursion because the StreamHandler was trying to write to stdout, which had itself been replaced by a logger, leading back into the handler again.
Solution
Use the original sys.__stdout__ for the StreamHandler ONLY, so that you can still see the logs in the terminal.
import io
import logging
import sys

class DefaultStreamHandler(logging.StreamHandler):
    def __init__(self, stream=sys.__stdout__):
        # Use the original sys.__stdout__ to write to stdout
        # for this handler, as sys.stdout will write out to the logger.
        super().__init__(stream)

class LoggerWriter(io.IOBase):
    """Class to replace the stderr/stdout calls to a logger"""
    def __init__(self, logger_name: str, log_level: int):
        """:param logger_name: Name to give the logger (e.g. 'stderr')
        :param log_level: The log level, e.g. logging.DEBUG / logging.INFO, that
            the MESSAGES should be logged at.
        """
        self.std_logger = logging.getLogger(logger_name)
        # Get the "root" app logger by its name (i.e. from a config dict or at the bottom of this file).
        # We will use it to copy all of its settings, except the name.
        app_logger = logging.getLogger("myAppsLogger")
        for handler in app_logger.handlers:
            self.std_logger.addHandler(handler)
        self.std_logger.setLevel(app_logger.level)  # the minimum lvl msgs will show at
        self.level = log_level  # the level msgs will be logged at
        self.buffer = []

    def write(self, msg: str):
        """Stdout/stderr logs one line at a time, rather than 1 message at a time.
        Use this function to aggregate multi-line messages into 1 log call."""
        msg = msg.decode() if issubclass(type(msg), bytes) else msg
        if not msg.endswith("\n"):
            return self.buffer.append(msg)
        self.buffer.append(msg.rstrip("\n"))
        message = "".join(self.buffer)
        self.std_logger.log(self.level, message)
        self.buffer = []

def replace_stderr_and_stdout_with_logger():
    """Replaces sys.stdout -> logger at INFO and sys.stderr -> logger at ERROR"""
    # To access the original stdout/stderr, use sys.__stdout__/sys.__stderr__
    sys.stdout = LoggerWriter("stdout", logging.INFO)
    sys.stderr = LoggerWriter("stderr", logging.ERROR)

if __name__ == "__main__":
    # Load the logger & handlers
    logger = logging.getLogger("myAppsLogger")
    logger.setLevel(logging.DEBUG)
    # HANDLER = logging.StreamHandler()
    HANDLER = DefaultStreamHandler()  # <--- replace the normal StreamHandler with this
    logger.addHandler(HANDLER)
    logFormatter = logging.Formatter("[%(asctime)s] - %(name)s - %(levelname)s - %(message)s")
    HANDLER.setFormatter(logFormatter)

    # Run this AFTER you load the logger
    replace_stderr_and_stdout_with_logger()
And then finally call replace_stderr_and_stdout_with_logger() after you've initialised your logger (the last bit of the code above).
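A quick, hypothetical sanity check once replace_stderr_and_stdout_with_logger() has run:

print("hello from print")          # reaches the 'stdout' logger at INFO
raise RuntimeError("test error")   # the traceback reaches the 'stderr' logger at ERROR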
If you want to log info and error messages to separate streams (info to stdout, errors to stderr), you can use this trick:
import sys
import logging as log  # this answer uses 'log' as an alias for the logging module

class ErrorStreamHandler(log.StreamHandler):
    """Print input log-message into stderr, print only error/warning messages"""
    def __init__(self, stream=sys.stderr):
        log.Handler.__init__(self, log.WARNING)
        self.stream = stream

    def emit(self, record):
        try:
            if record.levelno in (log.INFO, log.DEBUG, log.NOTSET):
                return
            msg = self.format(record)
            stream = self.stream
            # issue 35046: merged two stream.writes into one.
            stream.write(msg + self.terminator)
            self.flush()
        except RecursionError:  # See issue 36272
            raise
        except Exception:
            self.handleError(record)

class OutStreamHandler(log.StreamHandler):
    """Print input log-message into stdout, print only info/debug messages"""
    def __init__(self, loglevel, stream=sys.stdout):
        log.Handler.__init__(self, loglevel)
        self.stream = stream

    def emit(self, record):
        try:
            if record.levelno not in (log.INFO, log.DEBUG, log.NOTSET):
                return
            msg = self.format(record)
            stream = self.stream
            # issue 35046: merged two stream.writes into one.
            stream.write(msg + self.terminator)
            self.flush()
        except RecursionError:  # See issue 36272
            raise
        except Exception:
            self.handleError(record)
Usage:
log.basicConfig(level=settings.get_loglevel(),
                format="[%(asctime)s] %(levelname)s: %(message)s",
                datefmt='%Y/%m/%d %H:%M:%S',
                handlers=[ErrorStreamHandler(), OutStreamHandler(settings.get_loglevel())])
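A self-contained sketch of the same usage, with settings.get_loglevel() replaced by a hard-coded level:

import logging as log

log.basicConfig(level=log.DEBUG,
                format="[%(asctime)s] %(levelname)s: %(message)s",
                datefmt='%Y/%m/%d %H:%M:%S',
                handlers=[ErrorStreamHandler(), OutStreamHandler(log.DEBUG)])

log.info("this line goes to stdout")
log.error("this line goes to stderr")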
Related
My main program logs to its own log file and the sub-process should have its own log file.
I replaced the logger object inside the multiprocessing process, but the logging data from the sub-process is additionally redirected to the main log file.
How can I prevent this?
The structure looks like this:
import logging
import sys
import os
from pathlib import Path
import multiprocessing
import time
import requests

class ProcessFilter(logging.Filter):
    """Only accept log records from a specific pid."""

    def __init__(self, pid):
        self._pid = pid

    def filter(self, record):
        return record.process == self._pid

def create_logger(file):
    log = logging.getLogger(__name__)
    log.setLevel(logging.DEBUG)
    log.addFilter(ProcessFilter(pid=os.getpid()))
    file_handler = logging.FileHandler(file)
    stream_handler = logging.StreamHandler(sys.stdout)
    formatter = logging.Formatter('[%(asctime)s] %(levelname)s [%(filename)s.%(funcName)s:%(lineno)d] %(message)s',
                                  datefmt='%a, %d %b %Y %H:%M:%S')
    file_handler.setFormatter(formatter)
    stream_handler.setFormatter(formatter)
    log.addHandler(file_handler)
    log.addHandler(stream_handler)
    return log

def subprocess_init():
    global log
    sub_log_file = str(Path.home()) + '/logfile_sub.log'
    log = create_logger(sub_log_file)
    do_subprocess_stuff()

def do_subprocess_stuff():
    count = 0
    while True:
        create_log("subprocess", count)
        time.sleep(5)
        count += 1

def main_tasks():
    num = 10
    while num > 0:
        create_log("main", num)
        time.sleep(5)
        num -= 1

def create_log(text, num):
    log.debug(text + " log %s", num)

if __name__ == '__main__':
    file = str(Path.home()) + '/logfile.log'
    log = create_logger(file)
    sub_process = multiprocessing.Process(target=subprocess_init, args=())
    sub_process.daemon = True
    sub_process.start()
    main_tasks()
I am simply translating this answer to fit multiprocessing.
import logging

class ProcessFilter(logging.Filter):
    """Only accept log records from a specific pid."""

    def __init__(self, pid):
        self._pid = pid

    def filter(self, record):
        return record.process == self._pid

import logging
import os
import sys

def create_logger(file):
    log = logging.getLogger('')  # why use this logger and not __name__ ?
    log.setLevel(logging.DEBUG)
    log.addFilter(ProcessFilter(pid=os.getpid()))  # logger-wide filter
    file_handler = logging.FileHandler(file)
    stream_handler = logging.StreamHandler(sys.stdout)
    formatter = logging.Formatter('[%(asctime)s] %(levelname)s [%(filename)s.%(funcName)s:%(lineno)d] %(message)s',
                                  datefmt='%a, %d %b %Y %H:%M:%S')
    file_handler.setFormatter(formatter)
    stream_handler.setFormatter(formatter)
    log.addHandler(file_handler)
    log.addHandler(stream_handler)
    return log
N.B. you can also put the filter on a specific handler, for example:
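Here is a variant of the create_logger above where only the file handler filters by pid and the console handler shows everything:

import logging
import os
import sys

def create_logger(file):
    log = logging.getLogger('')
    log.setLevel(logging.DEBUG)
    file_handler = logging.FileHandler(file)
    file_handler.addFilter(ProcessFilter(pid=os.getpid()))  # handler-level filter
    stream_handler = logging.StreamHandler(sys.stdout)      # console stays unfiltered
    formatter = logging.Formatter('[%(asctime)s] %(levelname)s [%(filename)s.%(funcName)s:%(lineno)d] %(message)s',
                                  datefmt='%a, %d %b %Y %H:%M:%S')
    file_handler.setFormatter(formatter)
    stream_handler.setFormatter(formatter)
    log.addHandler(file_handler)
    log.addHandler(stream_handler)
    return log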
I have written and tried this code in Python 3 to send myself an email when an exception happens, but the SMTPHandler is not working. It works perfectly with the same args and values for SMTPHandler when the code is copy-pasted from this answer.
Working code:
import logging.handlers

smtp_handler = logging.handlers.SMTPHandler(mailhost=("smtp.gmail.com", 587),
                                            fromaddr="somemail#gmail.com",
                                            toaddrs="anothermail#gmail.com",
                                            subject=u"Alfred error!",
                                            credentials=("somemail#gmail.com", "asdasdasdasdasd"),
                                            secure=())

logger = logging.getLogger()
logger.addHandler(smtp_handler)

try:
    raise Exception()
except Exception as e:
    logger.exception('Unhandled Exception')
All the handlers are working fine, and the code from the answer works with just the exception() call. I can't understand why this one is not working.
log_config.py (not working code):
import os
import logging
import logging.handlers

version = "2.0.0"
LOGFILE_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'log', 'my_app.log')

logging.basicConfig(format='%(levelname)s - %(message)s', level=logging.DEBUG)
logging.getLogger('telegram').setLevel(logging.WARNING)
logging.getLogger('chardet.charsetprober').setLevel(logging.WARNING)

class TimedOutFilter(logging.Filter):
    def filter(self, record):
        if "Error while getting Updates: Timed out" in record.getMessage():
            return False

def getLogger(name):
    """
    Return a logger for the file
    :param name: the file name
    :return: a logger
    """
    global version
    logger = logging.getLogger(name)

    fh = logging.handlers.TimedRotatingFileHandler(LOGFILE_PATH, when='midnight')
    formatter = logging.Formatter('%(asctime)s - ({0}) %(name)s - %(levelname)s - %(message)s'.format(version))
    fh.setFormatter(formatter)
    fh.addFilter(TimedOutFilter())
    fh.setLevel(logging.DEBUG)
    logger.addHandler(fh)

    if os.environ.get('SERVER', True):
        mh = logging.handlers.SMTPHandler(mailhost=("smtp.gmail.com", 587),
                                          fromaddr="somemail#gmail.com",
                                          toaddrs="anothermail#gmail.com",
                                          subject=u"Alfred error!",
                                          credentials=("somemail#gmail.com", "asdasdasdasdasd"),
                                          secure=())
        mh.setFormatter(formatter)
        mh.addFilter(TimedOutFilter())
        mh.setLevel(logging.ERROR)
        logger.addHandler(mh)

    ch = logging.StreamHandler()
    formatter = logging.Formatter('{0} %(levelname)s - %(message)s'.format(version))
    ch.setFormatter(formatter)
    ch.addFilter(TimedOutFilter())
    ch.setLevel(logging.ERROR)
    logger.addHandler(ch)

    return logger

if __name__ == '__main__':
    logger = getLogger(__name__)
    try:
        raise Exception()
    except Exception as e:
        logger.exception('Unhandled Exception')
    logger.error('an error line')
    logger.debug('a debug line')
The TimedOutFilter was ruining everything because it only ever returned False (or None), never True.
So just doing:
class TimedOutFilter(logging.Filter):
    def filter(self, record):
        if "Error while getting Updates: Timed out" in record.getMessage():
            return False
        return True  # <==== Added this line only
fixed the whole thing.
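As a side note, since Python 3.2 (if I remember correctly) addFilter also accepts a plain callable that returns a truthy/falsy value, which makes this kind of slip harder to make:

def timed_out_filter(record):
    # keep every record except the noisy "Timed out" ones
    return "Error while getting Updates: Timed out" not in record.getMessage()

fh.addFilter(timed_out_filter)  # instead of fh.addFilter(TimedOutFilter())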
I have been reading up on proper logging and so far I am liking how it is going. All was fine until I tried to do logging in a main file and in a module I wrote. The main file is able to write to a file and to the console, but the imported module displays nothing in either. If I had to guess, I would assume I have to configure the module's output separately, since I am configuring logging in code. The problem is that I am not sure how to do that, or whether that is even the reason.

I have tried my best to google this instead of asking, but here I am now. Here is the link to the source code. If you try to run it you may have to change the import, as PyCharm does not like it when I import a file directly, so change "from tests import speedtest" to "import speedtest". The files are main.py and speedtest.py.
Main
import logging
from tests import speedtest
import time

# Logging configuration
logFormatter = logging.Formatter("%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s")
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

# creates a handler to deal with writing to the file
file_handler = logging.FileHandler("log.txt", mode="w")
file_handler.setFormatter(logFormatter)

# handler for writing to the console
console_handler = logging.StreamHandler()
console_handler.setFormatter(logFormatter)

# adds the handlers to the root logger
logger.addHandler(file_handler)
logger.addHandler(console_handler)

# max speed provided
NOMINAL_SPEED = 50
# threshold in percentage 60% seems to be a decent amount to expect.
THRESHOLD = 60
# padding in percentage for severe warning
PAD = 10
# amount of time in between runs
INTERVAL = 300

class Main:
    """
    Main running class
    """
    def __init__(self):
        self.speedtest = speedtest.SpeedTest(share=True)
        self.threshold = THRESHOLD
        self.pad = PAD
        self.nominal = NOMINAL_SPEED
        self.done = False
        logger.debug("Starting main loop.")
        while not self.done:
            self.loop()
            time.sleep(INTERVAL)

    def loop(self):
        try:
            results = self.speedtest.run()
        except Exception as e:
            logger.error("Skipped running speed test this run. Will try again next time")
            return
        download = float(results["download"][:-7])
        upload = float(results["upload"][:-7])
        url = results["url"]
        host = results["host"]
        diff_download = (download / self.nominal) * 100
        logger.debug("Current download is {} Mbps upload is {} Mbps. Share url: {} host: {}".format(download, upload, url, host))
        if (((self.threshold - self.pad)/100) * self.nominal) <= diff_download <= ((self.threshold/100) * self.nominal):
            logger.info("Speed is currently at {}% nominal.".format(diff_download))
            self.warning()
        elif diff_download <= ((self.threshold - self.pad)/100) * self.nominal:
            logger.info("Speed is currently at {}% nominal. This is a problem.".format(diff_download))
            self.critical()

    def warning(self):
        pass

    def critical(self):
        pass

if __name__ == "__main__":
    Main()
speedtest
import subprocess
import logging
import os

class SpeedTest:
    """
    Class to run speed test and return the results in an easy to use manner
    """
    def __init__(self, share=False):
        """
        Init method
        :param share: When set to true it will also return a url to the speed test image
        :return:
        """
        self.logger = logging.getLogger(__name__)
        self.logger.addHandler(logging.NullHandler())
        self._share = share
        if share is True:
            self.logger.debug("Share flag set to True")
            self.cmd = ["speedtest-cli", "--share"]
        else:
            self.logger.debug("Share not set to true. Ignoring share url")
            self.cmd = ["speedtest-cli"]

    def run(self):
        """
        Runs the speed test returning a dict containing upload, download, ping, and share url if wanted.
        :return:
        """
        self.logger.debug("Starting speedtest!")
        # check_output returns the output in bytes so we use decode() to turn it into a simple string. Then we split
        # the lines giving us a list.
        try:
            stdout = subprocess.check_output(self.cmd).decode().splitlines()
        except subprocess.CalledProcessError as e:
            self.logger.error(e)
            raise e
        res = {}
        for i in stdout:
            if "Download:" in i:
                res["download"] = i[10:]
            if "Upload:" in i:
                res["upload"] = i[8:]
            if "Hosted" in i:
                res["host"] = i[2:]
            if self._share is True and "Share results:" in i:
                res["url"] = i[15:]
            else:
                res["url"] = None
        return res

    def ping(self, addr):
        """
        Pings an address and returns a 1 if the connection can not be made or a 0 if it succeeds
        :param addr: IPv4 address
        :return:
        """
        try:
            if os.name == "nt":  # note: compare strings with ==, not "is"
                self.logger.debug("Windows OS detected")
                self.logger.info("Pinging {}".format(addr))
                subprocess.check_output(["ping", "-n", "1", addr])
            elif os.name == "posix":
                self.logger.debug("Nix OS detected")
                subprocess.check_output(["ping", "-c", "1", addr])
        except subprocess.CalledProcessError:
            self.logger.warning("Returned non zero value. Is the internet working?")
            return 1
        return 0

if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    for i in SpeedTest(share=True).run().items():
        print(i)
    print(SpeedTest().ping("8.8.8.0"))
In speedtest.py when you call:
logging.getLogger(__name__)
it will create a separate logger object for speedtest.py, so you will have to configure it separately. If you want it to be the same logger as in main, just add:
self.speedtest.logger = logger
after you create the SpeedTest object in Main's constructor
Another option for you is to pass __name__ as an argument to SpeedTest() and create the logger with that argument (I think this is a better option for you since you write to the logger in the constructor).
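A sketch of that second option, with logger_name as a hypothetical parameter name:

# speedtest.py -- variant of the constructor above
import logging

class SpeedTest:
    def __init__(self, logger_name=__name__, share=False):
        # use the name handed in by the caller, so main.py and speedtest.py
        # end up talking to the same configured logger
        self.logger = logging.getLogger(logger_name)
        self.logger.addHandler(logging.NullHandler())
        self._share = share

# main.py
# self.speedtest = speedtest.SpeedTest(logger_name=__name__, share=True)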