I'm trying to verify logging output in a Python application that writes log messages to two handlers with different levels: stdout and a file.
Using @unittest.mock.patch to patch sys.stdout with io.StringIO, I'm able to verify that appropriate messages are written there. Can I similarly target the TextIO used by the logging module and put my own StringIO object there instead?
import io
import logging
import os
import sys
import unittest.mock

import logutils.colorize


class TestMain(unittest.TestCase):
    def _setup_logger(self, stdout_level: str, file_level: str):
        try:
            os.unlink("test.log")
        except FileNotFoundError:
            pass
        log_stdout_handler = logutils.colorize.ColorizingStreamHandler(sys.stdout)
        log_stdout_handler.setFormatter(logging.Formatter("%(message)s"))
        log_stdout_handler.setLevel(stdout_level)
        log_file_handler = logging.FileHandler(filename="test.log", mode="a", encoding="utf8")
        log_file_handler.setFormatter(logging.Formatter("%(message)s"))
        log_file_handler.setLevel(file_level)
        _root_logger = logging.getLogger()
        _root_logger.addHandler(log_stdout_handler)
        _root_logger.addHandler(log_file_handler)
        _root_logger.setLevel(logging.DEBUG)

    def _log_messages(self):
        log = logging.getLogger("test")
        log.debug("debug")
        log.info("info")
        log.warning("warning")
        log.error("error")
        log.critical("critical")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    @unittest.mock.patch("?", new_callable=io.StringIO)  # <--- can I target FileHandler's io.TextIOWrapper here?
    def test_log_stdout_info_file_debug_then_messages_logged_appropriately(self, mock_file: io.StringIO, mock_stdout: io.StringIO):
        self._setup_logger("WARNING", "DEBUG")
        self._log_messages()
        stdout_output = mock_stdout.getvalue()
        stdout_lines = stdout_output.splitlines()
        self.assertEqual(len(stdout_lines), 3)  # irl i would probably regex match the messages
        file_output = mock_file.getvalue()
        file_lines = file_output.splitlines()
        self.assertEqual(len(file_lines), 5)
Stepping into logging/__init__.py:1054 I can see that FileHandler's stream field is an instance of _io.TextIOWrapper, but doing @unittest.mock.patch("_io.TextIOWrapper", new_callable=io.StringIO) does not work. Neither does using io.TextIOWrapper, logging.io.TextIOWrapper, or logging._io.TextIOWrapper.
Or perhaps there's a better way of accomplishing what I need?
I was getting nowhere with that, so what I ended up doing was mocking the emit functions of the log handlers (and the _open function of the FileHandler, since I don't want any log files to be created), and doing verification on the mocks.
def _assertEmitted(self, emit_mock: unittest.mock.MagicMock, messages: list[str]) -> None:
    self.assertEqual(emit_mock.call_count, len(messages))
    for index, message in enumerate(messages):
        self.assertEqual(emit_mock.call_args_list[index].args[0].msg, message)

@unittest.mock.patch.object(logutils.colorize.ColorizingStreamHandler, "emit")
@unittest.mock.patch.object(logging.FileHandler, "emit")
@unittest.mock.patch.object(logging.FileHandler, "_open")
def test_given_log_levels_when_logging_then_appropriate_messages_logged(self, _, file_emit, stdout_emit):
    # setup logger
    stdout_handler = logutils.colorize.ColorizingStreamHandler(sys.stdout)
    stdout_handler.setFormatter(logging.Formatter("..."))
    stdout_handler.setLevel("INFO")
    file_handler = logging.FileHandler(filename="doesn't matter", mode="a", encoding="utf8")
    file_handler.setFormatter(logging.Formatter("..."))
    file_handler.setLevel("DEBUG")
    root_logger = logging.getLogger()
    root_logger.addHandler(stdout_handler)
    root_logger.addHandler(file_handler)
    root_logger.setLevel(logging.DEBUG)
    # do log calls here
    log = logging.getLogger("test")
    log.info("info")
    log.debug("debug")
    self._assertEmitted(stdout_emit, ["info"])
    self._assertEmitted(file_emit, ["info", "debug"])
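For the record, the stream swap the question originally asks about is also possible without hunting down _io.TextIOWrapper: patch FileHandler's _open method (the factory that creates its stream) so it returns a StringIO you control. A minimal sketch under the same fixture as above; note that _open is a private method of logging.FileHandler, so this leans on an implementation detail:

@unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
@unittest.mock.patch.object(logging.FileHandler, "_open")
def test_file_stream_replaced_with_stringio(self, mock_open, mock_stdout):
    # logging.FileHandler calls _open() to create its stream, so the
    # handler ends up writing to the StringIO we hand back here.
    fake_file = io.StringIO()
    mock_open.return_value = fake_file
    self._setup_logger("WARNING", "DEBUG")
    self._log_messages()
    # WARNING and above reached the patched stdout...
    self.assertEqual(len(mock_stdout.getvalue().splitlines()), 3)
    # ...while everything from DEBUG up landed in the fake "file".
    self.assertEqual(len(fake_file.getvalue().splitlines()), 5)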
Related
I monitor my script with the logging module of the Python Standard Library, and I send the log records both to the console with a StreamHandler and to a file with a FileHandler.
I would like to have the option to disable a handler for a LogRecord independently of its severity. For example, for a specific LogRecord I would like the option not to send it to the file destination or to the console (by passing a parameter).
I have found that the library has the Filter class for that reason (described as a finer-grained way to filter records), but I haven't figured out how to do it.
Any ideas how to do this in a consistent way?
Finally, it is quite easy. I used a function as a Handler.filter, as suggested in the comments.
This is a working example:
from pathlib import Path
import logging
from logging import LogRecord

def build_handler_filters(handler: str):
    def handler_filter(record: LogRecord):
        if hasattr(record, 'block'):
            if record.block == handler:
                return False
        return True
    return handler_filter

ch = logging.StreamHandler()
ch.addFilter(build_handler_filters('console'))
fh = logging.FileHandler(Path('/tmp/test.log'))
fh.addFilter(build_handler_filters('file'))

mylogger = logging.getLogger(__name__)
mylogger.setLevel(logging.DEBUG)
mylogger.addHandler(ch)
mylogger.addHandler(fh)
When the logger is called, the message is sent to both the console and the file, i.e.
mylogger.info('msg')
To block the file destination, for example, the logger should be called with the extra argument, like this:
mylogger.info('msg only to console', extra={'block': 'file'})
Disabling console is analogous.
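For symmetry, the analogous call to keep a record out of the console under the same setup:

mylogger.info('msg only to file', extra={'block': 'console'})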
I am trying to have two different handlers, where one handler will print the logs on the console and the other will write the logs to a file. The console handler comes from the modbus-tk library, and I have written my own file handler.
LOG = utils.create_logger(name="console", record_format="%(message)s")  # ---> this is from the modbus-tk library
LOG = utils.create_logger("console", level=logging.INFO)

logging.basicConfig(filename="log", level=logging.DEBUG)
log = logging.getLogger("simulator")
handler = RotatingFileHandler("log", maxBytes=5000, backupCount=1)
log.addHandler(handler)
What I need:
LOG.info("This will print message on console")
log.info("This will print message in file")
But problem is both the logs are getting printed on the console and both are going in file. I want only LOG to be printed on the console and log to be printed in the file.
edited:
Adding snippet from utils.create_logger
def create_logger(name="dummy", level=logging.DEBUG, record_format=None):
    """Create a logger according to the given settings"""
    if record_format is None:
        record_format = "%(asctime)s\t%(levelname)s\t%(module)s.%(funcName)s\t%(threadName)s\t%(message)s"

    logger = logging.getLogger("modbus_tk")
    logger.setLevel(level)
    formatter = logging.Formatter(record_format)
    if name == "udp":
        log_handler = LogitHandler(("127.0.0.1", 1975))
    elif name == "console":
        log_handler = ConsoleHandler()
    elif name == "dummy":
        log_handler = DummyHandler()
    else:
        raise Exception("Unknown handler %s" % name)
    log_handler.setFormatter(formatter)
    logger.addHandler(log_handler)
    return logger
I have my own customized logging module. I have modified it a little, and I think it can now be right for your problem. It is fully configurable and it can handle multiple different handlers.
If you want to combine console and file logging, you only need to remove the return statement (I use it this way).
I have added comments to the code to make it more understandable, and you can find a test section in the if __name__ == "__main__": ... statement.
Code:
import logging
import os


# Custom logger class with multiple destinations
class CustomLogger(logging.Logger):
    """
    Customized Logger class from the original logging.Logger class.
    """

    # Format for console log
    FORMAT = (
        "[%(name)-30s][%(levelname)-19s] | %(message)-100s "
        "| (%(filename)s:%(lineno)d)"
    )

    # Format for log file
    LOG_FILE_FORMAT = "[%(name)s][%(levelname)s] | %(message)s | %(filename)s:%(lineno)d)"

    def __init__(
        self,
        name,
        log_file_path=None,
        console_level=logging.INFO,
        log_file_level=logging.DEBUG,
        log_file_open_format="w",
    ):
        logging.Logger.__init__(self, name)

        console_formatter = logging.Formatter(self.FORMAT)

        # If the "log_file_path" parameter is provided,
        # the logs will be visible only in the log file.
        if log_file_path:
            fh_formatter = logging.Formatter(self.LOG_FILE_FORMAT)
            file_handler = logging.FileHandler(log_file_path, mode=log_file_open_format)
            file_handler.setLevel(log_file_level)
            file_handler.setFormatter(fh_formatter)
            self.addHandler(file_handler)
            return

        # If the "log_file_path" parameter is not provided,
        # the logs will be visible only in the console.
        console = logging.StreamHandler()
        console.setLevel(console_level)
        console.setFormatter(console_formatter)
        self.addHandler(console)


if __name__ == "__main__":  # pragma: no cover
    current_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "test_log.log")
    console_logger = CustomLogger(__file__, console_level=logging.INFO)
    file_logger = CustomLogger(__file__, log_file_path=current_dir, log_file_level=logging.DEBUG)

    console_logger.info("test_to_console")
    file_logger.info("test_to_file")
Console output:
>>> python3 test.py
[test.py][INFO ] | test_to_console | (test.py:55)
Content of test_log.log file:
[test.py][INFO] | test_to_file | test.py:56)
If something is not clear or you have a question/remark, let me know and I will try to help.
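As a hypothetical usage sketch (the names here are invented), removing the early return as described above makes a single instance log to both destinations:

combined_logger = CustomLogger(
    "combined",
    log_file_path="test_log.log",
    console_level=logging.INFO,
    log_file_level=logging.DEBUG,
)
combined_logger.info("goes to both console and file")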
EDIT:
If you change the getLogger call to Logger in your implementation, it will work. A Logger instantiated directly is not registered in the logger hierarchy, so its records are not propagated to the root logger's handlers.
Code:
import logging


def create_logger(name="dummy", level=logging.DEBUG, record_format=None):
    """Create a logger according to the given settings"""
    if record_format is None:
        record_format = "%(asctime)s\t%(levelname)s\t%(module)s.%(funcName)s\t%(threadName)s\t%(message)s"

    logger = logging.Logger("modbus_tk")
    logger.setLevel(level)
    formatter = logging.Formatter(record_format)
    if name == "console":
        log_handler = logging.StreamHandler()
    else:
        raise Exception("Wrong type of handler")
    log_handler.setFormatter(formatter)
    logger.addHandler(log_handler)
    return logger


console_logger = create_logger(name="console")

# logging.basicConfig(filename="log", level=logging.DEBUG)
file_logger = logging.Logger("simulator")
handler = logging.FileHandler("log", "w")
file_logger.addHandler(handler)

console_logger.info("info to console")
file_logger.info("info to file")
Console output:
>>> python3 test.py
2019-12-16 13:10:45,963 INFO test.<module> MainThread info to console
Content of log file:
info to file
There are a few problems in your code, and without seeing the whole configuration it is hard to tell what exactly causes this, but most likely the logs are being propagated.
First of all, when you call basicConfig you are configuring the root logger and telling it to create a FileHandler with the filename log, but just two lines after that you are creating a RotatingFileHandler that uses the same file. Both loggers are writing to the same file now.
It always helps to understand the flow of how logging works in Python: https://docs.python.org/3/howto/logging.html#logging-flow
And if you don't want all logs to be sent to the root logger too, you should set LOG.propagate = False. That stops this logger from propagating its records.
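A minimal sketch of both fixes together, assuming LOG is the modbus-tk console logger and log is the "simulator" logger from the question (the separate root file name root.log is made up here):

import logging
from logging.handlers import RotatingFileHandler

# give the root logger its own file so it no longer collides with the rotating handler
logging.basicConfig(filename="root.log", level=logging.DEBUG)

log = logging.getLogger("simulator")
log.addHandler(RotatingFileHandler("log", maxBytes=5000, backupCount=1))
log.propagate = False  # "simulator" records stay out of the root handlers

LOG = logging.getLogger("modbus_tk")  # handler added by utils.create_logger
LOG.propagate = False  # console-only: records no longer bubble up to root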
I have a Python program that runs daily. I'm using the logging module with FileHandler to write logs to a file. I would like each run's logs to be in its own file with a timestamp. However, I want to delete old files (say > 3 months) to avoid filling the disk.
I've looked at RotatingFileHandler and TimedRotatingFileHandler, but I don't want a single run's logs to be split across multiple files, even if a single run were to take days. Is there a built-in method for that?
The logging module has a built-in TimedRotatingFileHandler:
# import modules
import logging
from logging import Formatter
from logging.handlers import TimedRotatingFileHandler

# get named logger
logger = logging.getLogger(__name__)

# create handler
handler = TimedRotatingFileHandler(filename='runtime.log', when='D', interval=1, backupCount=90, encoding='utf-8', delay=False)

# create formatter and add to handler
formatter = Formatter(fmt='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)

# add the handler to named logger
logger.addHandler(handler)

# set the logging level
logger.setLevel(logging.INFO)

# --------------------------------------
# log something
logger.info("test")
Old logs automatically get a timestamp appended.
Every day a new backup will be created.
If more than 91 files (current + backups) exist, the oldest will be deleted.
import logging
import time
from logging.handlers import RotatingFileHandler

logFile = 'test-' + time.strftime("%Y%m%d-%H%M%S") + '.log'

logger = logging.getLogger('my_logger')
handler = RotatingFileHandler(logFile, mode='a', maxBytes=50*1024*1024,
                              backupCount=5, encoding=None, delay=False)
logger.setLevel(logging.DEBUG)
logger.addHandler(handler)

for _ in range(10000):
    logger.debug("Hello, world!")
As suggested by @MartijnPieters in this question, you could easily extend the FileHandler class to handle your own deletion logic.
For example, my class will keep only the last backup_count files.
import datetime
import logging
import os
import re
from itertools import islice


class TimedPatternFileHandler(logging.FileHandler):
    """File handler that uses the current time for the log filename,
    by formatting the current datetime, according to filename_pattern, using
    the strftime function.

    If backup_count is non-zero, then older filenames that match the base
    filename are deleted to only leave the backup_count most recent copies,
    whenever opening a new log file with a different name.
    """

    def __init__(self, filename_pattern, mode, backup_count):
        self.filename_pattern = os.path.abspath(filename_pattern)
        self.backup_count = backup_count
        self.filename = datetime.datetime.now().strftime(self.filename_pattern)

        delete = islice(self._matching_files(), self.backup_count, None)
        for entry in delete:
            # print(entry.path)
            os.remove(entry.path)
        super().__init__(filename=self.filename, mode=mode)

    @property
    def filename(self):
        """Generate the 'current' filename to open"""
        # use the start of *this* interval, not the next
        return datetime.datetime.now().strftime(self.filename_pattern)

    @filename.setter
    def filename(self, _):
        pass

    def _matching_files(self):
        """Generate DirEntry entries that match the filename pattern.

        The files are ordered by their last modification time, most recent
        files first.
        """
        matches = []
        basename = os.path.basename(self.filename_pattern)
        pattern = re.compile(re.sub('%[a-zA-Z]', '.*', basename))

        for entry in os.scandir(os.path.dirname(self.filename_pattern)):
            if not entry.is_file():
                continue
            entry_basename = os.path.basename(entry.path)
            if re.match(pattern, entry_basename):
                matches.append(entry)
        matches.sort(key=lambda e: e.stat().st_mtime, reverse=True)
        return iter(matches)


def create_timed_rotating_log(path):
    """"""
    logger = logging.getLogger("Rotating Log")
    logger.setLevel(logging.INFO)

    handler = TimedPatternFileHandler('{}_%H-%M-%S.log'.format(path), mode='a', backup_count=5)
    logger.addHandler(handler)
    logger.info("This is a test!")
Get the date/time (see this answer on how to get the timestamp). If the file is older than the current date by 3 months, delete it with:
import os
os.remove("filename.extension")
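Fleshed out, a minimal sketch of that cleanup, assuming the logs live in a logs directory (the directory name and the 90-day cutoff are placeholders):

import os
import time

LOG_DIR = "logs"          # hypothetical directory holding the log files
MAX_AGE = 90 * 24 * 3600  # roughly 3 months, in seconds

now = time.time()
for name in os.listdir(LOG_DIR):
    path = os.path.join(LOG_DIR, name)
    # compare the file's last-modification time against the cutoff
    if os.path.isfile(path) and now - os.path.getmtime(path) > MAX_AGE:
        os.remove(path)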
Package this script with py2exe, then just use any task scheduler to run the job at startup.
Windows: open the run command and enter shell:startup, then place your exe in here.
On OSX: the old way used to be to create a cron job; from my experience this doesn't work in many cases anymore, but it's still worth trying. The new way recommended by Apple is CreatingLaunchdJobs. You can also refer to this topic for a more detailed explanation.
I have lots of code on a project with print statements and wanted to make a quick and dirty logger of these print statements, and decided to go the custom route. I managed to put together a logger that prints both to the terminal and to a file (with the help of this site), but now I want to add a simple timestamp to each statement, and I am running into a weird issue.
Here is my logging class.
from datetime import datetime

class Logger(object):
    def __init__(self, stream):
        self.terminal = stream
        self.log = open("test.log", 'a')

    def write(self, message):
        self.terminal.flush()
        self.terminal.write(self.stamp() + message)
        self.log.write(self.stamp() + message)

    def stamp(self):
        d = datetime.today()
        string = d.strftime("[%H:%M:%S] ")
        return string
Notice the stamp method that I then attempt to use in the write method.
When running the following two lines I get an unexpected output:
sys.stdout = Logger(sys.stdout)
print("Hello World!")
Output:
[11:10:47] Hello World![11:10:47]
This is also what the output looks like in the log file; however, I see no reason why the string I am adding gets appended at the end. Can someone help me here?
UPDATE
See the answer below. However, for quicker reference: the issue is using print() in general; replace it with sys.stdout.write after assigning the variable.
Also, use logging for long-term/larger projects right off the bat.
It calls the .write() method of your stream twice because in CPython print calls the stream's .write() method twice: the first time with the object, and the second time with a newline character. For example, look at line 138 in the pprint module in CPython v3.5.2:
def pprint(self, object):
    self._format(object, self._stream, 0, 0, {}, 0)
    self._stream.write("\n")  # <- write() called again!
You can test this out:
>>> from my_logger import Logger # my_logger.py has your Logger class
>>> import sys
>>> sys.stdout = Logger(stream=sys.stdout)
>>> sys.stdout.write('hi\n')
[14:05:32] hi
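Given that, one way to keep the timestamp off that second call is to skip the prefix when the message is only whitespace (i.e. the lone newline print() sends). A sketch of an adjusted write(), not from the original class:

def write(self, message):
    if message.strip():  # skip the bare "\n" that print() writes separately
        message = self.stamp() + message
    self.terminal.write(message)
    self.log.write(message)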
You can replace print(<blah>) everywhere in your code using sed.
$ for mymodule in *.py; do
> sed -i -E "s/print\((.+)\)/LOGGER.debug(\1)/" $mymodule
> done
Check out Python's built-in logging module. It has pretty comprehensive logging, including a timestamp in every message via the format string.
import logging
FORMAT = '%(asctime)-15s %(message)s'
DATEFMT = '%Y-%m-%d %H:%M:%S'
logging.basicConfig(format=FORMAT, datefmt=DATEFMT)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.debug('message: %s', 'message')
This outputs 2016-07-29 11:44:20 message: message to the console (stderr by default). There are also handlers to send output to files. There is a basic tutorial, an advanced tutorial and a cookbook of common logging recipes.
There is an example of using simultaneous file and console loggers in the cookbook.
import logging
LOGGER = logging.getLogger(__name__) # get logger named for this module
LOGGER.setLevel(logging.DEBUG) # set logger level to debug
# create formatter
LOG_DATEFMT = '%Y-%m-%d %H:%M:%S'
LOG_FORMAT = ('\n[%(levelname)s/%(name)s:%(lineno)d] %(asctime)s ' +
              '(%(processName)s/%(threadName)s)\n> %(message)s')
FORMATTER = logging.Formatter(LOG_FORMAT, datefmt=LOG_DATEFMT)
CH = logging.StreamHandler() # create console handler
CH.setLevel(logging.DEBUG) # set handler level to debug
CH.setFormatter(FORMATTER) # add formatter to ch
LOGGER.addHandler(CH) # add console handler to logger
FH = logging.FileHandler('myapp.log') # create file handler
FH.setLevel(logging.DEBUG) # set handler level to debug
FH.setFormatter(FORMATTER) # add formatter to fh
LOGGER.addHandler(FH) # add file handler to logger
LOGGER.debug('test: %s', 'hi')
This outputs:
[DEBUG/__main__:22] 2016-07-29 12:20:45 (MainProcess/MainThread)
> test: hi
to both console and file myapp.log simultaneously.
You probably need to add a newline character:
class Logger(object):
    def __init__(self, stream):
        self.terminal = stream
        self.log = open("test.log", 'a')

    def write(self, message):
        self.terminal.flush()
        self.terminal.write(self.stamp() + message + "\n")
        self.log.write(self.stamp() + message + "\n")

    def stamp(self):
        d = datetime.today()
        string = d.strftime("[%H:%M:%S] ")
        return string
Anyway, using the built-in logging module would be better.
Is there a way to do this? If logging.config.fileConfig('some.log') is the setter, what's the getter? Just curious if this exists.
For my basic usage of a single file log, this worked
logging.getLoggerClass().root.handlers[0].baseFilename
I needed to do something similar in a very simple logging environment; the following routine did the trick:
def _find_logger_basefilename(self, logger):
    """Finds the logger base filename(s) currently there is only one
    """
    log_file = None
    parent = logger.__dict__['parent']
    if parent.__class__.__name__ == 'RootLogger':
        # this is where the file name lives
        for h in logger.__dict__['handlers']:
            if h.__class__.__name__ == 'TimedRotatingFileHandler':
                log_file = h.baseFilename
    else:
        log_file = self._find_logger_basefilename(parent)
    return log_file
I was looking for the file used by the TimedRotatingFileHandler; you might need to change the type of handler you search for, probably to FileHandler.
Not sure how it would go in any sort of complex logging environment.
Below is simple logic for a single file handler:
>>> import logging
>>> logger = logging.getLogger("test")
>>> handler = logging.FileHandler("testlog.log")
>>> logger.addHandler(handler)
>>> print logger.handlers[0].baseFilename
/home/nav/testlog.log
>>>
logging.config.fileConfig('some.log') is going to try to read the logging configuration from some.log.
I don't believe there is a general way to retrieve the destination file -- it isn't always guaranteed to even be going to a file. (It may go to syslog, over the network, etc.)
In my case, I initialize a single logger (in my main script) and use it in all my packages by doing locallogger = logging.getLogger(__name__). In this setup, to get the logging file path I had to modify @John's answer as follows:
def find_rootlogger_basefilename():
    """Finds the root logger base filename
    """
    log_file = None
    rootlogger = logging.getLogger('')
    for h in rootlogger.__dict__['handlers']:
        if h.__class__.__name__ == 'FileHandler':
            log_file = h.baseFilename
            break
        elif h.__class__.__name__ == 'TimedRotatingFileHandler':
            log_file = h.baseFilename
            break
    return log_file
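A more general variant of the same idea, for reference: walk a logger and its ancestors and collect baseFilename from any FileHandler subclass (which also covers the rotating handlers), rather than matching class names as strings. A sketch, not taken from the answers above:

import logging

def find_log_files(logger: logging.Logger) -> list:
    """Collect file paths from every file-based handler reachable from logger."""
    files = []
    current = logger
    while current:
        for h in current.handlers:
            if isinstance(h, logging.FileHandler):  # rotating handlers subclass this
                files.append(h.baseFilename)
        # follow the hierarchy the same way record propagation does
        current = current.parent if current.propagate else None
    return files

print(find_log_files(logging.getLogger(__name__)))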