Get all logging output with mock - Python

I want to get all logging output with mock. I searched, but
only found ways to mock logging.info or logging.warn explicitly.
I need all output, whatever logging level was set.
def test_foo():
    def my_log(...):
        logs.append(...)
    with mock.patch('logging.???', my_log):
        ...
In our libraries we use this:
import logging

logger = logging.getLogger(__name__)

def foo():
    logger.info(...)

pytest
If you are writing your tests using pytest, take a look at a neat fixture named caplog that will capture log records for you. It captures all the emitted log records, which you can then access via the caplog.records list. Each element is an instance of logging.LogRecord, so you can easily access any of the LogRecord attributes. Example:
# spam.py
import logging

logger = logging.getLogger(__name__)

def foo():
    logger.info('bar')

# tests.py
import logging
from spam import foo

def test_foo(caplog):
    foo()
    assert len(caplog.records) == 1
    record = next(iter(caplog.records))
    assert record.message == 'bar'
    assert record.levelno == logging.INFO
    assert record.module == 'spam'
    # etc
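If a record doesn't show up because its level is below the logger's effective level, the fixture can also raise the capture level for the duration of the test; set_level is part of caplog's API (a small sketch extending the example above):
def test_foo_verbose(caplog):
    caplog.set_level(logging.INFO)
    foo()
    assert caplog.records[0].message == 'bar'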
Install
The fixture was first introduced in a pytest plugin named pytest-capturelog, which is now abandoned. Luckily, it got a decent fork named pytest-catchlog, which has since been merged into pytest==3.3.0. So, if you use a recent version of pytest, you are already good to go; for older versions, install pytest-catchlog from PyPI.
Docs
At the moment, pytest doesn't provide any docs for the caplog fixture (or at least I couldn't find any), so you can refer to pytest-catchlog's documentation.
Plain unittest
If pytest is not an option, I wouldn't patch logging at all - you can simply add a custom handler instead that records all the incoming logs. A small example:
# utils.py
import logging

class RecordsCollector(logging.Handler):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.records = []

    def emit(self, record):
        self.records.append(record)
# tests.py
import logging
import unittest

from utils import RecordsCollector
from spam import foo

class SpamTests(unittest.TestCase):
    def setUp(self):
        self.collector = RecordsCollector()
        logging.getLogger('spam').addHandler(self.collector)

    def tearDown(self):
        logging.getLogger('spam').removeHandler(self.collector)

    def test_foo(self):
        foo()
        # same checks as in the example above
        self.assertEqual(len(self.collector.records), 1)
        record = next(iter(self.collector.records))
        self.assertEqual(record.message, 'bar')
        self.assertEqual(record.levelno, logging.INFO)
        self.assertEqual(record.module, 'spam')

if __name__ == '__main__':
    unittest.main()
You can then extend the custom handler and implement any logic you need, like collecting the records in a dict that maps log levels to lists of records (a sketch of that variant follows below), or add a context manager implementation, so you can start and stop capturing records inside the test:
from contextlib import contextmanager

@contextmanager
def record_logs():
    collector = RecordsCollector()
    logging.getLogger('spam').addHandler(collector)
    try:
        yield collector
    finally:
        logging.getLogger('spam').removeHandler(collector)
def test_foo(self):
    with utils.record_logs() as collector:
        foo()
        self.assertEqual(len(collector.records), 1)
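And a minimal sketch of the dict-based variant mentioned above (the class name is illustrative):
import collections
import logging

class LevelRecordsCollector(logging.Handler):
    """Collect records in a dict mapping level names to lists of records."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.records = collections.defaultdict(list)

    def emit(self, record):
        self.records[record.levelname].append(record)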

stdlib
Since Python 3.4, the standard library's unittest has assertLogs. When used without the logger and level arguments, it catches all logging (and suppresses existing handlers). You can later access the recorded entries from the context manager's records attribute; the formatted output strings are stored in its output list.
import logging
import unittest

class TestLogging(unittest.TestCase):
    def test(self):
        with self.assertLogs() as ctx:
            logging.getLogger('foo').info('message from foo')
            logging.getLogger('bar').info('message from bar')
        print(ctx.records)
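The output list holds the same entries rendered with the default LEVEL:logger:message format, so the test above could equally assert:
        self.assertEqual(ctx.output, ['INFO:foo:message from foo',
                                      'INFO:bar:message from bar'])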
Tornado
For Python 2 I usually take Tornado's ExpectLog. It's self-contained and works for normal Python code. It's actually a more elegant solution than the stdlib's, because instead of several classes, ExpectLog is just a normal logging.Filter (source). But it lacks a couple of features, including access to the recorded entries, so usually I also extend it a bit, like:
import logging
import re

class ExpectLog(logging.Filter):
    def __init__(self, logger, regex, required=True, level=None):
        if isinstance(logger, basestring):  # Python 2
            logger = logging.getLogger(logger)
        self.logger = logger
        self.orig_level = self.logger.level
        self.level = level
        self.regex = re.compile(regex)
        self.formatter = logging.Formatter()
        self.required = required
        self.matched = []
        self.logged_stack = False

    def filter(self, record):
        if record.exc_info:
            self.logged_stack = True
        message = self.formatter.format(record)
        if self.regex.search(message):
            self.matched.append(record)
            return False
        return True

    def __enter__(self):
        self.logger.addFilter(self)
        if self.level:
            self.logger.setLevel(self.level)
        return self

    def __exit__(self, typ, value, tb):
        self.logger.removeFilter(self)
        if self.level:
            self.logger.setLevel(self.orig_level)
        if not typ and self.required and not self.matched:
            raise Exception("did not get expected log message")
Then you can have something like:
class TestLogging(unittest.TestCase):
    def testTornado(self):
        logging.basicConfig(level=logging.INFO)
        with ExpectLog('foo', '.*', required=False) as ctxFoo:
            with ExpectLog('bar', '.*', required=False) as ctxBar:
                logging.getLogger('foo').info('message from foo')
                logging.getLogger('bar').info('message from bar')
        print(ctxFoo.matched)
        print(ctxBar.matched)
However, note that with the filter approach the current logging level matters (it can be overridden with the level argument), and you need a filter per logger of interest. You can follow the approach and build something that fits your case better.
Update
Alternatively, there's the unittest2 backport for Python 2, which has assertLogs.

The module testfixtures has a class to handle this:
>>> import logging
>>> from testfixtures import LogCapture
>>> with LogCapture() as l:
...     logger = logging.getLogger()
...     logger.info('a message')
...     logger.error('an error')
>>> l.check(
...     ('root', 'INFO', 'a message'),
...     ('root', 'ERROR', 'another error'),
... )
Traceback (most recent call last):
...
AssertionError: sequence not as expected:
same:
(('root', 'INFO', 'a message'),)
expected:
(('root', 'ERROR', 'another error'),)
actual:
(('root', 'ERROR', 'an error'),)
Source: http://testfixtures.readthedocs.io/en/latest/logging.html

I found this solution:
def test_foo(self):
    logs = []
    def my_log(self, *args, **kwargs):
        logs.append((args, kwargs))
    with mock.patch('logging.Logger._log', my_log):
        ...
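A runnable sketch of the same idea against the library pattern from the question. Note that _log is a private CPython implementation detail, and the logger's level still gates the call, so the level is set explicitly here:
import logging
from unittest import mock

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)  # _log is only reached if the level check passes

def foo():
    logger.info('bar')

def test_foo():
    logs = []
    def my_log(self, level, msg, args, **kwargs):
        logs.append((level, msg, args))
    with mock.patch('logging.Logger._log', my_log):
        foo()
    assert logs == [(logging.INFO, 'bar', ())]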


Is there a way to automatically start the debugger at the point at which a unittest fails?
Right now I am just using pdb.set_trace() manually, but this is very tedious as I need to add it each time and take it out at the end.
For example:
import unittest

class tests(unittest.TestCase):
    def setUp(self):
        pass

    def test_trigger_pdb(self):
        # this is the way I do it now
        try:
            assert 1 == 0
        except AssertionError:
            import pdb
            pdb.set_trace()

    def test_no_trigger(self):
        # this is the way I would like to do it:
        a = 1
        b = 2
        assert a == b
        # magically, pdb would start here
        # so that I could inspect the values of a and b

if __name__ == '__main__':
    # In the documentation the unittest.TestCase has a debug() method
    # but I don't understand how to use it
    # A = tests()
    # A.debug(A)
    unittest.main()
I think what you are looking for is nose. It works like a test runner for unittest.
You can drop into the debugger on errors, with the following command:
nosetests --pdb
(nose also has a --pdb-failures flag to stop on plain test failures, not just errors.)
import unittest
import sys
import pdb
import functools
import traceback

def debug_on(*exceptions):
    if not exceptions:
        exceptions = (AssertionError,)
    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except exceptions:
                info = sys.exc_info()
                traceback.print_exception(*info)
                pdb.post_mortem(info[2])
        return wrapper
    return decorator

class tests(unittest.TestCase):
    @debug_on()
    def test_trigger_pdb(self):
        assert 1 == 0
I corrected the code to call post_mortem on the exception instead of set_trace.
Third-party test framework enhancements generally seem to include this feature (nose and nose2 were already mentioned in other answers). Some more:
pytest supports it:
pytest --pdb
Or if you use absl-py's absltest instead of the unittest module:
name_of_test.py --pdb_post_mortem
A simple option is to just run the tests without result collection, letting the first exception crash down the stack (for arbitrary post-mortem handling), e.g.:
try:
    unittest.findTestCases(__main__).debug()
except:
    pdb.post_mortem(sys.exc_info()[2])
Another option: Override unittest.TextTestResult's addError and addFailure in a debug test runner for immediate post_mortem debugging (before tearDown()) - or for collecting and handling errors & tracebacks in an advanced way.
(Doesn't require extra frameworks or an extra decorator for test methods)
Basic example:
import unittest, pdb, traceback

class TC(unittest.TestCase):
    def testZeroDiv(self):
        1 / 0

def debugTestRunner(post_mortem=None):
    """unittest runner doing post mortem debugging on failing tests"""
    if post_mortem is None:
        post_mortem = pdb.post_mortem
    class DebugTestResult(unittest.TextTestResult):
        def addError(self, test, err):
            # called before tearDown()
            traceback.print_exception(*err)
            post_mortem(err[2])
            super(DebugTestResult, self).addError(test, err)
        def addFailure(self, test, err):
            traceback.print_exception(*err)
            post_mortem(err[2])
            super(DebugTestResult, self).addFailure(test, err)
    return unittest.TextTestRunner(resultclass=DebugTestResult)

if __name__ == '__main__':
    ##unittest.main()
    unittest.main(testRunner=debugTestRunner())
    ##unittest.main(testRunner=debugTestRunner(pywin.debugger.post_mortem))
    ##unittest.findTestCases(__main__).debug()
To apply @cmcginty's answer to the successor nose2 (recommended by nose; available on Debian-based systems via apt-get install nose2), you can drop into the debugger on failures and errors by calling
nose2
in your test directory.
For this, you need to have a suitable .unittest.cfg in your home directory or unittest.cfg in the project directory; it needs to contain the lines
[debugger]
always-on = True
errors-only = False
To address the comment in your code "In the documentation the unittest.TestCase has a debug() method but I don't understand how to use it", you can do something like this:
suite = unittest.defaultTestLoader.loadTestsFromModule(sys.modules[__name__])
suite.debug()
Individual test cases are created like this:
testCase = tests('test_trigger_pdb')
(where tests is a subclass of TestCase as per your example). Then you can call testCase.debug() to debug one case.
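Since debug() re-raises the test's exception instead of recording it, you can wrap a post-mortem session around it; a small sketch reusing the tests class from the question:
import pdb
import sys

try:
    tests('test_trigger_pdb').debug()
except Exception:
    pdb.post_mortem(sys.exc_info()[2])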
Here's a built-in, no-extra-modules solution:
import unittest
import sys
import pdb

####################################
def ppdb(e=None):
    """conditional debugging
       use with: `if ppdb(): pdb.set_trace()`
    """
    return ppdb.enabled

ppdb.enabled = False
###################################

class SomeTest(unittest.TestCase):
    def test_success(self):
        try:
            pass
        except Exception, e:
            if ppdb(): pdb.set_trace()
            raise

    def test_fail(self):
        try:
            res = 1/0
            # note: a `nosetests --pdb` run will stop after any exception,
            # even one without try/except, and ppdb() does not modify that.
        except Exception, e:
            if ppdb(): pdb.set_trace()
            raise

if __name__ == '__main__':
    # conditional debugging, but not in nosetests
    if "--pdb" in sys.argv:
        print "pdb requested"
        ppdb.enabled = not sys.argv[0].endswith("nosetests")
        sys.argv.remove("--pdb")
    unittest.main()
Call it with python myunittest.py --pdb and it will halt. Otherwise it won't.
Some of the solutions above modify the business logic:
try:                        # <-- new code
    original_code()         # <-- changed (indented)
except Exception as e:      # <-- new code
    pdb.post_mortem(...)    # <-- new code
To minimize changes to the original code, we can define a function decorator, and simply decorate the function that's throwing:
def pm(func):
    import functools, pdb
    @functools.wraps(func)
    def func2(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            pdb.post_mortem(e.__traceback__)
            raise
    return func2
Use:
@pm
def test_xxx(...):
    ...
Built a module with a decorator which post-mortems on every type of error except AssertionError. The decorator can be triggered by the logging root level:
#!/usr/bin/env python3
'''
Decorator for getting post mortem on errors of a unittest TestCase
'''
import sys
import pdb
import functools
import traceback
import logging
import unittest

logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)

def debug_on(log_level):
    '''
    Function decorator for post mortem debugging unittest functions.

    Args:
        log_level (int): logging level corresponding to the logging stdlib module

    Usecase:
        class tests(unittest.TestCase):
            @debug_on(logging.root.level)
            def test_trigger_pdb(self):
                assert 1 == 0
    '''
    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except BaseException as err:
                info = sys.exc_info()
                traceback.print_exception(*info)
                if log_level < logging.INFO and type(err) != AssertionError:
                    pdb.post_mortem(info[2])
        return wrapper
    return decorator

class Debug_onTester(unittest.TestCase):
    @debug_on(logging.root.level)
    def test_trigger_pdb(self):
        assert 1 == 0

if __name__ == '__main__':
    unittest.main()

Preventing logging file IO during test execution

I want to test a class that does logging when initialised and saves logs to a local file. Therefore, I'm mocking the logging piece of logic in order to avoid file IO when testing. This is pseudo-code representing how I've structured the tests:
class TestClass:
    def test_1(self, monkeypatch):
        monkeypatch.setattr('dotted.path.to.logger', lambda *args: '')
        assert True

    def test_2(self, monkeypatch):
        monkeypatch.setattr('dotted.path.to.logger', lambda *args: '')
        assert True

    def test_3(self, monkeypatch):
        monkeypatch.setattr('dotted.path.to.logger', lambda *args: '')
        assert True
Note how monkeypatch.setattr() is copy-pasted across all methods. Considering that:
we know a priori that all test methods will need to be monkey-patched in the same way, and
one might forget to monkeypatch new methods,
I think that monkey-patching should be abstracted at class level. How do we abstract monkeypatching at class level? I would expect the solution to be something similar to what follows:
import pytest

class TestClass:
    pytest.monkeypatch.setattr('dotted.path.to.logger', lambda *args: '')

    def test_1(self):
        assert True

    def test_2(self):
        assert True

    def test_3(self):
        assert True
This is where loggers are configured.
def initialise_logger(session_dir: str):
    """If missing, initialise folder "log" to store .log files. Verbosity:
    CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET."""
    os.makedirs(session_dir, exist_ok=True)
    logging.basicConfig(filename=os.path.join(session_dir, 'session.log'),
                        filemode='a',
                        level=logging.INFO,
                        datefmt='%Y-%m-%d %H:%M:%S',
                        format='|'.join(['(%(threadName)s)',
                                         '%(asctime)s.%(msecs)03d',
                                         '%(levelname)s',
                                         '%(filename)s:%(lineno)d',
                                         '%(message)s']))
    # Adopt NYSE time zone (aka EST aka UTC -0500 aka US/Eastern). Source:
    # https://stackoverflow.com/questions/32402502/how-to-change-the-time-zone-in-python-logging
    logging.Formatter.converter = lambda *args: get_now().timetuple()

    # Set verbosity in console. Verbosity above logging level is ignored.
    console = logging.StreamHandler()
    console.setLevel(logging.ERROR)
    console.setFormatter(logging.Formatter('|'.join(['(%(threadName)s)',
                                                     '%(asctime)s',
                                                     '%(levelname)s',
                                                     '%(filename)s:%(lineno)d',
                                                     '%(message)s'])))
    logger = logging.getLogger()
    logger.addHandler(console)
class TwsApp:
    def __init__(self):
        initialise_logger(<directory>)
A cleaner implementation:
# conftest.py
import pytest

@pytest.fixture(autouse=True)
def dont_configure_logging(monkeypatch):
    monkeypatch.setattr('twsapp.client.initialise_logger', lambda x: None)
You don't need to mark individual tests with the fixture, nor inject it; it will be applied regardless.
Inject the caplog fixture if you need to assert on records logged. Note that you don't need to configure loggers in order to make logging assertions - the caplog fixture will inject the handlers it needs in order to work correctly. If you want to customise the logging format used for tests, do that in pytest.ini or under a [tool:pytest] section of setup.cfg.
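For example, a minimal pytest.ini along those lines (log_format and log_date_format are pytest's own logging options; adjust the format to taste):
[pytest]
log_format = %(asctime)s %(levelname)s %(message)s
log_date_format = %Y-%m-%d %H:%M:%S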
In practice, I've put the fixture in /test/conftest.py. pytest automatically loads fixtures from files named conftest.py, and they can be applied in any module during the testing session.
from _pytest.monkeypatch import MonkeyPatch

@pytest.fixture(scope="class")
def suppress_logger(request):
    """Source: https://github.com/pytest-dev/pytest/issues/363"""
    # BEFORE running the test.
    monkeypatch = MonkeyPatch()
    # Provide dotted path to method or function to be mocked.
    monkeypatch.setattr('twsapp.client.initialise_logger', lambda x: None)
    # DURING the test.
    yield monkeypatch
    # AFTER running the test.
    monkeypatch.undo()
import pytest

@pytest.mark.usefixtures("suppress_logger")
class TestClass:
    def test_1(self):
        assert True

    def test_2(self):
        assert True

    def test_3(self):
        assert True
EDIT: I ended up using the following in conftest.py:
@pytest.fixture(autouse=True)
def suppress_logger(mocker, request):
    if 'no_suppress_logging' not in request.keywords:
        # The test is not marked with @pytest.mark.no_suppress_logging
        mocker.patch('logging.error')
        mocker.patch('logging.warning')
        mocker.patch('logging.debug')
        mocker.patch('logging.info')
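Usage then looks like this; the marker name must match the keyword checked in the fixture above:
import pytest

@pytest.mark.no_suppress_logging
def test_that_needs_real_logging():
    assert True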

Nested prefixes across loggers in Python

I'm currently working on a project where we use a single root logger. I understand from reading about logging that this is a Bad Thing, but I'm struggling to find a nice solution to a nice benefit this gives us.
Something we do (in part to get around not having different loggers, but in part because it gives us a nice feature) is to have a log_prefix decorator.
e.g.
#log_prefix("Download")
def download_file():
logging.info("Downloading file..")
connection = get_connection("127.0.0.1")
//Do other stuff
return file
#log_prefix("GetConnection")
def get_connection(url):
logging.info("Making connection")
//Do other stuff
logging.info("Finished making connection")
return connection
This gives us some nicely formatted logs that might look like:
Download:Downloading file..
Download:GetConnection:Making Connection
Download:GetConnection:Other stuff
Download:GetConnection:Finished making connection
Download:Other stuff
This also means that if we have:
@log_prefix("StartTelnetSession")
logging.info("Starting telnet session..")
connection = get_connection("127.0.0.1")
We get the same prefix at the end:
StartTelnetSession:Starting telnet session..
StartTelnetSession:GetConnection:Making Connection
StartTelnetSession:GetConnection:Other stuff
StartTelnetSession:GetConnection:Finished making connection
This has proven to be quite useful for development and support.
I can see plenty of cases where just using a separate logger for the action would solve our problem, but I can also see cases where throwing away the nesting we have would make things worse.
Are there any patterns or common uses out there for nesting loggers? i.e.
logging.getLogger("Download").getLogger("MakingConnection")
Or am I missing something here?
You could use a LoggerAdapter to add extra contextual information:
utils_logging.py:
import functools

def log_prefix(logger, label, prefix=list()):
    # The shared mutable default is deliberate: it holds the prefix
    # stack across nested decorated calls.
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            prefix.append(label)
            logger.extra['prefix'] = ':'.join(prefix)
            result = func(*args, **kwargs)
            prefix.pop()
            logger.extra['prefix'] = ':'.join(prefix)
            return result
        return wrapper
    return decorator
foo.py:
import logging
import utils_logging as UL
import bar

logger = logging.LoggerAdapter(logging.getLogger(__name__), {'prefix': ''})

@UL.log_prefix(logger, "Download")
def download_file():
    logger.info("Downloading file..")
    connection = bar.get_connection("127.0.0.1")

if __name__ == '__main__':
    logging.basicConfig(
        level=logging.INFO,
        format='%(prefix)s %(name)s %(levelname)s %(message)s')
    download_file()
    bar.get_connection('foo')
bar.py:
import logging
import utils_logging as UL

logger = logging.LoggerAdapter(logging.getLogger(__name__), {'prefix': ''})

@UL.log_prefix(logger, "GetConnection")
def get_connection(url):
    logger.info("Making connection")
    logger.info("Finished making connection")
yields
Download __main__ INFO Downloading file..
Download:GetConnection bar INFO Making connection
Download:GetConnection bar INFO Finished making connection
GetConnection bar INFO Making connection
GetConnection bar INFO Finished making connection
Note: I don't think it is a good idea to have a new Logger instance for each prefix, because these instances are not garbage collected. All you need is for some prefix variable to take on a different value depending on context. You don't need a new Logger instance for that -- one LoggerAdapter will do.
Logger names are hierarchical.
logger = logging.getLogger("Download.MakingConnection")
This logger would inherit any configuration from logging.getLogger("Download").
Python 2.7 also added a convenience function for accessing descendants of an arbitrary logger.
logger = logging.getLogger("Download.MakingConnection")
parent_logger = logging.getLogger("Download")
child_logger = parent_logger.getChild("MakingConnection")
assert logger is child_logger
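A minimal demonstration of the inheritance; assuming the root handler comes from basicConfig, records from the child propagate up and use the parent's effective level:
import logging

logging.basicConfig(format='%(name)s %(levelname)s %(message)s')
parent = logging.getLogger("Download")
parent.setLevel(logging.INFO)

child = parent.getChild("MakingConnection")
child.info("Making connection")
# -> Download.MakingConnection INFO Making connection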
Here is an alternative which uses a logging.Filter to modify record.msg. By modifying the message instead of adding a %(prefix)s field, the format does not need to change. This makes it easier to mix loggers which make use of log_prefix and those that don't.
To get the prefix, the logger should be initialized with a call to add_prefix_filter:
logger = UL.add_prefix_filter(logging.getLogger(__name__))
To append labels to the prefix, the functions should be decorated with @log_prefix(label), as before.
utils_logging.py:
import functools
import logging

prefix = list()

def log_prefix(label):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            prefix.append(label)
            try:
                result = func(*args, **kwargs)
            finally:
                prefix.pop()
            return result
        return wrapper
    return decorator

class PrefixFilter(logging.Filter):
    def filter(self, record):
        if prefix:
            record.msg = '{}:{}'.format(':'.join(prefix), record.msg)
        return True

def add_prefix_filter(logger):
    logger.addFilter(PrefixFilter())
    return logger
main.py:
import logging
import bar
import utils_logging as UL

logger = UL.add_prefix_filter(logging.getLogger(__name__))

@UL.log_prefix("Download")
def download_file():
    logger.info("Downloading file..")
    connection = bar.get_connection("127.0.0.1")

if __name__ == '__main__':
    logging.basicConfig(
        level=logging.INFO,
        format='%(message)s')
    logger.info('Starting...')
    download_file()
    bar.get_connection('foo')
bar.py:
import logging
import utils_logging as UL

logger = UL.add_prefix_filter(logging.getLogger(__name__))

@UL.log_prefix("GetConnection")
def get_connection(url):
    logger.info("Making connection")
    logger.info("Finished making connection")
yields
Starting...
Download:Downloading file..
Download:GetConnection:Making connection
Download:GetConnection:Finished making connection
GetConnection:Making connection
GetConnection:Finished making connection

Python unittest - how can I add debugging information to a TestResult object?

I have some unittest code that runs a bunch of tests. Previously, when each test was run, some information would get printed to stdout. This information would greatly assist in debugging in case the test failed. Now I would like to write a more sophisticated program that invokes unittest and captures the results of the testing programmatically. It appears that unittest provides an object called TestResult which is meant to contain the output of tests. It has a list of all the errors, a list of all the failures, etc. I would also like to add my debugging output to this object so that I can access it programmatically later. Is this possible?
EDIT: Here is an example:
import unittest2

class DemoTest(unittest2.TestCase):
    def test_one(self):
        print "'grimelsome' attribute of 'smoxy' was set to 'blimpy'"
        self.assertTrue(True)

    def test_two(self):
        print "'smithereen' attribute of 'brouhaha' was set to 'False'"
        self.assertTrue(True)

if __name__ == '__main__':
    suite = unittest2.TestLoader().loadTestsFromTestCase(DemoTest)
    result = unittest2.TextTestRunner(verbosity=2).run(suite)

    # this is what I'd like to be able to do:
    for fail in result.failures:
        print what_would_have_gone_to_stdout
You just need to use the TextTestRunner buffer option:
import unittest2

class DemoTest(unittest2.TestCase):
    def test_one(self):
        print "'grimelsome' attribute of 'smoxy' was set to 'blimpy'"
        self.assertTrue(True)

    def test_two(self):
        print "'smithereen' attribute of 'brouhaha' was set to 'False'"
        self.assertTrue(False)

if __name__ == '__main__':
    suite = unittest2.TestLoader().loadTestsFromTestCase(DemoTest)
    result = unittest2.TextTestRunner(verbosity=2, buffer=True).run(suite)
Before each test, TextTestResult, used by the runner, replaces sys.stderr and sys.stdout with its own streams and only sends the content to the original streams if the test fails, discarding it otherwise.
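This also covers the programmatic-access part of the question: with buffer=True, the captured output is appended to the failure description stored on the result object, so it can be read back later (a sketch; the captured text appears under Stdout:/Stderr: headings inside the stored traceback string):
for test, err_text in result.failures:
    # err_text is the formatted traceback; with buffer=True it also
    # contains the test's captured stdout/stderr sections.
    print err_text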
Note that because the fake sys.std* streams change after each test, if you want to do the same thing with logging output, you have to add the logging handler after the sys.std* streams have been replaced, before each test, or implement your own handler.
Here's an example subclassing logging.StreamHandler:
import logging
import sys
import unittest2

class DemoTest(unittest2.TestCase):
    logger = logging.getLogger('DemoTest')

    def setUp(self):
        self.logger.debug("setting up stuff and logging it...")

    def tearDown(self):
        self.logger.debug("You won't see me")
        print "me neither"

    def test_one(self):
        self.logger.debug("'grimelsome' attribute of 'smoxy' was set to 'blimpy'")
        self.assertTrue(True)

    def test_two(self):
        self.logger.debug("'smithereen' attribute of 'brouhaha' was set to 'False'")
        self.assertTrue(False)

class TestHandler(logging.StreamHandler):
    def __init__(self):
        logging.Handler.__init__(self)

    @property
    def stream(self):
        """Use whichever stream sys.stderr is currently referencing."""
        return sys.stderr

if __name__ == '__main__':
    formatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s')
    logger_handler = TestHandler()
    logger_handler.setFormatter(formatter)
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    logger.addHandler(logger_handler)
    suite = unittest2.TestLoader().loadTestsFromTestCase(DemoTest)
    result = unittest2.TextTestRunner(verbosity=2, buffer=True).run(suite)

How should I verify a log message when testing Python code under nose?

I'm trying to write a simple unit test that will verify that, under a certain condition, a class in my application will log an error via the standard logging API. I can't work out what the cleanest way to test this situation is.
I know that nose already captures logging output through its logging plugin, but this seems to be intended as a reporting and debugging aid for failed tests.
The two ways to do this I can see are:
Mock out the logging module, either in a piecemeal way (mymodule.logging = mockloggingmodule) or with a proper mocking library.
Write or use an existing nose plugin to capture the output and verify it.
If I go for the former approach, I'd like to know the cleanest way to reset the global state to what it was before I mocked out the logging module.
Looking forward to your hints and tips on this one...
From Python 3.4 on, the standard unittest library offers a new test assertion context manager, assertLogs. From the docs:
with self.assertLogs('foo', level='INFO') as cm:
    logging.getLogger('foo').info('first message')
    logging.getLogger('foo.bar').error('second message')
self.assertEqual(cm.output, ['INFO:foo:first message',
                             'ERROR:foo.bar:second message'])
UPDATE: No longer any need for the answer below. Use the built-in Python way instead!
This answer extends the work done in https://stackoverflow.com/a/1049375/1286628. The handler is largely the same (the constructor is more idiomatic, using super). Further, I add a demonstration of how to use the handler with the standard library's unittest.
import logging

class MockLoggingHandler(logging.Handler):
    """Mock logging handler to check for expected logs.

    Messages are available from an instance's ``messages`` dict, in order,
    indexed by a lowercase log level string (e.g., 'debug', 'info', etc.).
    """

    def __init__(self, *args, **kwargs):
        self.messages = {'debug': [], 'info': [], 'warning': [], 'error': [],
                         'critical': []}
        super(MockLoggingHandler, self).__init__(*args, **kwargs)

    def emit(self, record):
        "Store a message from ``record`` in the instance's ``messages`` dict."
        try:
            self.messages[record.levelname.lower()].append(record.getMessage())
        except Exception:
            self.handleError(record)

    def reset(self):
        self.acquire()
        try:
            for message_list in self.messages.values():
                message_list.clear()
        finally:
            self.release()
Then you can use the handler in a standard-library unittest.TestCase like so:
import unittest
import logging
import foo

class TestFoo(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        super(TestFoo, cls).setUpClass()
        # Assuming you follow Python's logging module's documentation's
        # recommendation about naming your module's logs after the module's
        # __name__, the following getLogger call should fetch the same logger
        # you use in the foo module
        foo_log = logging.getLogger(foo.__name__)
        cls._foo_log_handler = MockLoggingHandler(level='DEBUG')
        foo_log.addHandler(cls._foo_log_handler)
        cls.foo_log_messages = cls._foo_log_handler.messages

    def setUp(self):
        super(TestFoo, self).setUp()
        self._foo_log_handler.reset()  # So each test is independent

    def test_foo_objects_fromble_nicely(self):
        # Do a bunch of frombling with foo objects
        # Now check that they've logged 5 frombling messages at the INFO level
        self.assertEqual(len(self.foo_log_messages['info']), 5)
        for info_message in self.foo_log_messages['info']:
            self.assertIn('fromble', info_message)
I used to mock loggers, but in this situation I found it best to use logging handlers, so I wrote this one based on the document suggested by jkp (now dead, but cached on the Internet Archive):
import logging

class MockLoggingHandler(logging.Handler):
    """Mock logging handler to check for expected logs."""

    def __init__(self, *args, **kwargs):
        self.reset()
        logging.Handler.__init__(self, *args, **kwargs)

    def emit(self, record):
        self.messages[record.levelname.lower()].append(record.getMessage())

    def reset(self):
        self.messages = {
            'debug': [],
            'info': [],
            'warning': [],
            'error': [],
            'critical': [],
        }
Simplest answer of all
Pytest has a built-in fixture called caplog. No setup needed.
def test_foo(foo, caplog, expected_msgs):
    foo.bar()
    assert [r.msg for r in caplog.records] == expected_msgs
I wish I'd known about caplog before I wasted 6 hours.
Warning, though - it resets, so you need to perform your SUT action in the same test where you make assertions about caplog.
Personally, I want my console output clean, so I like this to silence the log-to-stderr:
from logging import getLogger

from pytest import fixture

@fixture
def logger(caplog):
    logger = getLogger()
    _ = [logger.removeHandler(h) for h in logger.handlers if h != caplog.handler]  # type: ignore
    return logger

@fixture
def foo(logger):
    return Foo(logger=logger)

@fixture
def expected_msgs():
    ...  # return whatever it is you expect from the SUT

def test_foo(foo, caplog, expected_msgs):
    foo.bar()
    assert [r.msg for r in caplog.records] == expected_msgs
There is a lot to like about pytest fixtures if you're sick of horrible unittest code.
Brandon's answer:
pip install testfixtures
snippet:
import logging
from testfixtures import LogCapture

logger = logging.getLogger('')

with LogCapture() as logs:
    # my awesome code
    logger.error('My code logged an error')

assert 'My code logged an error' in str(logs)
Note: the above does not conflict with calling nosetests and getting the output of the tool's logCapture plugin.
As a follow-up to Reef's answer, I took the liberty of coding up an example using pymox.
It introduces some extra helper functions that make it easier to stub functions and methods.
import logging

# Code under test:
class Server(object):
    def __init__(self):
        self._payload_count = 0

    def do_costly_work(self, payload):
        # resource intensive logic elided...
        pass

    def process(self, payload):
        self.do_costly_work(payload)
        self._payload_count += 1
        logging.info("processed payload: %s", payload)
        logging.debug("payloads served: %d", self._payload_count)

# Here are some helper functions
# that are useful if you do a lot
# of pymox-y work.
import mox
import inspect
import contextlib
import unittest

def stub_all(self, *targets):
    for target in targets:
        if inspect.isfunction(target):
            module = inspect.getmodule(target)
            self.StubOutWithMock(module, target.__name__)
        elif inspect.ismethod(target):
            self.StubOutWithMock(target.im_self or target.im_class, target.__name__)
        else:
            raise NotImplementedError("I don't know how to stub %s" % repr(target))

# Monkey-patch Mox class with our helper 'StubAll' method.
# Yucky pymox naming convention observed.
setattr(mox.Mox, 'StubAll', stub_all)

@contextlib.contextmanager
def mocking():
    mocks = mox.Mox()
    try:
        yield mocks
    finally:
        mocks.UnsetStubs()  # Important!
        mocks.VerifyAll()

# The test case example:
class ServerTests(unittest.TestCase):
    def test_logging(self):
        s = Server()
        with mocking() as m:
            m.StubAll(s.do_costly_work, logging.info, logging.debug)
            # expectations
            s.do_costly_work(mox.IgnoreArg())  # don't care, we test logging here.
            logging.info("processed payload: %s", 'hello')
            logging.debug("payloads served: %d", 1)
            # verified execution
            m.ReplayAll()
            s.process('hello')

if __name__ == '__main__':
    unittest.main()
If you define a helper method like this:
import logging

def capture_logging():
    records = []

    class CaptureHandler(logging.Handler):
        def emit(self, record):
            records.append(record)

        def __enter__(self):
            logging.getLogger().addHandler(self)
            return records

        def __exit__(self, exc_type, exc_val, exc_tb):
            logging.getLogger().removeHandler(self)

    return CaptureHandler()
Then you can write test code like this:
with capture_logging() as log:
    ...  # trigger some logger warnings
assert len(log) == ...
assert log[0].getMessage() == ...
You should use mocking, as someday you might want to change your logger to, say, a database one. You won't be happy if it tries to connect to the database during nosetests.
Mocking will continue to work even if standard output is suppressed.
I have used pyMox's stubs. Remember to unset the stubs after the test.
The ExpectLog class implemented in tornado is a great utility:
with ExpectLog('channel', 'message regex'):
    do_it()
http://tornado.readthedocs.org/en/latest/_modules/tornado/testing.html#ExpectLog
Keying off @Reef's answer, I tried the code below. It works well for me both for Python 2.7 (if you install mock) and for Python 3.4.
"""
Demo using a mock to test logging output.
"""
import logging
try:
import unittest
except ImportError:
import unittest2 as unittest
try:
# Python >= 3.3
from unittest.mock import Mock, patch
except ImportError:
from mock import Mock, patch
logging.basicConfig()
LOG=logging.getLogger("(logger under test)")
class TestLoggingOutput(unittest.TestCase):
""" Demo using Mock to test logging INPUT. That is, it tests what
parameters were used to invoke the logging method, while still
allowing actual logger to execute normally.
"""
def test_logger_log(self):
"""Check for Logger.log call."""
original_logger = LOG
patched_log = patch('__main__.LOG.log',
side_effect=original_logger.log).start()
log_msg = 'My log msg.'
level = logging.ERROR
LOG.log(level, log_msg)
# call_args is a tuple of positional and kwargs of the last call
# to the mocked function.
# Also consider using call_args_list
# See: https://docs.python.org/3/library/unittest.mock.html#unittest.mock.Mock.call_args
expected = (level, log_msg)
self.assertEqual(expected, patched_log.call_args[0])
if __name__ == '__main__':
unittest.main()
