Preventing logging file IO during test execution - python

I want to test a class that configures logging when initialised and saves logs to a local file. Therefore, I'm mocking the logging logic in order to avoid file IO when testing. This is pseudo-code representing how I've structured the tests:
class TestClass:
    def test_1(self, monkeypatch):
        monkeypatch.setattr('dotted.path.to.logger', lambda *args: '')
        assert True

    def test_2(self, monkeypatch):
        monkeypatch.setattr('dotted.path.to.logger', lambda *args: '')
        assert True

    def test_3(self, monkeypatch):
        monkeypatch.setattr('dotted.path.to.logger', lambda *args: '')
        assert True
Note how monkeypatch.setattr() is copy-pasted across all methods. Considering that:
we know a priori that all test methods will need to be monkey-patched in the same way, and
one might forget to monkeypatch new methods,
I think that monkey-patching should be abstracted at class level. How do we abstract monkeypatching at class level? I would expect the solution to be something similar to what follows:
import pytest

class TestClass:
    pytest.monkeypatch.setattr('dotted.path.to.logger', lambda *args: '')

    def test_1(self):
        assert True

    def test_2(self):
        assert True

    def test_3(self):
        assert True
This is where the loggers are configured:
def initialise_logger(session_dir: str):
    """If missing, initialise folder "log" to store .log files. Verbosity:
    CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET."""
    os.makedirs(session_dir, exist_ok=True)
    logging.basicConfig(filename=os.path.join(session_dir, 'session.log'),
                        filemode='a',
                        level=logging.INFO,
                        datefmt='%Y-%m-%d %H:%M:%S',
                        format='|'.join(['(%(threadName)s)',
                                         '%(asctime)s.%(msecs)03d',
                                         '%(levelname)s',
                                         '%(filename)s:%(lineno)d',
                                         '%(message)s']))
    # Adopt NYSE time zone (aka EST aka UTC -0500 aka US/Eastern). Source:
    # https://stackoverflow.com/questions/32402502/how-to-change-the-time-zone-in-python-logging
    logging.Formatter.converter = lambda *args: get_now().timetuple()
    # Set verbosity in console. Verbosity above logging level is ignored.
    console = logging.StreamHandler()
    console.setLevel(logging.ERROR)
    console.setFormatter(logging.Formatter('|'.join(['(%(threadName)s)',
                                                     '%(asctime)s',
                                                     '%(levelname)s',
                                                     '%(filename)s:%(lineno)d',
                                                     '%(message)s'])))
    logger = logging.getLogger()
    logger.addHandler(console)

class TwsApp:
    def __init__(self):
        initialise_logger(<directory>)

A cleaner implementation:
# conftest.py
import pytest

@pytest.fixture(autouse=True)
def dont_configure_logging(monkeypatch):
    monkeypatch.setattr('twsapp.client.initialise_logger', lambda x: None)
You don't need to mark individual tests with the fixture or inject it; it will be applied regardless.
Inject the caplog fixture if you need to assert on records logged. Note that you don't need to configure loggers in order to make logging assertions - the caplog fixture injects the handlers it needs in order to work correctly. If you want to customise the logging format used for tests, do that in pytest.ini or under a [tool:pytest] section of setup.cfg.
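For example, here is a minimal sketch (the logger name and message are made up) of asserting on captured records with caplog, together with the ini-level options that control the log format used during tests:

# pytest.ini
[pytest]
log_format = %(asctime)s %(levelname)s %(message)s
log_date_format = %Y-%m-%d %H:%M:%S

# test_logging_format.py
import logging

def test_warning_is_captured(caplog):
    with caplog.at_level(logging.WARNING):
        logging.getLogger('twsapp.client').warning('disk is low')
    assert caplog.records[0].levelname == 'WARNING'
    assert 'disk is low' in caplog.text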

In practice, I've put the fixture in /test/conftest.py. pytest automatically loads fixtures from files named conftest.py, and they can be applied in any module during the testing session.
from _pytest.monkeypatch import MonkeyPatch

@pytest.fixture(scope="class")
def suppress_logger(request):
    """Source: https://github.com/pytest-dev/pytest/issues/363"""
    # BEFORE running the test.
    monkeypatch = MonkeyPatch()
    # Provide dotted path to method or function to be mocked.
    monkeypatch.setattr('twsapp.client.initialise_logger', lambda x: None)
    # DURING the test.
    yield monkeypatch
    # AFTER running the test.
    monkeypatch.undo()
import pytest

@pytest.mark.usefixtures("suppress_logger")
class TestClass:
    def test_1(self):
        assert True

    def test_2(self):
        assert True

    def test_3(self):
        assert True
EDIT: I ended up using the following in conftest.py
@pytest.fixture(autouse=True)
def suppress_logger(mocker, request):
    if 'no_suppress_logging' not in request.keywords:
        # If not decorated with: @pytest.mark.no_suppress_logging
        mocker.patch('logging.error')
        mocker.patch('logging.warning')
        mocker.patch('logging.debug')
        mocker.patch('logging.info')
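For illustration, here is a rough sketch of how the opt-out marker behaves (the test names are invented): the decorated test keeps real logging and can be inspected with caplog, while the undecorated test runs against the patched no-op functions. If you run with strict markers, register the marker in pytest.ini as well.

import logging
import pytest

@pytest.mark.no_suppress_logging
def test_with_real_logging(caplog):
    # The autouse fixture skips patching here, so the record is captured.
    logging.error('boom')
    assert 'boom' in caplog.text

def test_with_suppressed_logging():
    # logging.error is mocked by the autouse fixture; nothing hits the handlers.
    logging.error('this goes nowhere')

# pytest.ini
# [pytest]
# markers =
#     no_suppress_logging: disable the autouse log-suppression fixture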

Using PyTest fixtures without passing them

I am using the PyTest framework for writing and running my tests.
I have implemented a concrete logger:
class Logger(object):
    class LogFormats:
        ...

    def __init__(self, testname, setup, silent=True):
        """
        Creating a concrete logger for pytest.
        The logger will create a file for the test in a specific test directory in quali FS and will
        write all test log output (colored) to this file.
        :param: testname: test name - received from pytest fixtures (command line parameters)
        :param: setup: test setup - received from pytest fixtures (command line parameters)
        :param: silent: log test in silent mode (info only) or not silent mode (everything is logged)
        :param: root_password: password for root user
        """
        ....
    ...
and in the conftest.py file I wrote the function that will be invoked when this logger is requested (creating a logger fixture):
@pytest.fixture(scope="module", autouse=True)
def logger(request):
    setup = request.config.getoption('--setupname')
    logger = Logger(testname=request.node.name, setup=setup)
    return logger
Now, my question is how do I make this concrete logger global using pytest?
Meaning I don't want to pass it as an argument to the test function like this:
def test_logger(other_fixture, logger):
but still be able to use it inside the test_logger test function (like a global variable).
You could do
@pytest.mark.usefixtures("logger")
def test_logger(other_fixture):

How do I correctly setup and teardown for my pytest class with tests?

I am using Selenium for end-to-end testing and I can't figure out how to use the setup_class and teardown_class methods.
I need to set up the browser in the setup_class method, then perform a bunch of tests defined as class methods, and finally quit the browser in the teardown_class method.
But logically it seems like a bad solution, because in fact my tests will not work with the class, but with an object. I pass the self param inside every test method, so I can access the object's vars:
class TestClass:
    def setup_class(cls):
        pass

    def test_buttons(self, data):
        # self.$attribute can be used, but not cls.$attribute?
        pass

    def test_buttons2(self, data):
        # self.$attribute can be used, but not cls.$attribute?
        pass

    def teardown_class(cls):
        pass
And it even seems incorrect to create a browser instance for the class; it should be created for every object separately, right?
So, do I need to use __init__ and __del__ methods instead of setup_class and teardown_class?
According to Fixture finalization / executing teardown code, the current best practice for setup and teardown is to use yield instead of return:
import pytest

@pytest.fixture()
def resource():
    print("setup")
    yield "resource"
    print("teardown")

class TestResource:
    def test_that_depends_on_resource(self, resource):
        print("testing {}".format(resource))
Running it results in
$ py.test --capture=no pytest_yield.py
=== test session starts ===
platform darwin -- Python 2.7.10, pytest-3.0.2, py-1.4.31, pluggy-0.3.1
collected 1 items
pytest_yield.py setup
testing resource
.teardown
=== 1 passed in 0.01 seconds ===
Another way to write teardown code is by accepting a request-context object into your fixture function and calling its request.addfinalizer method with a function that performs the teardown one or multiple times:
import pytest

@pytest.fixture()
def resource(request):
    print("setup")

    def teardown():
        print("teardown")
    request.addfinalizer(teardown)
    return "resource"

class TestResource:
    def test_that_depends_on_resource(self, resource):
        print("testing {}".format(resource))
When you write "tests defined as class methods", do you really mean class methods (methods which receive their class as the first parameter) or just regular methods (methods which receive an instance as the first parameter)?
Since your example uses self for the test methods I'm assuming the latter, so you just need to use setup_method instead:
class Test:
    def setup_method(self, test_method):
        # configure self.attribute

    def teardown_method(self, test_method):
        # tear down self.attribute

    def test_buttons(self):
        # use self.attribute for test
The test method instance is passed to setup_method and teardown_method, but can be ignored if your setup/teardown code doesn't need to know the testing context. More information can be found here.
I also recommend that you familiarize yourself with py.test's fixtures, as they are a more powerful concept.
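As a rough sketch of what that fixture-based alternative can look like (the attribute name is only illustrative), the same per-test setup/teardown can be expressed as an autouse fixture defined on the class:

import pytest

class TestWithFixture:
    @pytest.fixture(autouse=True)
    def per_test_setup(self, request):
        # Runs before each test method in this class...
        request.instance.attribute = 'configured'
        yield
        # ...and after each test method.
        request.instance.attribute = None

    def test_buttons(self):
        assert self.attribute == 'configured'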
This might help http://docs.pytest.org/en/latest/xunit_setup.html
In my test suite, I group my test cases into classes. For the setup and teardown I need for all the test cases in a class, I use the setup_class(cls) and teardown_class(cls) classmethods.
And for the setup and teardown I need for each test case, I use setup_method(method) and teardown_method(method).
Example:
lh = <got log handler from logger module>

class TestClass:
    @classmethod
    def setup_class(cls):
        lh.info("starting class: {} execution".format(cls.__name__))

    @classmethod
    def teardown_class(cls):
        lh.info("finishing class: {} execution".format(cls.__name__))

    def setup_method(self, method):
        lh.info("starting execution of tc: {}".format(method.__name__))

    def teardown_method(self, method):
        lh.info("finishing execution of tc: {}".format(method.__name__))

    def test_tc1(self):
        <tc_content>
        assert

    def test_tc2(self):
        <tc_content>
        assert
Now when I run my tests, as the TestClass execution starts, it logs the details of when it begins execution and when it ends execution, and the same goes for the methods.
You can add up other setup and teardown steps you might have in the respective locations.
Hope it helps!
As @Bruno suggested, using pytest fixtures is another solution that is accessible to both test classes and even just simple test functions. Here's an example testing Python 2.7 functions:
import pytest

@pytest.fixture(scope='function')
def some_resource(request):
    stuff_i_setup = ["I setup"]

    def some_teardown():
        stuff_i_setup[0] += " ... but now I'm torn down..."
        print stuff_i_setup[0]
    request.addfinalizer(some_teardown)

    return stuff_i_setup[0]

def test_1_that_needs_resource(some_resource):
    print some_resource + "... and now I'm testing things..."
So, running test_1... produces:
I setup... and now I'm testing things...
I setup ... but now I'm torn down...
Notice that stuff_i_setup is referenced in the fixture, allowing that object to be set up and torn down for the test it's interacting with. You can imagine this could be useful for a persistent object, such as a hypothetical database or some connection, that must be cleared before each test runs to keep them isolated.
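To make that concrete, here is a minimal sketch (FakeConnection is invented for illustration) of a per-test fixture that opens such a connection before every test and clears it afterwards, keeping tests isolated:

import pytest

class FakeConnection:
    """Stand-in for a real database/connection object (illustrative only)."""
    def __init__(self):
        self.rows = []

    def close(self):
        self.rows.clear()

@pytest.fixture(scope='function')
def connection():
    conn = FakeConnection()   # setup: a fresh connection per test
    yield conn                # the test runs here
    conn.close()              # teardown: clear state after the test

def test_insert(connection):
    connection.rows.append('a row')
    assert len(connection.rows) == 1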
Your code should work just as you expect it to if you add @classmethod decorators.
@classmethod
def setup_class(cls):
    "Runs once per class"

@classmethod
def teardown_class(cls):
    "Runs at end of class"
See http://pythontesting.net/framework/pytest/pytest-xunit-style-fixtures/
import pytest

class Test:
    @pytest.fixture()
    def setUp(self):
        print("setup")
        yield "resource"
        print("teardown")

    def test_that_depends_on_resource(self, setUp):
        print("testing {}".format(setUp))
In order to run it:
pytest name_of_the_module.py -v
I'm not sure I got the specifics of using Selenium in your original question, but in case you were simply asking how to use a more classical setUp/tearDown style, pytest supports most unittest features, so you could do something like:
import unittest

class TestHello(unittest.TestCase):
    def setUp(self):
        print('running setUp')

    def test_one(self):
        print('running test_one')

    def test_two(self):
        print('running test_two')

    def tearDown(self):
        print('running tearDown')
Which produces:
$ pytest -s -v
====================== test session starts =======================
platform linux -- Python 3.8.2, pytest-6.2.4, py-1.10.0, pluggy-0.13.1 -- /gnu/store/nckjv3ccwdi6096j478gvns43ssbls2p-python-wrapper-3.8.2/bin/python
cachedir: .pytest_cache
hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('/tmp/test/.hypothesis/examples')
rootdir: /tmp/test
plugins: hypothesis-5.4.1
collected 2 items
test_hw.py::TestHello::test_one running setUp
running test_one
running tearDown
PASSED
test_hw.py::TestHello::test_two running setUp
running test_two
running tearDown
PASSED

Get all logging output with mock

I want to get all logging output with mock. I searched, but
only found ways to explicitly mock logging.info or logging.warn.
I need all output, whatever logging level was set.
def test_foo():
    def my_log(...):
        logs.append(...)

    with mock.patch('logging.???', my_log):
        ...
In our libraries we use this:
import logging

logger = logging.getLogger(__name__)

def foo():
    logger.info(...)
pytest
If you are writing your tests using pytest, take a look at a neat fixture named caplog that will capture log records for you. It captures all the emitted log records, which you can then access via the caplog.records list. Each element is an instance of logging.LogRecord, so you can easily access any of the LogRecord attributes. Example:
# spam.py
import logging

logger = logging.getLogger(__name__)

def foo():
    logger.info('bar')

# tests.py
import logging
from spam import foo

def test_foo(caplog):
    foo()
    assert len(caplog.records) == 1
    record = next(iter(caplog.records))
    assert record.message == 'bar'
    assert record.levelno == logging.INFO
    assert record.module == 'spam'
    # etc
Install
The fixture was first introduced in a pytest plugin named pytest-capturelog which is now abandoned. Luckily, it got a decent fork named pytest-catchlog, which has been merged into pytest==3.3.0 recently. So, if you use a recent version of pytest, you are already good to go; for older versions of pytest, install pytest-catchlog from PyPI.
Docs
At the moment, pytest doesn't provide any docs for the caplog fixture (or at least I couldn't find any), so you can refer to pytest-catchlog's documentation.
Plain unittest
If pytest is not an option, I wouldn't patch logging at all - you can simply add a custom handler instead that will record all the incoming logs. A small example:
# utils.py
import logging

class RecordsCollector(logging.Handler):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.records = []

    def emit(self, record):
        self.records.append(record)

# tests.py
import logging
import unittest

from utils import RecordsCollector
from spam import foo

class SpamTests(unittest.TestCase):
    def setUp(self):
        self.collector = RecordsCollector()
        logging.getLogger('spam').addHandler(self.collector)

    def tearDown(self):
        logging.getLogger('spam').removeHandler(self.collector)

    def test_foo(self):
        foo()
        # same checks as in the example above
        self.assertEqual(len(self.collector.records), 1)
        record = next(iter(self.collector.records))
        self.assertEqual(record.message, 'bar')
        self.assertEqual(record.levelno, logging.INFO)
        self.assertEqual(record.module, 'spam')

if __name__ == '__main__':
    unittest.main()
You can then extend the custom handler and implement any logic you need, like collecting the records in a dict that maps log levels to lists of records, or adding a context manager implementation, so you can start and stop capturing records inside the test:
from contextlib import contextmanager

@contextmanager
def record_logs():
    collector = RecordsCollector()
    logging.getLogger('spam').addHandler(collector)
    yield collector
    logging.getLogger('spam').removeHandler(collector)

def test_foo(self):
    with utils.record_logs() as collector:
        foo()
        self.assertEqual(len(collector.records), 1)
stdlib
Since Python 3.4, the standard library's unittest has assertLogs. When used without the logger and level arguments, it catches all logging (and suppresses existing handlers). You can later access the recorded entries from the context manager's records attribute. The text output strings are stored in the output list.
import logging
import unittest

class TestLogging(unittest.TestCase):
    def test(self):
        with self.assertLogs() as ctx:
            logging.getLogger('foo').info('message from foo')
            logging.getLogger('bar').info('message from bar')
        print(ctx.records)
Tornado
For Python 2 I usually take Tornado's ExpectLog. It's self-contained and works for normal Python code. It's actually a more elegant solution than stdlib's, because instead of several classes, ExpectLog is just a normal logging.Filter (a single class; see the source). But it lacks a couple of features, including access to the recorded entries, so usually I also extend it a bit, like:
import logging
import re

class ExpectLog(logging.Filter):
    def __init__(self, logger, regex, required=True, level=None):
        if isinstance(logger, basestring):
            logger = logging.getLogger(logger)
        self.logger = logger
        self.orig_level = self.logger.level
        self.level = level
        self.regex = re.compile(regex)
        self.formatter = logging.Formatter()
        self.required = required
        self.matched = []
        self.logged_stack = False

    def filter(self, record):
        if record.exc_info:
            self.logged_stack = True
        message = self.formatter.format(record)
        if self.regex.search(message):
            self.matched.append(record)
            return False
        return True

    def __enter__(self):
        self.logger.addFilter(self)
        if self.level:
            self.logger.setLevel(self.level)
        return self

    def __exit__(self, typ, value, tb):
        self.logger.removeFilter(self)
        if self.level:
            self.logger.setLevel(self.orig_level)
        if not typ and self.required and not self.matched:
            raise Exception("did not get expected log message")
Then you can have something like:
class TestLogging(unittest.TestCase):
    def testTornado(self):
        logging.basicConfig(level=logging.INFO)
        with ExpectLog('foo', '.*', required=False) as ctxFoo:
            with ExpectLog('bar', '.*', required=False) as ctxBar:
                logging.getLogger('foo').info('message from foo')
                logging.getLogger('bar').info('message from bar')
        print(ctxFoo.matched)
        print(ctxBar.matched)
However, note that for the filter approach the current logging level is important (it can be overridden with the level argument), and you also need a filter per logger of interest. You can follow the approach and make something that fits your case better.
Update
Alternatively, there's the unittest2 backport for Python 2, which has assertLogs.
The module testfixtures has a class to handle this:
>>> import logging
>>> from testfixtures import LogCapture
>>> with LogCapture() as l:
...     logger = logging.getLogger()
...     logger.info('a message')
...     logger.error('an error')
>>> l.check(
...     ('root', 'INFO', 'a message'),
...     ('root', 'ERROR', 'another error'),
... )
Traceback (most recent call last):
 ...
AssertionError: sequence not as expected:
same:
(('root', 'INFO', 'a message'),)
expected:
(('root', 'ERROR', 'another error'),)
actual:
(('root', 'ERROR', 'an error'),)
Source: http://testfixtures.readthedocs.io/en/latest/logging.html
I found this solution:
def test_foo(self):
    logs = []

    def my_log(self, *args, **kwargs):
        logs.append((args, kwargs))

    with mock.patch('logging.Logger._log', my_log):
        ...

Python unittest - how can I add debugging information to a TestResult object?

I have some unittest code that runs a bunch of tests. Previously, when each test was run, some information would get printed to stdout. This information would greatly assist in debugging in case the test failed. Now I would like to write a more sophisticated program that invokes unittest and captures the results of the testing programmatically. It appears that unittest provides an object called TestResult which is meant to contain the output of tests. It has a list of all the errors, a list of all the failures, etc. I would also like to add my debugging output to this object so that I can access it programmatically later. Is this possible?
EDIT: Here is an example:
import unittest2

class DemoTest(unittest2.TestCase):
    def test_one(self):
        print "'grimelsome' attribute of 'smoxy' was set to 'blimpy'"
        self.assertTrue(True)

    def test_two(self):
        print "'smithereen' attribute of 'brouhaha' was set to 'False'"
        self.assertTrue(True)

if __name__ == '__main__':
    suite = unittest2.TestLoader().loadTestsFromTestCase(DemoTest)
    result = unittest2.TextTestRunner(verbosity=2).run(suite)

    # this is what I'd like to be able to do:
    for fail in result.failures:
        print what_would_have_gone_to_stdout
You just need to use the TextTestRunner buffer option:
import unittest2

class DemoTest(unittest2.TestCase):
    def test_one(self):
        print "'grimelsome' attribute of 'smoxy' was set to 'blimpy'"
        self.assertTrue(True)

    def test_two(self):
        print "'smithereen' attribute of 'brouhaha' was set to 'False'"
        self.assertTrue(False)

if __name__ == '__main__':
    suite = unittest2.TestLoader().loadTestsFromTestCase(DemoTest)
    result = unittest2.TextTestRunner(verbosity=2, buffer=True).run(suite)
Before each test, TextTestResult, used by the runner, replaces sys.stderr and sys.stdout with its own streams and only sends the content to the original streams if the test fails; otherwise it discards it.
Note that because the fake sys.std* streams change after each test, if you want to do the same thing with logging output, you have to add the logging handler after the sys.std* streams have been replaced (i.e. before each test), or implement your own handler.
Here's an example subclassing logging.StreamHandler:
import logging
import sys
import unittest2

class DemoTest(unittest2.TestCase):
    logger = logging.getLogger('DemoTest')

    def setUp(self):
        self.logger.debug("setting up stuff and logging it...")

    def teardown(self):
        self.logger.debug("You won't see me")
        print "me neither"

    def test_one(self):
        self.logger.debug("'grimelsome' attribute of 'smoxy' was set to 'blimpy'")
        self.assertTrue(True)

    def test_two(self):
        self.logger.debug("'smithereen' attribute of 'brouhaha' was set to 'False'")
        self.assertTrue(False)

class TestHandler(logging.StreamHandler):
    def __init__(self):
        logging.Handler.__init__(self)

    @property
    def stream(self):
        """Use whichever stream sys.stderr is referencing."""
        return sys.stderr

if __name__ == '__main__':
    formatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s')
    logger_handler = TestHandler()
    logger_handler.setFormatter(formatter)

    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    logger.addHandler(logger_handler)

    suite = unittest2.TestLoader().loadTestsFromTestCase(DemoTest)
    result = unittest2.TextTestRunner(verbosity=2, buffer=True).run(suite)

How should I verify a log message when testing Python code under nose?

I'm trying to write a simple unit test that will verify that, under a certain condition, a class in my application will log an error via the standard logging API. I can't work out what the cleanest way to test this situation is.
I know that nose already captures logging output through its logging plugin, but this seems to be intended as a reporting and debugging aid for failed tests.
The two ways to do this I can see are:
Mock out the logging module, either in a piecemeal way (mymodule.logging = mockloggingmodule) or with a proper mocking library.
Write or use an existing nose plugin to capture the output and verify it.
If I go for the former approach, I'd like to know the cleanest way to reset the global state to what it was before I mocked out the logging module.
Looking forward to your hints and tips on this one...
From Python 3.4 on, the standard unittest library offers a new test assertion context manager, assertLogs. From the docs:
with self.assertLogs('foo', level='INFO') as cm:
    logging.getLogger('foo').info('first message')
    logging.getLogger('foo.bar').error('second message')
self.assertEqual(cm.output, ['INFO:foo:first message',
                             'ERROR:foo.bar:second message'])
UPDATE: No longer any need for the answer below. Use the built-in Python way instead!
This answer extends the work done in https://stackoverflow.com/a/1049375/1286628. The handler is largely the same (the constructor is more idiomatic, using super). Further, I add a demonstration of how to use the handler with the standard library's unittest.
class MockLoggingHandler(logging.Handler):
    """Mock logging handler to check for expected logs.

    Messages are available from an instance's ``messages`` dict, in order, indexed by
    a lowercase log level string (e.g., 'debug', 'info', etc.).
    """

    def __init__(self, *args, **kwargs):
        self.messages = {'debug': [], 'info': [], 'warning': [], 'error': [],
                         'critical': []}
        super(MockLoggingHandler, self).__init__(*args, **kwargs)

    def emit(self, record):
        "Store a message from ``record`` in the instance's ``messages`` dict."
        try:
            self.messages[record.levelname.lower()].append(record.getMessage())
        except Exception:
            self.handleError(record)

    def reset(self):
        self.acquire()
        try:
            for message_list in self.messages.values():
                message_list.clear()
        finally:
            self.release()
Then you can use the handler in a standard-library unittest.TestCase like so:
import unittest
import logging
import foo

class TestFoo(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        super(TestFoo, cls).setUpClass()
        # Assuming you follow Python's logging module's documentation's
        # recommendation about naming your module's logs after the module's
        # __name__, the following getLogger call should fetch the same logger
        # you use in the foo module
        foo_log = logging.getLogger(foo.__name__)
        cls._foo_log_handler = MockLoggingHandler(level='DEBUG')
        foo_log.addHandler(cls._foo_log_handler)
        cls.foo_log_messages = cls._foo_log_handler.messages

    def setUp(self):
        super(TestFoo, self).setUp()
        self._foo_log_handler.reset()  # So each test is independent

    def test_foo_objects_fromble_nicely(self):
        # Do a bunch of frombling with foo objects
        # Now check that they've logged 5 frombling messages at the INFO level
        self.assertEqual(len(self.foo_log_messages['info']), 5)
        for info_message in self.foo_log_messages['info']:
            self.assertIn('fromble', info_message)
I used to mock loggers, but in this situation I found it best to use logging handlers, so I wrote this one based on the document suggested by jkp (now dead, but cached on the Internet Archive):
class MockLoggingHandler(logging.Handler):
    """Mock logging handler to check for expected logs."""

    def __init__(self, *args, **kwargs):
        self.reset()
        logging.Handler.__init__(self, *args, **kwargs)

    def emit(self, record):
        self.messages[record.levelname.lower()].append(record.getMessage())

    def reset(self):
        self.messages = {
            'debug': [],
            'info': [],
            'warning': [],
            'error': [],
            'critical': [],
        }
Simplest answer of all
Pytest has a built-in fixture called caplog. No setup needed.
def test_foo(foo, caplog, expected_msgs):
    foo.bar()
    assert [r.msg for r in caplog.records] == expected_msgs
I wish I'd known about caplog before I wasted 6 hours.
Warning, though - it resets, so you need to perform your SUT action in the same test where you make assertions about caplog.
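For instance, a minimal sketch of that ordering (the logger name is arbitrary): the action that logs and the caplog assertion live in the same test function.

import logging

def test_action_and_assertion_together(caplog):
    # Log and assert in the same test, since caplog is reset between tests.
    logging.getLogger('demo').warning('watch out')
    assert 'watch out' in caplog.text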
Personally, I want my console output clean, so I like this to silence the log-to-stderr:
from logging import getLogger
from pytest import fixture

@fixture
def logger(caplog):
    logger = getLogger()
    _ = [logger.removeHandler(h) for h in logger.handlers if h != caplog.handler]  # type: ignore
    return logger

@fixture
def foo(logger):
    return Foo(logger=logger)

@fixture
def expected_msgs():
    # return whatever it is you expect from the SUT

def test_foo(foo, caplog, expected_msgs):
    foo.bar()
    assert [r.msg for r in caplog.records] == expected_msgs
There is a lot to like about pytest fixtures if you're sick of horrible unittest code.
Brandon's answer:
pip install testfixtures
snippet:
import logging
from testfixtures import LogCapture

logger = logging.getLogger('')

with LogCapture() as logs:
    # my awesome code
    logger.error('My code logged an error')
assert 'My code logged an error' in str(logs)
Note: the above does not conflict with calling nosetests and getting the output of the tool's logCapture plugin.
As a follow-up to Reef's answer, I took the liberty of coding up an example using pymox.
It introduces some extra helper functions that make it easier to stub functions and methods.
import logging

# Code under test:

class Server(object):
    def __init__(self):
        self._payload_count = 0

    def do_costly_work(self, payload):
        # resource intensive logic elided...
        pass

    def process(self, payload):
        self.do_costly_work(payload)
        self._payload_count += 1
        logging.info("processed payload: %s", payload)
        logging.debug("payloads served: %d", self._payload_count)

# Here are some helper functions
# that are useful if you do a lot
# of pymox-y work.

import mox
import inspect
import contextlib
import unittest

def stub_all(self, *targets):
    for target in targets:
        if inspect.isfunction(target):
            module = inspect.getmodule(target)
            self.StubOutWithMock(module, target.__name__)
        elif inspect.ismethod(target):
            self.StubOutWithMock(target.im_self or target.im_class, target.__name__)
        else:
            raise NotImplementedError("I don't know how to stub %s" % repr(target))

# Monkey-patch Mox class with our helper 'StubAll' method.
# Yucky pymox naming convention observed.
setattr(mox.Mox, 'StubAll', stub_all)

@contextlib.contextmanager
def mocking():
    mocks = mox.Mox()
    try:
        yield mocks
    finally:
        mocks.UnsetStubs()  # Important!
    mocks.VerifyAll()

# The test case example:

class ServerTests(unittest.TestCase):
    def test_logging(self):
        s = Server()
        with mocking() as m:
            m.StubAll(s.do_costly_work, logging.info, logging.debug)
            # expectations
            s.do_costly_work(mox.IgnoreArg())  # don't care, we test logging here.
            logging.info("processed payload: %s", 'hello')
            logging.debug("payloads served: %d", 1)
            # verified execution
            m.ReplayAll()
            s.process('hello')

if __name__ == '__main__':
    unittest.main()
If you define a helper method like this:
import logging

def capture_logging():
    records = []

    class CaptureHandler(logging.Handler):
        def emit(self, record):
            records.append(record)

        def __enter__(self):
            logging.getLogger().addHandler(self)
            return records

        def __exit__(self, exc_type, exc_val, exc_tb):
            logging.getLogger().removeHandler(self)

    return CaptureHandler()
Then you can write test code like this:
with capture_logging() as log:
    ...  # trigger some logger warnings
assert len(log) == ...
assert log[0].getMessage() == ...
You should use mocking, as someday you might want to change your logger to, say, a database one. You won't be happy if it tries to connect to the database during nosetests.
Mocking will continue to work even if standard output is suppressed.
I have used pyMox's stubs. Remember to unset the stubs after the test.
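As a rough sketch of the same idea with the standard library's unittest.mock instead of pyMox (the module and function names are hypothetical), patching the logger for the duration of the test and letting the patch undo itself:

import logging
import unittest
from unittest import mock

# Hypothetical code under test: a module-level logger and a function that logs.
logger = logging.getLogger('mymodule')

def do_work():
    logger.error('something went wrong')

class DoWorkTests(unittest.TestCase):
    def test_logs_an_error(self):
        # patch.object replaces the method inside the block and restores it
        # afterwards, so there is no stub left to unset manually.
        with mock.patch.object(logger, 'error') as mock_error:
            do_work()
        mock_error.assert_called_once_with('something went wrong')

if __name__ == '__main__':
    unittest.main()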
The ExpectLog class implemented in tornado is a great utility:
with ExpectLog('channel', 'message regex'):
    do_it()
http://tornado.readthedocs.org/en/latest/_modules/tornado/testing.html#ExpectLog
Keying off @Reef's answer, I tried the code below. It works well for me both for Python 2.7 (if you install mock) and for Python 3.4.
"""
Demo using a mock to test logging output.
"""
import logging
try:
import unittest
except ImportError:
import unittest2 as unittest
try:
# Python >= 3.3
from unittest.mock import Mock, patch
except ImportError:
from mock import Mock, patch
logging.basicConfig()
LOG=logging.getLogger("(logger under test)")
class TestLoggingOutput(unittest.TestCase):
""" Demo using Mock to test logging INPUT. That is, it tests what
parameters were used to invoke the logging method, while still
allowing actual logger to execute normally.
"""
def test_logger_log(self):
"""Check for Logger.log call."""
original_logger = LOG
patched_log = patch('__main__.LOG.log',
side_effect=original_logger.log).start()
log_msg = 'My log msg.'
level = logging.ERROR
LOG.log(level, log_msg)
# call_args is a tuple of positional and kwargs of the last call
# to the mocked function.
# Also consider using call_args_list
# See: https://docs.python.org/3/library/unittest.mock.html#unittest.mock.Mock.call_args
expected = (level, log_msg)
self.assertEqual(expected, patched_log.call_args[0])
if __name__ == '__main__':
unittest.main()
