I'm trying to figure out how to decorate a test function in a way that makes the information from the decorator available to setUp. The code looks something like this:
import unittest

class MyTest(unittest.TestCase):
    def setUp(self):
        stopService()
        eraseAllPreferences()
        setTestPreferences()
        startService()

    @setPreference("abc", 5)
    def testPreference1(self):
        pass

    @setPreference("xyz", 5)
    def testPreference2(self):
        pass
The goal is for setUp to understand it's running testPreference1 and to know that it needs to set preference "abc" to 5 before starting the service (& similarly regarding "xyz" and testPreference2).
I can of course just use a conditional on the test name (if self._testMethodName == "testPreference1"), but that doesn't feel as maintainable as the number of tests grows (and refactoring is more error-prone). I'm hoping to solve this in setUp rather than overriding the run implementation.
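For reference, the name-based conditional I'd rather avoid looks like this:

def setUp(self):
    stopService()
    eraseAllPreferences()
    setTestPreferences()
    # brittle: every new test needs another branch, and renaming a
    # test silently breaks its setup
    if self._testMethodName == "testPreference1":
        setPreference("abc", 5)
    elif self._testMethodName == "testPreference2":
        setPreference("xyz", 5)
    startService()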
I'm running Python 3.6, although if there are creative solutions that depend on newer Python features I'm happy to learn about those too.
Decorators work well, but there's no real "official" way to get the underlying method, so I just did what the unittest source does: method = getattr(self, self._testMethodName)
import functools
import unittest

def setFoo(value):
    def inner(func):
        print(f"Changing foo for function {func}")
        func.foo = value

        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            return func(self, *args, **kwargs)
        return wrapper
    return inner
class Foo(unittest.TestCase):
    def setUp(self):
        method = getattr(self, self._testMethodName)
        print(f"Foo = {method.foo}")

    @setFoo("abc")
    def testFoo(self):
        self.assertEqual(self.testFoo.foo, "abc")

    @setFoo("xyz")
    def testBar(self):
        self.assertEqual(self.testBar.foo, "xyz")

if __name__ == "__main__":
    unittest.main()
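One detail worth calling out: @functools.wraps copies the wrapped function's __dict__ onto the wrapper, and since setFoo assigns func.foo before building the wrapper, the attribute survives the wrapping and is visible through getattr in setUp. If some of your tests are undecorated, a defaulted getattr keeps setUp from raising AttributeError; a minimal sketch:

def setUp(self):
    method = getattr(self, self._testMethodName)
    foo = getattr(method, "foo", None)  # None when the test isn't decorated
    if foo is not None:
        print(f"Foo = {foo}")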
I'm trying to set up the target under test in a @pytest.fixture and use it in all my tests in the module. I'm able to patch the test correctly, but after I add the @pytest.fixture to return the mock object and invoke the mocked object in other unit tests, the object starts referring back to the original function.
Following is the code I have. I was expecting mocked_worker in the unit test to refer to the mocked return value, but it invokes the actual os.getcwd method instead.
Please help me correct the code:
import os
import pytest
from unittest.mock import patch

class Worker:
    def work_on(self):
        path = os.getcwd()
        print(f'Working on {path}')
        return path

@pytest.fixture()
def mocked_worker():
    with patch('test.test_module.os.getcwd', return_value="Testing"):
        result = Worker()
        return result

def test_work_on(mocked_worker):
    ans = mocked_worker.work_on()
    assert ans == "Testing"
The problem is that when the fixture returns, the scope of the with statement ends, undoing the patch so the object takes its real value again. The solution is to use yield:
@pytest.fixture()
def mocked_worker():
    with patch('test.test_module.os.getcwd', return_value="Testing"):
        result = Worker()
        yield result
I would recommend using pytest-mock. A full single-file example (test_file.py) using this library would be:
import os
import pytest

class Worker:
    def work_on(self):
        path = os.getcwd()
        print(f'Working on {path}')
        return path

@pytest.fixture()
def mocked_worker(mocker):  # mocker is a pytest-mock fixture
    mocker.patch('test_file.os.getcwd', return_value="Testing")

def test_work_on(mocked_worker):
    worker = Worker()  # here we create an instance of Worker, not the mock itself!
    ans = worker.work_on()
    assert ans == "Testing"
used libraries for reference:
pytest==5.3.0
pytest-mock==1.12.1
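Patches made through mocker are undone automatically when the test finishes, which is why this version needs no with block or yield.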
Using Python 2.7, Celery 3.0.24, and mock 1.0.1. I have this:
import celery

class FancyTask(celery.Task):
    @classmethod
    def helper_method1(cls, name):
        """do some remote request depending on name"""
        return 'foo' + name + 'bar'

    def __call__(self, *args, **kwargs):
        funcname = self.name.split()[-1]
        bigname = self.helper_method1(funcname)
        return bigname

@celery.task(base=FancyTask)
def task1(*args, **kwargs):
    pass

@celery.task(base=FancyTask)
def task2(*args, **kwargs):
    pass
how can I patch helper_method1 while testing either task?
I've tried something like:
import unittest

import mock
from mymodule import tasks

class TestTasks(unittest.TestCase):
    def test_task1(self):
        task = tasks.task1
        task.helper_method1 = mock.MagicMock(return_value='42')
        res = task.delay('blah')
        task.helper_method1.assert_called_with('blah')
and the test is failing. The original function is the one being called. And no, this question didn't help me.
(I don't have a celery instance up and running so it's difficult for me to test this)
The target function in your application code is a classmethod. The function your test code is mocking is an instance method.
Does changing test_task1 like this help?

def test_task1(self):
    FancyTask.helper_method1 = mock.MagicMock(return_value='42')
    task = tasks.task1
    res = task.delay('blah')
    task.helper_method1.assert_called_with('blah')
You probably also need to change the assert_called_with so it is called from the class level instead of the instance level.
change

    task.helper_method1.assert_called_with('blah')

to

    FancyTask.helper_method1.assert_called_with('blah')
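If you'd rather not leave FancyTask permanently patched (assigning to the class leaks into later tests), unittest.mock's patch.object restores the original automatically when the block exits. A minimal sketch, assuming your test settings enable CELERY_ALWAYS_EAGER so that .delay() runs synchronously without a broker, and that FancyTask is importable from mymodule.tasks:

import unittest

import mock
from mymodule import tasks
from mymodule.tasks import FancyTask  # adjust to wherever FancyTask lives

class TestTasks(unittest.TestCase):
    def test_task1(self):
        # the patch is undone automatically when the with-block exits
        with mock.patch.object(FancyTask, 'helper_method1',
                               return_value='42') as helper:
            tasks.task1.delay('blah')
        self.assertTrue(helper.called)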
I have started using nose to run tests. I discovered the multiprocessing plugin has a timeout that I can change on the command line.
Is there a way to extend the timeout for individual tests (in the test code) so I don't have a massive global timeout?
I don't have any experience with the multiprocess plugin, but if you subclass the plugin with something like this:
# runtest.py
from nose.plugins.multiprocess import MultiProcess

PLUGIN = None

class TimeoutMultiProcess(MultiProcess):
    def configure(self, options, conf):
        global PLUGIN
        PLUGIN = self
        super(TimeoutMultiProcess, self).configure(options, conf)
        if not self.enabled:
            return
then you can create your own test script (test.py) like:
# test.py
import unittest

class TestA(unittest.TestCase):
    def setUp(self):
        from runtest import PLUGIN
        print(PLUGIN.config.multiprocess_timeout)

    def test_a(self):
        pass

    def test_b(self):
        pass

if __name__ == '__main__':
    from runtest import TimeoutMultiProcess
    import nose
    nose.main(addplugins=[TimeoutMultiProcess()], defaultTest="./test.py")
You'll be able to change the config.multiprocess_timeout to different values within your tests. I'm not sure if it will work for you, but it's worth a shot.
I'm trying to write a simple unit test that will verify that, under a certain condition, a class in my application will log an error via the standard logging API. I can't work out what the cleanest way to test this situation is.
I know that nose already captures logging output through its logging plugin, but this seems to be intended as a reporting and debugging aid for failed tests.
The two ways to do this that I can see are:

1. Mock out the logging module, either in a piecemeal way (mymodule.logging = mockloggingmodule) or with a proper mocking library.
2. Write or use an existing nose plugin to capture the output and verify it.

If I go for the former approach, I'd like to know the cleanest way to reset the global state to what it was before I mocked out the logging module.
Looking forward to your hints and tips on this one...
From Python 3.4 on, the standard unittest library offers a new test assertion context manager, assertLogs. From the docs:
with self.assertLogs('foo', level='INFO') as cm:
    logging.getLogger('foo').info('first message')
    logging.getLogger('foo.bar').error('second message')
self.assertEqual(cm.output, ['INFO:foo:first message',
                             'ERROR:foo.bar:second message'])
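Note that assertLogs fails the test if no matching record is logged inside the block; since Python 3.10 there is also a complementary assertNoLogs.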
UPDATE: No longer any need for the answer below. Use the built-in Python way instead!
This answer extends the work done in https://stackoverflow.com/a/1049375/1286628. The handler is largely the same (the constructor is more idiomatic, using super). Further, I add a demonstration of how to use the handler with the standard library's unittest.
import logging

class MockLoggingHandler(logging.Handler):
    """Mock logging handler to check for expected logs.

    Messages are available from an instance's ``messages`` dict, in order,
    indexed by a lowercase log level string (e.g., 'debug', 'info', etc.).
    """

    def __init__(self, *args, **kwargs):
        self.messages = {'debug': [], 'info': [], 'warning': [], 'error': [],
                         'critical': []}
        super(MockLoggingHandler, self).__init__(*args, **kwargs)

    def emit(self, record):
        "Store a message from ``record`` in the instance's ``messages`` dict."
        try:
            self.messages[record.levelname.lower()].append(record.getMessage())
        except Exception:
            self.handleError(record)

    def reset(self):
        self.acquire()
        try:
            for message_list in self.messages.values():
                message_list.clear()
        finally:
            self.release()
Then you can use the handler in a standard-library unittest.TestCase like so:
import unittest
import logging
import foo

class TestFoo(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        super(TestFoo, cls).setUpClass()
        # Assuming you follow the logging module documentation's
        # recommendation about naming your module's logs after the module's
        # __name__, the following getLogger call should fetch the same logger
        # you use in the foo module
        foo_log = logging.getLogger(foo.__name__)
        cls._foo_log_handler = MockLoggingHandler(level='DEBUG')
        foo_log.addHandler(cls._foo_log_handler)
        cls.foo_log_messages = cls._foo_log_handler.messages

    def setUp(self):
        super(TestFoo, self).setUp()
        self._foo_log_handler.reset()  # So each test is independent

    def test_foo_objects_fromble_nicely(self):
        # Do a bunch of frombling with foo objects
        # Now check that they've logged 5 frombling messages at the INFO level
        self.assertEqual(len(self.foo_log_messages['info']), 5)
        for info_message in self.foo_log_messages['info']:
            self.assertIn('fromble', info_message)
I used to mock loggers, but in this situation I found it best to use logging handlers, so I wrote this one based on the document suggested by jkp (now dead, but cached on the Internet Archive):
import logging

class MockLoggingHandler(logging.Handler):
    """Mock logging handler to check for expected logs."""

    def __init__(self, *args, **kwargs):
        self.reset()
        logging.Handler.__init__(self, *args, **kwargs)

    def emit(self, record):
        self.messages[record.levelname.lower()].append(record.getMessage())

    def reset(self):
        self.messages = {
            'debug': [],
            'info': [],
            'warning': [],
            'error': [],
            'critical': [],
        }
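A minimal usage sketch (the 'mymodule' logger name and the logged message are hypothetical):

import logging

handler = MockLoggingHandler(level='DEBUG')
log = logging.getLogger('mymodule')
log.addHandler(handler)

log.error('something went wrong')  # stand-in for exercising the code under test

assert 'something went wrong' in handler.messages['error']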
Simplest answer of all
Pytest has a built-in fixture called caplog. No setup needed.
def test_foo(foo, caplog, expected_msgs):
    foo.bar()
    assert [r.msg for r in caplog.records] == expected_msgs
I wish I'd known about caplog before I wasted 6 hours.
One warning, though: it resets between tests, so you need to perform your SUT action in the same test where you make assertions about caplog.
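caplog also lets you control what gets captured via caplog.set_level. A short sketch, reusing the foo fixture defined below (foo.bar() is the hypothetical SUT action):

import logging

def test_foo_logs_debug(foo, caplog):
    caplog.set_level(logging.DEBUG)  # capture DEBUG records too
    foo.bar()
    assert any(r.levelno == logging.DEBUG for r in caplog.records)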
Personally, I want my console output clean, so I like this to silence the log-to-stderr:
from logging import getLogger
from pytest import fixture

@fixture
def logger(caplog):
    logger = getLogger()
    # iterate over a copy, since removeHandler mutates logger.handlers
    for handler in list(logger.handlers):
        if handler != caplog.handler:
            logger.removeHandler(handler)
    return logger

@fixture
def foo(logger):
    return Foo(logger=logger)

@fixture
def expected_msgs():
    ...  # return whatever it is you expect from the SUT

def test_foo(foo, caplog, expected_msgs):
    foo.bar()
    assert [r.msg for r in caplog.records] == expected_msgs
There is a lot to like about pytest fixtures if you're sick of horrible unittest code.
Brandon's answer:
pip install testfixtures
snippet:
import logging
from testfixtures import LogCapture

logger = logging.getLogger('')

with LogCapture() as logs:
    # my awesome code
    logger.error('My code logged an error')

assert 'My code logged an error' in str(logs)
Note: the above does not conflict with running nosetests and getting the output of its logCapture plugin.
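If you want to pin down the logger name and level as well, LogCapture.check asserts on (name, level, message) tuples; the root logger obtained via getLogger('') shows up as 'root':

with LogCapture() as logs:
    logger.error('My code logged an error')

logs.check(('root', 'ERROR', 'My code logged an error'))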
As a follow-up to Reef's answer, I took the liberty of coding up an example using pymox. It introduces some extra helper functions that make it easier to stub functions and methods.
import logging

# Code under test:

class Server(object):
    def __init__(self):
        self._payload_count = 0

    def do_costly_work(self, payload):
        # resource intensive logic elided...
        pass

    def process(self, payload):
        self.do_costly_work(payload)
        self._payload_count += 1
        logging.info("processed payload: %s", payload)
        logging.debug("payloads served: %d", self._payload_count)

# Here are some helper functions
# that are useful if you do a lot
# of pymox-y work.

import mox
import inspect
import contextlib
import unittest

def stub_all(self, *targets):
    for target in targets:
        if inspect.isfunction(target):
            module = inspect.getmodule(target)
            self.StubOutWithMock(module, target.__name__)
        elif inspect.ismethod(target):
            self.StubOutWithMock(target.im_self or target.im_class, target.__name__)
        else:
            raise NotImplementedError("I don't know how to stub %s" % repr(target))

# Monkey-patch Mox class with our helper 'StubAll' method.
# Yucky pymox naming convention observed.
setattr(mox.Mox, 'StubAll', stub_all)

@contextlib.contextmanager
def mocking():
    mocks = mox.Mox()
    try:
        yield mocks
    finally:
        mocks.UnsetStubs()  # Important!
    mocks.VerifyAll()

# The test case example:

class ServerTests(unittest.TestCase):
    def test_logging(self):
        s = Server()
        with mocking() as m:
            m.StubAll(s.do_costly_work, logging.info, logging.debug)

            # expectations
            s.do_costly_work(mox.IgnoreArg())  # don't care, we test logging here.
            logging.info("processed payload: %s", 'hello')
            logging.debug("payloads served: %d", 1)

            # verified execution
            m.ReplayAll()
            s.process('hello')

if __name__ == '__main__':
    unittest.main()
If you define a helper method like this:
import logging

def capture_logging():
    records = []

    class CaptureHandler(logging.Handler):
        def emit(self, record):
            records.append(record)

        def __enter__(self):
            logging.getLogger().addHandler(self)
            return records

        def __exit__(self, exc_type, exc_val, exc_tb):
            logging.getLogger().removeHandler(self)

    return CaptureHandler()
Then you can write test code like this:
with capture_logging() as log:
    ...  # trigger some logger warnings

assert len(log) == ...
assert log[0].getMessage() == ...
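One caveat: the handler still sees only records that pass the usual logger-level filtering, so if you want to capture DEBUG messages you may need to lower the relevant logger's level (e.g., logging.getLogger().setLevel(logging.DEBUG)) before the block.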
You should use mocking, as someday you might want to change your logger to, say, a database one. You won't be happy if it tries to connect to the database during nosetests.

Mocking will continue to work even if standard output is suppressed.

I have used pyMox's stubs. Remember to unset the stubs after the test.
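A minimal sketch of that cleanup with pymox's Mox/UnsetStubs API, so a failing test can't leave stubs installed:

import unittest

import mox

class SomeTest(unittest.TestCase):
    def setUp(self):
        self.mox = mox.Mox()

    def tearDown(self):
        self.mox.UnsetStubs()  # restore everything that was stubbed out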
The ExpectLog class implemented in Tornado is a great utility:

with ExpectLog('channel', 'message regex'):
    do_it()
http://tornado.readthedocs.org/en/latest/_modules/tornado/testing.html#ExpectLog
Keying off @Reef's answer, I tried the code below. It works well for me both on Python 2.7 (if you install mock) and on Python 3.4.
"""
Demo using a mock to test logging output.
"""
import logging
try:
import unittest
except ImportError:
import unittest2 as unittest
try:
# Python >= 3.3
from unittest.mock import Mock, patch
except ImportError:
from mock import Mock, patch
logging.basicConfig()
LOG=logging.getLogger("(logger under test)")
class TestLoggingOutput(unittest.TestCase):
""" Demo using Mock to test logging INPUT. That is, it tests what
parameters were used to invoke the logging method, while still
allowing actual logger to execute normally.
"""
def test_logger_log(self):
"""Check for Logger.log call."""
original_logger = LOG
patched_log = patch('__main__.LOG.log',
side_effect=original_logger.log).start()
log_msg = 'My log msg.'
level = logging.ERROR
LOG.log(level, log_msg)
# call_args is a tuple of positional and kwargs of the last call
# to the mocked function.
# Also consider using call_args_list
# See: https://docs.python.org/3/library/unittest.mock.html#unittest.mock.Mock.call_args
expected = (level, log_msg)
self.assertEqual(expected, patched_log.call_args[0])
if __name__ == '__main__':
unittest.main()