Using python 2.7, celery 3.0.24 and mock 1.0.1. I have this:
import celery


class FancyTask(celery.Task):

    @classmethod
    def helper_method1(cls, name):
        """do some remote request depending on name"""
        return 'foo' + name + 'bar'

    def __call__(self, *args, **kwargs):
        funcname = self.name.split()[-1]
        bigname = self.helper_method1(funcname)
        return bigname


@celery.task(base=FancyTask)
def task1(*args, **kwargs):
    pass


@celery.task(base=FancyTask)
def task2(*args, **kwargs):
    pass
how can I patch helper_method1 while testing either task?
I've tried something like:
import unittest

import mock

from mymodule import tasks


class TestTasks(unittest.TestCase):
    def test_task1(self):
        task = tasks.task1
        task.helper_method1 = mock.MagicMock(return_value='42')
        res = task.delay('blah')
        task.helper_method1.assert_called_with('blah')
and the test is failing. The original function is the one being called. And no, this question didn't help me.
(I don't have a celery instance up and running so it's difficult for me to test this)
The target function in your application code is a classmethod. The function your test code is mocking is an instance method.
Does changing test_task1 like this help?

def test_task1(self):
    FancyTask.helper_method1 = mock.MagicMock(return_value='42')
    task = tasks.task1
    res = task.delay('blah')
    task.helper_method1.assert_called_with('blah')
You probably also need to change the assert_called_with so it is called from the class level instead of the instance level.
change
task.helper_method1.assert_called_with('blah')
to
FancyTask.helper_method1.assert_called_with('blah')
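A rough sketch of the same idea using mock.patch.object, so the patch is undone automatically and doesn't leak into other tests. It assumes FancyTask lives in mymodule.tasks alongside the tasks, and it calls the task directly rather than through delay() so no broker is needed; treat it as a sketch, not a drop-in test:

import mock
import unittest

from mymodule import tasks


class TestTasks(unittest.TestCase):
    def test_task1(self):
        # Patch the classmethod on the class itself; the patch is undone
        # automatically when the with block exits.
        with mock.patch.object(tasks.FancyTask, 'helper_method1',
                               return_value='42') as helper:
            # Calling the task object runs FancyTask.__call__ locally,
            # so no broker is required for this check.
            result = tasks.task1('blah')

        self.assertEqual(result, '42')
        # __call__ passes the last whitespace-separated chunk of the task name.
        helper.assert_called_once_with(tasks.task1.name.split()[-1])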
I'm trying to figure out how to decorate a test function in a way that makes the information from the decorator available to setUp. The code looks something like this:
import unittest


class MyTest(unittest.TestCase):
    def setUp(self):
        stopService()
        eraseAllPreferences()
        setTestPreferences()
        startService()

    @setPreference("abc", 5)
    def testPreference1(self):
        pass

    @setPreference("xyz", 5)
    def testPreference2(self):
        pass
The goal is for setUp to understand it's running testPreference1 and to know that it needs to set preference "abc" to 5 before starting the service (& similarly regarding "xyz" and testPreference2).
I can of course just use a conditional on the test name (if self._testMethodName == "testPreference1"), but that doesn't feel quite as maintainable as the number of tests grows (and refactoring is more error-prone). I'm hoping to solve this in setUp rather than overriding the run implementation.
I'm running Python 3.6, although if there are creative solutions that depend on newer Python features I'm happy to learn about those too.
Decorators work well, but there's no real "official" way to get the underlying method, so I just did what the unittest source does: method = getattr(self, self._testMethodName)
import functools
import unittest


def setFoo(value):
    def inner(func):
        print(f"Changing foo for function {func}")
        func.foo = value

        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            return func(self, *args, **kwargs)

        return wrapper
    return inner


class Foo(unittest.TestCase):
    def setUp(self):
        method = getattr(self, self._testMethodName)
        print(f"Foo = {method.foo}")

    @setFoo("abc")
    def testFoo(self):
        self.assertEqual(self.testFoo.foo, "abc")

    @setFoo("xyz")
    def testBar(self):
        self.assertEqual(self.testBar.foo, "xyz")


if __name__ == "__main__":
    unittest.main()
The py.test docs describe declaring factory methods as fixtures, like so:
@pytest.fixture
def make_foo():
    def __make_foo(name):
        foo = Foo()
        foo.name = name
        return foo
    return __make_foo
What are the benefits/tradeoffs of doing this over just defining a make_foo function and using that? I don't understand why it is a fixture.
Actually, the most important advantage is being able to use other fixtures, and make the dependency injection of pytest work for you.
The other advantage is allowing you to pass parameters to the factory, which would have to be static in a normal fixture.
Look at this example:
@pytest.fixture
def mocked_server():
    with mock.patch('something'):
        yield MyServer()


@pytest.fixture
def connected_client(mocked_server):
    client = Client()
    client.connect_to(mocked_server, local_port=123)  # local_port must be static
    return client
You could now write a test that gets a connected_client, but you can't change the port.
What if you need a test with multiple clients? You can't do that either.
If you now write:
@pytest.fixture
def connect_client(mocked_server):
    def __connect(local_port):
        client = Client()
        client.connect_to(mocked_server, local_port)
        return client
    return __connect
You get to write tests receiving a connect_client factory and call it to get an initialized client on any port, as many times as you want!
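For example, a quick usage sketch building on the hypothetical names above (Client, mocked_server and the port numbers are just placeholders from the example):

def test_two_clients(connect_client):
    # Each call to the factory builds a fresh client against the same
    # mocked server, on whatever port the test needs.
    first = connect_client(local_port=123)
    second = connect_client(local_port=456)
    assert first is not second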
If you have many simple factories, you can simplify their creation with a decorator:
def factory_fixture(factory):
    @pytest.fixture(scope='session')
    def maker():
        return factory
    maker.__name__ = factory.__name__
    return maker


@factory_fixture
def make_stuff(foo, bar):
    return 'foo' + str(foo + bar)
This is equivalent to:
@pytest.fixture(scope='session')
def make_stuff():
    def make(foo, bar):
        return 'foo' + str(foo + bar)
    return make
One example might be a session-level fixture, e.g.:
@pytest.fixture(scope="session")
def make_foo():
    def __make_foo(name):
        foo = Foo()
        foo.name = name
        return foo
    return __make_foo
This way, Pytest will ensure that only one instance of the factory exists for the duration of your tests. This example in particular perhaps doesn't gain much from this, but if the outer function does a lot of processing, such as reading from a file or initialising data structures, then this can save you a lot of time overall.
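As a rough sketch of where that helps (load_huge_dataset and Record are hypothetical names, not something from the question):

import pytest


@pytest.fixture(scope="session")
def make_record():
    # Hypothetical expensive setup: runs once per test session rather than
    # once per test, because the fixture is session-scoped.
    dataset = load_huge_dataset()

    def __make_record(name):
        # The cheap per-call part: build a fresh object each time.
        return Record(name=name, defaults=dataset.defaults)

    return __make_record


def test_first(make_record):
    assert make_record("a").name == "a"


def test_second(make_record):
    assert make_record("b").name == "b"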
See the code below; it answers your question:
import pytest


@pytest.fixture
def make_foo():
    def __make_foo(name):
        print(name)
    return __make_foo


def test_a(make_foo):
    make_foo('abc')


def test_b(make_foo):
    make_foo('def')
The output is shown below:
tmp3.py::test_a abc
PASSED
tmp3.py::test_b def
PASSED
Basically, you can pass arguments to the factory fixture and use them according to your requirements.
So I have a unit test case class and a class that I want to test. The class under test uses a function from another module, and my test case uses that same function to run the method I am testing. To visualize the problem I will write some abstract code below:
import unittest

import mock

import needed_module
from casual_class import CasualClass


class CasualClassTests(unittest.TestCase):
    def setUp(self):
        self.casual_class = CasualClass()

    def _run(self, func):
        result = needed_module.runner(func)
        return result

    @mock.patch('needed_module.runner', return_value='test_run_output')
    def test_get_that_item(self, mocked_runner):
        result = self._run(self.casual_class.get_that_item())
And the tested class:
import needed_module


class CasualClass:
    def get_that_item(self):
        # ..some code..
        run_output = needed_module.runner(something)
        return something2
In this code, get_that_item won't really run because the runner is mocked. What I want to achieve is to use the original needed_module.runner in the test case and the mocked one in the tested class. I've searched the internet for too long to solve this...
To achieve this I had to keep a reference to the original needed_module.runner from before patching the function and pass it to the _run function in the test case:
class CasualClassTests(unittest.TestCase):
    def setUp(self):
        self.casual_class = CasualClass()

    def _run(self, func, runner=None):
        if not runner:
            result = needed_module.runner(func)
        else:
            result = runner(func)
        return result

    def test_get_that_item(self):
        runner = needed_module.runner  # keep a reference to the real runner
        with mock.patch('needed_module.runner', return_value='test_run_output'):
            result = self._run(self.casual_class.get_that_item(), runner)
I'm trying to complete my unit tests for the errbot plugin I am writing. Can anyone tell me how to mock out a helper method which is used by a botcmd method?
Example:
class ChatBot(BotPlugin):
    @classmethod
    def mycommandhelper(cls):
        return 'This is my awesome commandz'

    @botcmd
    def mycommand(self, message, args):
        return self.mycommandhelper()
How can I mock the mycommandhelper method when executing the mycommand command? In my case this method is performing some remote operations which should not be executed during unit testing.
A very simple/crude way would be to simply redefine the function that does the remote operations.
Example:
def new_command_helper(*args):
    # Accept any arguments so this works whether it is assigned on the
    # instance (called with no arguments) or on the class (called with self).
    return 'Mocked!'


def test(self):
    ch_bot = ChatBot()
    ch_bot.mycommandhelper = new_command_helper
    # Add your test logic
If you'd like to have this method mocked throughout all of your tests, simply do it in the setUp unittest method.
def new_command_helper(cls):
    return 'Mocked!'


class Tests(unittest.TestCase):
    def setUp(self):
        ChatBot.mycommandhelper = new_command_helper
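Note that this swaps the method out for every test that uses ChatBot and never restores the original. If that matters, a sketch of the same idea using mock.patch.object plus addCleanup, so the real method comes back after each test (it assumes ChatBot is importable in the test module):

import mock
import unittest


class Tests(unittest.TestCase):
    def setUp(self):
        patcher = mock.patch.object(
            ChatBot, 'mycommandhelper', return_value='Mocked!')
        self.mock_helper = patcher.start()
        # Undo the patch automatically after every test, pass or fail.
        self.addCleanup(patcher.stop)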
After a lot of fiddling around the following seems to work:
class TestChatBot(object):
    extra_plugin_dir = '.'

    def test_command(self, testbot):
        def othermethod():
            return 'O no'

        plugin = testbot.bot.plugin_manager.get_plugin_obj_by_name('chatbot')
        with mock.patch.object(plugin, 'mycommandhelper') as mock_cmdhelper:
            mock_cmdhelper.return_value = othermethod()
            testbot.push_message('!mycommand')
            assert 'O no' in testbot.pop_message()
Although I believe using patch decorators would be cleaner.
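Something along these lines might work as the decorator form (a sketch only: it assumes the ChatBot class is importable from the plugin module and that patching the class also affects the plugin object errbot loaded):

import mock

from chatbot import ChatBot  # hypothetical module path for the plugin


class TestChatBot(object):
    extra_plugin_dir = '.'

    # With pytest, the mock-injected argument comes right after self,
    # before fixtures such as testbot.
    @mock.patch.object(ChatBot, 'mycommandhelper', return_value='O no')
    def test_command(self, mock_cmdhelper, testbot):
        testbot.push_message('!mycommand')
        assert 'O no' in testbot.pop_message()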
I am finding that I am using plenty of context managers in Python. However, I have been testing a number of things using them, and I often need the following:
class MyTestCase(unittest.TestCase):
    def testFirstThing(self):
        with GetResource() as resource:
            u = UnderTest(resource)
            u.doStuff()
            self.assertEqual(u.getSomething(), 'a value')

    def testSecondThing(self):
        with GetResource() as resource:
            u = UnderTest(resource)
            u.doOtherStuff()
            self.assertEqual(u.getSomething(), 'a value')
When this gets to many tests, this is clearly going to get boring, so in the spirit of SPOT/DRY (single point of truth / don't repeat yourself), I'd want to refactor those bits into the test setUp() and tearDown() methods.
However, trying to do that has led to this ugliness:
def setUp(self):
    self._resource = GetSlot()
    self._resource.__enter__()

def tearDown(self):
    self._resource.__exit__(None, None, None)
There must be a better way to do this. Ideally, in the setUp()/tearDown() without repetitive bits for each test method (I can see how repeating a decorator on each method could do it).
Edit: Consider the UnderTest object to be internal, and the GetResource object to be a third-party thing (which we aren't changing).
I've renamed GetSlot to GetResource here; this is more general than my specific case, where the context manager is how the object is intended to go into a locked state and back out.
How about overriding unittest.TestCase.run() as illustrated below? This approach doesn't require calling any private methods or doing something to every method, which is what the questioner wanted.
from contextlib import contextmanager
import unittest


@contextmanager
def resource_manager():
    yield 'foo'


class MyTest(unittest.TestCase):
    def run(self, result=None):
        with resource_manager() as resource:
            self.resource = resource
            super(MyTest, self).run(result)

    def test(self):
        self.assertEqual('foo', self.resource)


unittest.main()
This approach also allows passing the TestCase instance to the context manager, if you want to modify the TestCase instance there.
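For instance, a minimal sketch of that variation, with the context manager receiving the test instance (the attribute name is just a placeholder):

from contextlib import contextmanager
import unittest


@contextmanager
def resource_manager(testcase):
    # The context manager can see and modify the TestCase instance.
    testcase.resource = 'foo'
    try:
        yield
    finally:
        del testcase.resource


class MyTest(unittest.TestCase):
    def run(self, result=None):
        with resource_manager(self):
            super(MyTest, self).run(result)

    def test(self):
        self.assertEqual('foo', self.resource)


if __name__ == '__main__':
    unittest.main()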
Manipulating context managers in situations where you don't want a with statement to clean things up if all your resource acquisitions succeed is one of the use cases that contextlib.ExitStack() is designed to handle.
For example (using addCleanup() rather than a custom tearDown() implementation):
def setUp(self):
    with contextlib.ExitStack() as stack:
        self._resource = stack.enter_context(GetResource())
        self.addCleanup(stack.pop_all().close)
That's the most robust approach, since it correctly handles acquisition of multiple resources:
def setUp(self):
    with contextlib.ExitStack() as stack:
        self._resource1 = stack.enter_context(GetResource())
        self._resource2 = stack.enter_context(GetOtherResource())
        self.addCleanup(stack.pop_all().close)
Here, if GetOtherResource() fails, the first resource will be cleaned up immediately by the with statement, while if it succeeds, the pop_all() call will postpone the cleanup until the registered cleanup function runs.
If you know you're only ever going to have one resource to manage, you can skip the with statement:
def setUp(self):
    stack = contextlib.ExitStack()
    self._resource = stack.enter_context(GetResource())
    self.addCleanup(stack.close)
However, that's a bit more error prone, since if you add more resources to the stack without first switching to the with statement based version, successfully allocated resources may not get cleaned up promptly if later resource acquisitions fail.
You can also write something comparable using a custom tearDown() implementation by saving a reference to the resource stack on the test case:
def setUp(self):
    with contextlib.ExitStack() as stack:
        self._resource1 = stack.enter_context(GetResource())
        self._resource2 = stack.enter_context(GetOtherResource())
        self._resource_stack = stack.pop_all()

def tearDown(self):
    self._resource_stack.close()
Alternatively, you can also define a custom cleanup function that accesses the resource via a closure reference, avoiding the need to store any extra state on the test case purely for cleanup purposes:
def setUp(self):
    with contextlib.ExitStack() as stack:
        resource = stack.enter_context(GetResource())
        # Transfer the callbacks out of the with statement so the resource
        # isn't cleaned up as soon as setUp returns.
        delayed_cleanup = stack.pop_all()
        def cleanup():
            if necessary:  # 'necessary' and the helper below are placeholders
                one_last_chance_to_use(resource)
            delayed_cleanup.close()
        self.addCleanup(cleanup)
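To see the addCleanup() variant end to end, here is a small self-contained sketch; GetResource here is a stand-in context manager invented for the example:

import contextlib
import unittest


@contextlib.contextmanager
def GetResource():
    # Stand-in for the third-party context manager from the question.
    print("acquired")
    try:
        yield "a value"
    finally:
        print("released")


class MyTestCase(unittest.TestCase):
    def setUp(self):
        with contextlib.ExitStack() as stack:
            self._resource = stack.enter_context(GetResource())
            # If we got this far, defer the cleanup until after the test.
            self.addCleanup(stack.pop_all().close)

    def testFirstThing(self):
        self.assertEqual(self._resource, "a value")


if __name__ == "__main__":
    unittest.main()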
pytest fixtures are very close to your idea/style, and allow for exactly what you want:
import pytest

from code.to.test import foo


@pytest.fixture(...)
def resource():
    with your_context_manager as r:
        yield r


def test_foo(resource):
    assert foo(resource).bar() == 42
The problem with calling __enter__ and __exit__ as you did is not that you have done so: they can be called outside of a with statement. The problem is that your code has no provision for calling the object's __exit__ method properly if an exception occurs.
So, the way to do it is to have a decorator that will wrap the call to your original method in a with statement. A short metaclass can apply the decorator transparently to all methods named test* in the class:
# -*- coding: utf-8 -*-
from functools import wraps
import unittest


def setup_context(method):
    # the 'wraps' decorator preserves the original function name;
    # otherwise unittest would not call it, as its name
    # would not start with 'test'
    @wraps(method)
    def test_wrapper(self, *args, **kw):
        with GetSlot() as slot:
            self._slot = slot
            result = method(self, *args, **kw)
            delattr(self, "_slot")
        return result
    return test_wrapper


class MetaContext(type):
    def __new__(mcs, name, bases, dct):
        for key, value in dct.items():
            if key.startswith("test"):
                dct[key] = setup_context(value)
        return type.__new__(mcs, name, bases, dct)


class GetSlot(object):
    def __enter__(self):
        return self

    def __exit__(self, *args, **kw):
        print "exiting object"

    def doStuff(self):
        print "doing stuff"

    def doOtherStuff(self):
        raise ValueError

    def getSomething(self):
        return "a value"


def UnderTest(*args):
    return args[0]


class MyTestCase(unittest.TestCase):
    __metaclass__ = MetaContext

    def testFirstThing(self):
        u = UnderTest(self._slot)
        u.doStuff()
        self.assertEqual(u.getSomething(), 'a value')

    def testSecondThing(self):
        u = UnderTest(self._slot)
        u.doOtherStuff()
        self.assertEqual(u.getSomething(), 'a value')


unittest.main()
(I also included mock implementations of "GetSlot" and the methods and functions in your example so that I could test the decorator and metaclass I am suggesting in this answer.)
I'd argue you should separate your test of the context manager from your test of the Slot class. You could even use a mock object simulating the initialize/finalize interface of slot to test the context manager object, and then test your slot object separately.
from unittest import TestCase, main


class MockSlot(object):
    initialized = False
    ok_called = False
    error_called = False

    def initialize(self):
        self.initialized = True

    def finalize_ok(self):
        self.ok_called = True

    def finalize_error(self):
        self.error_called = True


class GetSlot(object):
    def __init__(self, slot_factory=MockSlot):
        self.slot_factory = slot_factory

    def __enter__(self):
        s = self.s = self.slot_factory()
        s.initialize()
        return s

    def __exit__(self, type, value, traceback):
        if type is None:
            self.s.finalize_ok()
        else:
            self.s.finalize_error()


class TestContextManager(TestCase):
    def test_getslot_calls_initialize(self):
        g = GetSlot()
        with g as slot:
            pass
        self.assertTrue(g.s.initialized)

    def test_getslot_calls_finalize_ok_if_operation_successful(self):
        g = GetSlot()
        with g as slot:
            pass
        self.assertTrue(g.s.ok_called)

    def test_getslot_calls_finalize_error_if_operation_unsuccessful(self):
        g = GetSlot()
        try:
            with g as slot:
                raise ValueError
        except:
            pass
        self.assertTrue(g.s.error_called)


if __name__ == "__main__":
    main()
This makes code simpler, prevents concern mixing and allows you to reuse the context manager without having to code it in many places.