I'm trying to figure out how to decorate a test function in a way that makes the information from the decorator available to setUp. The code looks something like this:
import unittest

class MyTest(unittest.TestCase):
    def setUp(self):
        stopService()
        eraseAllPreferences()
        setTestPreferences()
        startService()

    @setPreference("abc", 5)
    def testPreference1(self):
        pass

    @setPreference("xyz", 5)
    def testPreference2(self):
        pass
The goal is for setUp to understand it's running testPreference1 and to know that it needs to set preference "abc" to 5 before starting the service (& similarly regarding "xyz" and testPreference2).
I can of course just use a conditional on the test name (if self._testMethodName == "testPreference1") but that doesn't feel quite as maintainable as the number of tests grows (+ refactoring is more error-prone). I'm hoping to solve this in setUp rather than overriding the run implementation.
I'm running Python 3.6, although if there are creative solutions that depend on newer Python features I'm happy to learn about those too.
Decorators work well, but there's no real "official" way to get the underlying test method, so I just did what the unittest source does: method = getattr(self, self._testMethodName)
import functools
import unittest

def setFoo(value):
    def inner(func):
        print(f"Changing foo for function {func}")
        func.foo = value

        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            return func(self, *args, **kwargs)

        return wrapper
    return inner

class Foo(unittest.TestCase):
    def setUp(self):
        method = getattr(self, self._testMethodName)
        print(f"Foo = {method.foo}")

    @setFoo("abc")
    def testFoo(self):
        self.assertEqual(self.testFoo.foo, "abc")

    @setFoo("xyz")
    def testBar(self):
        self.assertEqual(self.testBar.foo, "xyz")

if __name__ == "__main__":
    unittest.main()
I'm trying to complete my unit tests for the errbot plugin I am writing. Can anyone tell me how to mock out a helper method which is used by a botcmd method?
Example:
class ChatBot(BotPlugin):
    @classmethod
    def mycommandhelper(cls):
        return 'This is my awesome commandz'

    @botcmd
    def mycommand(self, message, args):
        return self.mycommandhelper()
How can I mock the mycommandhelper method when executing the mycommand command? In my case this method is performing some remote operations which should not be executed during unit testing.
A very simple/crude way would be to simply redefine the function that does the remote operations.
Example:
def new_command_helper(cls=None):
    return 'Mocked!'

def test(self):
    ch_bot = ChatBot()
    # Note: a plain function assigned on the instance is not bound, so it
    # is called with no arguments; hence the default argument above.
    ch_bot.mycommandhelper = new_command_helper
    # Add your test logic
If you'd like to have this method mocked throughout all of your tests, simply do it in the setUp unittest method.
def new_command_helper(cls):
    return 'Mocked!'

class Tests(unittest.TestCase):
    def setUp(self):
        ChatBot.mycommandhelper = new_command_helper
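If you'd rather have the original helper restored automatically after each test (the class-level assignment above leaks into every later test), a hedged sketch using unittest.mock's patcher objects, with the ChatBot name from the question:

import unittest
from unittest import mock

class Tests(unittest.TestCase):
    def setUp(self):
        # start() installs the replacement; addCleanup guarantees
        # stop() runs after every test, pass or fail.
        patcher = mock.patch.object(
            ChatBot, 'mycommandhelper', return_value='Mocked!')
        self.mock_helper = patcher.start()
        self.addCleanup(patcher.stop)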
After a lot of fiddling around the following seems to work:
class TestChatBot(object):
    extra_plugin_dir = '.'

    def test_command(self, testbot):
        def othermethod():
            return 'O no'

        plugin = testbot.bot.plugin_manager.get_plugin_obj_by_name('chatbot')
        with mock.patch.object(plugin, 'mycommandhelper') as mock_cmdhelper:
            mock_cmdhelper.return_value = othermethod()
            testbot.push_message('!mycommand')
            assert 'O no' in testbot.pop_message()
Although I believe using patch decorators would be cleaner.
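For instance, a minimal decorator-form sketch (hedged: it patches the class rather than the plugin instance, and assumes ChatBot is importable in the test module):

from unittest import mock

class TestChatBot(object):
    extra_plugin_dir = '.'

    # The patch is applied before the test body and undone afterwards;
    # the mock is injected as an extra argument.
    @mock.patch.object(ChatBot, 'mycommandhelper', return_value='O no')
    def test_command_helper(self, mock_cmdhelper):
        assert ChatBot.mycommandhelper() == 'O no'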
Let's consider this piece of code where I would like to create bar dynamically with a decorator
def foo():
    def bar():
        print "I am bar from foo"
    bar()

def baz():
    def bar():
        print "I am bar from baz"
    bar()
I thought I could create bar from the outside with a decorator:
def bar2():
    print "I am super bar from foo"

setattr(foo, 'bar', bar2)
But the result is not what I was expecting (I would like to get I am super bar from foo):
>>> foo()
I am bar from foo
Is it possible to override a sub-function on an existing function with a decorator?
The actual use case
I am writing a wrapper for a library and to avoid boilerplate code I would like to simplify my work.
Each library function has a prefix lib_ and returns an error code. I would like to add the prefix to the current function and treat the error code. This could be as simple as this:
def call(*args):
    fname = __libprefix__ + inspect.stack()[1][3]
    return_code = getattr(__lib__, fname)(*args)
    if return_code < 0:
        raise LibError(fname, return_code)

def foo():
    call()
The problem is that call might act differently in certain cases. Some library functions do not return an error_code, so it would be easier to write it like this:
def foo():
    call(check_status=True)
Or much better in my opinion (this is the point where I started thinking about decorators):
@LibFunc(check_status=True)
def foo():
    call()
In this last example I should declare call inside foo as a sub-function created dynamically by the decorator itself.
The idea was to use something like this:
class LibFunc(object):
    def __init__(self, **kwargs):
        self.kwargs = kwargs

    def __call__(self, original_func):
        decorator_self = self

        def wrappee(*args, **kwargs):
            def call(*args):
                fname = __libprefix__ + original_func.__name__
                return_code = getattr(__lib__, fname)(*args)
                if return_code < 0:
                    raise LibError(fname, return_code)

            print original_func
            print call
            # <<<< The part that does not work
            setattr(original_func, 'call', call)
            # <<<<
            original_func(*args, **kwargs)

        return wrappee
Initially I was tempted to call the call inside the decorator itself to minimize the writing:
@LibFunc()
def foo(): pass
Unfortunately, this is not an option since other things should sometimes be done before and after the call:
@LibFunc()
def foo(a, b):
    value = c_float()
    call(a, pointer(value), b)
    return value.value
Another option that I thought about was to use SWIG, but again this is not an option because I will need to rebuild the existing library with the SWIG wrapping functions.
And last but not least, I may get inspiration from SWIG typemaps and declare my wrapper as this:
@LibFunc(check_exit=True, map=('<a', '>c_float', '<c_int(b)'))
def foo(a, b): pass
This looks like the best solution to me, but this is another topic and another question...
Are you married to the idea of a decorator? Because if your goal is a bunch of module-level functions, each of which wraps somelib.lib_somefunctionname, I don't see why you need one.
Those module-level names don't have to be functions, they just have to be callable. They could be a bunch of class instances, as long as they have a __call__ method.
I used two different subclasses to determine how to treat the return value:
#!/usr/bin/env python3
import libtowrap  # Replace with the real library name.

class Wrapper(object):
    '''
    Parent class for all wrapped functions in libtowrap.
    '''
    def __init__(self, name):
        self.__name__ = str(name)
        self.wrapped_name = 'lib_' + self.__name__
        self.wrapped_func = getattr(libtowrap, self.wrapped_name)
        self.__doc__ = self.wrapped_func.__doc__
        return

class CheckedWrapper(Wrapper):
    '''
    Wraps functions in libtowrap that return an error code that must
    be checked. Negative return values indicate an error, and will
    raise a LibError. Successful calls return None.
    '''
    def __call__(self, *args, **kwargs):
        error_code = self.wrapped_func(*args, **kwargs)
        if error_code < 0:
            raise LibError(self.__name__, error_code)
        return

class UncheckedWrapper(Wrapper):
    '''
    Wraps functions in libtowrap that return a useful value, as
    opposed to an error code.
    '''
    def __call__(self, *args, **kwargs):
        return self.wrapped_func(*args, **kwargs)

strict = CheckedWrapper('strict')
negative_means_failure = CheckedWrapper('negative_means_failure')
whatever = UncheckedWrapper('whatever')
negative_is_ok = UncheckedWrapper('negative_is_ok')
Note that the wrapper "functions" are assigned while the module is being imported. They are in the top-level module namespace, and not hidden by any if __name__ == '__main__' test.
They will behave like functions for most purposes, but there will be minor differences. For example, I gave each instance a __name__ that matches the name they're assigned to, not the lib_-prefixed name used in libtowrap... but I copied the original __doc__, which might refer to a prefixed name like lib_some_other_function. Also, testing them with isinstance will probably surprise people.
For more about decorators, and for many more annoying little discrepancies like the ones I mentioned above, see Graham Dumpleton's half-hour lecture "Advanced Methods for Creating Decorators" (PyCon US 2014; slides). He is the author of the wrapt module (Python Package Index; GitHub; Read the Docs), which corrects all(?) of the usual decorator inconsistencies. It might solve your problem entirely (except for the old lib_-style names showing up in __doc__).
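As a small taste of wrapt (a hedged sketch: @wrapt.decorator is the library's documented entry point, and the pass-through behaviour shown is just the simplest possible case):

import wrapt

@wrapt.decorator
def pass_through(wrapped, instance, args, kwargs):
    # wrapt hands the decorator the wrapped callable, the bound
    # instance (None for plain functions), and the call arguments.
    return wrapped(*args, **kwargs)

@pass_through
def add(a, b):
    return a + b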
Using python 2.7, celery 3.0.24 and mock 1.0.1. I have this:
class FancyTask(celery.Task):
    @classmethod
    def helper_method1(cls, name):
        """do some remote request depending on name"""
        return 'foo' + name + 'bar'

    def __call__(self, *args, **kwargs):
        funcname = self.name.split()[-1]
        bigname = self.helper_method1(funcname)
        return bigname

@celery.task(base=FancyTask)
def task1(*args, **kwargs):
    pass

@celery.task(base=FancyTask)
def task2(*args, **kwargs):
    pass
How can I patch helper_method1 while testing either task?
I've tried something like:
import mock
from mymodule import tasks

class TestTasks(unittest.TestCase):
    def test_task1(self):
        task = tasks.task1
        task.helper_method1 = mock.MagicMock(return_value='42')
        res = task.delay('blah')
        task.helper_method1.assert_called_with('blah')
and the test is failing. The original function is the one being called. And no, this question didn't help me.
(I don't have a celery instance up and running so it's difficult for me to test this)
The target function in your application code is a classmethod. The function your test code is mocking is an instance method.
Does changing test_task1 like this help?
def test_task1(self):
    FancyTask.helper_method1 = mock.MagicMock(return_value='42')
    task = tasks.task1
    res = task.delay('blah')
    task.helper_method1.assert_called_with('blah')
You probably also need to change the assert_called_with so it is called from the class level instead of the instance level.
change
task.helper_method1.assert_called_with('blah')
to
FancyTask.helper_method1.assert_called_with('blah')
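A hedged alternative that undoes the patch automatically, using mock.patch.object as a context manager (this assumes FancyTask is reachable as tasks.FancyTask, and that the task runs eagerly in tests, e.g. with CELERY_ALWAYS_EAGER):

import mock
from mymodule import tasks

class TestTasks(unittest.TestCase):
    def test_task1(self):
        # The classmethod is replaced only inside the with-block and
        # restored afterwards, even if the assertions fail.
        with mock.patch.object(tasks.FancyTask, 'helper_method1',
                               return_value='42') as helper:
            res = tasks.task1.delay('blah')
            self.assertTrue(helper.called)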
What's the best way to toggle decorators on and off, without actually going to each decoration and commenting it out? Say you have a benchmarking decorator:
# deco.py
def benchmark(func):
    def decorator(*args, **kwargs):
        # fancy benchmarking
        return func(*args, **kwargs)
    return decorator
and in your module something like:
# mymodule.py
from deco import benchmark

class foo(object):
    @benchmark
    def f():
        pass  # code

    @benchmark
    def g():
        pass  # more code
That's fine, but sometimes you don't care about the benchmarks and don't want the overhead. I have been doing the following. Add another decorator:
# anothermodule.py
def noop(func):
    # do nothing, just return the original function
    return func
And then comment out the import line and add another:
# mymodule.py
#from deco import benchmark
from anothermodule import noop as benchmark
Now benchmarks are toggled on a per-file basis, having only to change the import statement in the module in question. Individual decorators can be controlled independently.
Is there a better way to do this? It would be nice to not have to edit the source file at all, and to specify which decorators to use in which files elsewhere.
You could add the conditional to the decorator itself:
def use_benchmark(modname):
    return modname == "mymodule"

def benchmark(func):
    if not use_benchmark(func.__module__):
        return func
    def decorator(*args, **kwargs):
        # fancy benchmarking
        return func(*args, **kwargs)
    return decorator
If you apply this decorator in mymodule.py, it will be enabled; if you apply it in othermodule.py, it will not be enabled.
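use_benchmark doesn't have to hard-code the module name; it could, for example, consult the environment (a hedged sketch; the BENCHMARK_MODULES variable is invented for illustration):

import os

def use_benchmark(modname):
    # e.g. run with BENCHMARK_MODULES="mymodule,othermodule"
    enabled = os.environ.get("BENCHMARK_MODULES", "")
    return modname in enabled.split(",")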
I've been using the following approach. It's almost identical to the one suggested by CaptainMurphy, but it has the advantage that you don't need to call the decorator like a function.
import functools

class SwitchedDecorator:
    def __init__(self, enabled_func):
        self._enabled = False
        self._enabled_func = enabled_func

    @property
    def enabled(self):
        return self._enabled

    @enabled.setter
    def enabled(self, new_value):
        if not isinstance(new_value, bool):
            raise ValueError("enabled can only be set to a boolean value")
        self._enabled = new_value

    def __call__(self, target):
        if self._enabled:
            return self._enabled_func(target)
        return target

def deco_func(target):
    """This is the actual decorator function. It's written just like any other decorator."""
    def g(*args, **kwargs):
        print("your function has been wrapped")
        return target(*args, **kwargs)
    functools.update_wrapper(g, target)
    return g
# This is where we wrap our decorator in the SwitchedDecorator class.
my_decorator = SwitchedDecorator(deco_func)

# Now my_decorator functions just like the deco_func decorator,
# EXCEPT that we can turn it on and off.
my_decorator.enabled = True

@my_decorator
def example1():
    print("example1 function")

# we'll now disable my_decorator. Any subsequent uses will not
# actually decorate the target function.
my_decorator.enabled = False

@my_decorator
def example2():
    print("example2 function")
In the above, example1 will be decorated, and example2 will NOT be decorated. When I have to enable or disable decorators by module, I just have a function that makes a new SwitchedDecorator whenever I need a different copy.
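That factory can be as small as this (a sketch following the class above; the names are mine):

def make_switched(deco_func, enabled=False):
    # Each module gets its own independently toggleable copy.
    switched = SwitchedDecorator(deco_func)
    switched.enabled = enabled
    return switched

my_module_decorator = make_switched(deco_func, enabled=True)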
I think you should use a decorator a to decorate decorator b, which lets you switch decorator b on or off with the help of a decision function.
This sounds complex, but the idea is rather simple.
So let's say you have a decorator logger:
from functools import wraps

def logger(f):
    @wraps(f)
    def innerdecorator(*args, **kwargs):
        print (args, kwargs)
        res = f(*args, **kwargs)
        print res
        return res
    return innerdecorator
This is a very boring decorator and I have a dozen or so of these: cachers, loggers, things which inject stuff, benchmarking, etc. I could easily extend it with an if statement, but this seems a bad choice, because then I would have to change a dozen decorators, which is not fun at all.
So what to do? Let's step one level higher. Say we have a decorator which can decorate a decorator. It would look like this:
@point_cut_decorator(logger)
def my_oddly_behaving_function():
    pass
This decorator accepts logger, which is not a very interesting fact. But it also has enough power to choose whether logger should be applied to my_oddly_behaving_function or not. I called it point_cut_decorator, because it has some aspects of aspect-oriented programming. A point cut is a set of locations where some code (advice) has to be interwoven with the execution flow. The definitions of point cuts are usually in one place. This technique seems very similar.
How can we implement the decision logic? I have chosen to make a function which accepts the decoratee, the decorator, the file, and the name, and which only decides whether the decorator should be applied or not. These coordinates are good enough to pinpoint the location very precisely.
This is the implementation of point_cut_decorator. I have chosen to implement the decision function as a simple function; you could extend it to decide from your settings or configuration. If you use regexes for all 4 coordinates, you will end up with something very powerful:
from functools import wraps
myselector is the decision function: on True the decorator is applied, on False it is not. Its parameters are the filename, the module name, the decorated object, and finally the decorator. This allows us to switch off behaviour in a fine-grained manner.
def myselector(fname, name, decoratee, decorator):
    print fname
    if decoratee.__name__ == "test" and fname == "decorated.py" and decorator.__name__ == "logger":
        return True
    return False
This decorates a function, checks myselector and if myselector says go on, it will apply the decorator to the function.
def point_cut_decorator(d):
    def innerdecorator(f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            if myselector(__file__, __name__, f, d):
                ps = d(f)
                return ps(*args, **kwargs)
            else:
                return f(*args, **kwargs)
        return wrapper
    return innerdecorator
def logger(f):
    @wraps(f)
    def innerdecorator(*args, **kwargs):
        print (args, kwargs)
        res = f(*args, **kwargs)
        print res
        return res
    return innerdecorator
And this is how you use it:
@point_cut_decorator(logger)
def test(a):
    print "hello"
    return "world"

test(1)
EDIT:
This is the regular expression approach I talked about:
from functools import wraps
import re
As you can see, I can specify somewhere a couple of rules, which decide whether a decorator should be applied or not:
rules = [{
    "file": "decorated.py",
    "module": ".*",
    "decoratee": ".*test.*",
    "decorator": "logger"
}]
Then I loop over all rules and return True if a rule matches, or False if no rule matches. By making rules empty in production, this will not slow down your application too much:
def myselector(fname, name, decoratee, decorator):
    for rule in rules:
        file_rule, module_rule, decoratee_rule, decorator_rule = rule["file"], rule["module"], rule["decoratee"], rule["decorator"]
        if (
            re.match(file_rule, fname)
            and re.match(module_rule, name)
            and re.match(decoratee_rule, decoratee.__name__)
            and re.match(decorator_rule, decorator.__name__)
        ):
            return True
    return False
Here is what I finally came up with for per-module toggling. It uses @nneonneo's suggestion as a starting point.
Random modules use decorators as normal, no knowledge of toggling.
foopkg.py:
from toggledeco import benchmark

@benchmark
def foo():
    print("function in foopkg")
barpkg.py:
from toggledeco import benchmark

@benchmark
def bar():
    print("function in barpkg")
The decorator module itself maintains a set of function references for all decorators that have been disabled, and each decorator checks for its existence in this set. If so, it just returns the raw function (no decorator). By default the set is empty (everything enabled).
toggledeco.py:
import functools

_disabled = set()

def disable(func):
    _disabled.add(func)

def enable(func):
    _disabled.discard(func)

def benchmark(func):
    if benchmark in _disabled:
        return func
    @functools.wraps(func)
    def deco(*args, **kwargs):
        print("--> benchmarking %s(%s,%s)" % (func.__name__, args, kwargs))
        ret = func(*args, **kwargs)
        print("<-- done")
        return ret
    return deco
The main program can toggle individual decorators on and off during imports:
from toggledeco import benchmark, disable, enable

disable(benchmark)  # no benchmarks...
import foopkg
enable(benchmark)   # until they are enabled again
import barpkg

foopkg.foo()  # no benchmarking
barpkg.bar()  # yes benchmarking

reload(foopkg)  # on Python 3, use importlib.reload
foopkg.foo()  # now with benchmarking
Output:
function in foopkg
--> benchmarking bar((),{})
function in barpkg
<-- done
--> benchmarking foo((),{})
function in foopkg
<-- done
This has the added bug/feature that enabling/disabling will trickle down to any submodules imported from modules imported in the main function.
EDIT:
Here's the class suggested by @nneonneo. In order to use it, the decorator must be called as a function (@benchmark(), not @benchmark).
import functools

class benchmark:
    disabled = False

    @classmethod
    def enable(cls):
        cls.disabled = False

    @classmethod
    def disable(cls):
        cls.disabled = True

    def __call__(self, func):
        if self.disabled:
            return func
        @functools.wraps(func)
        def deco(*args, **kwargs):
            print("--> benchmarking %s(%s,%s)" % (func.__name__, args, kwargs))
            ret = func(*args, **kwargs)
            print("<-- done")
            return ret
        return deco
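With that class, usage looks like this (following directly from the code above; note the parentheses, which create the instance that does the decorating):

benchmark.disable()

@benchmark()
def f():
    pass

benchmark.enable()

@benchmark()
def g():
    pass

f()  # runs undecorated: benchmark was disabled when f was defined
g()  # prints the benchmarking lines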
I would implement a check for a config file inside the decorator's body. If benchmark has to be used according to the config file, then I would go to your current decorator's body. If not, I would return the function and do nothing more. Something in this flavor:
# deco.py
def benchmark(func):
    if config == 'dontUseDecorators':  # no use of decorator
        # do nothing
        return func
    def decorator(*args, **kwargs):  # else call decorator
        # fancy benchmarking
        return func(*args, **kwargs)
    return decorator
What happens when calling a decorated function?

@benchmark
def f():
    # body comes here

is syntactic sugar for this:

f = benchmark(f)

so if config wants you to overlook the decorator, you are just doing f = f, which is what you expect.
I don't think anyone has suggested this yet:
benchmark_modules = {'mod1', 'mod2'}  # Load this from a config file

def benchmark(func):
    if func.__module__ not in benchmark_modules:
        return func
    def decorator(*args, **kwargs):
        # fancy benchmarking
        return func(*args, **kwargs)
    return decorator
Each function or method has a __module__ attribute that is the name of the module where the function is defined. Create a whitelist (or blacklist if you prefer) of modules where benchmarking is to occur, and if you don't want to benchmark that module just return the original undecorated function.
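For example, the whitelist could come from a small config file (a hedged sketch; the benchmark.json name and layout are invented for illustration):

import json

# benchmark.json might contain: {"modules": ["mod1", "mod2"]}
with open("benchmark.json") as fh:
    benchmark_modules = set(json.load(fh)["modules"])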
Another straightforward way:
# mymodule.py
from deco import benchmark

class foo(object):
    def f():
        pass  # code
    if <config.use_benchmark>:
        f = benchmark(f)

    def g():
        pass  # more code
    if <config.use_benchmark>:
        g = benchmark(g)
Here's a workaround to automatically toggle a decorator (here: @profile used by line_profiler):
if type(__builtins__) is not dict or 'profile' not in __builtins__:
    profile = lambda f: f  # pass-through no-op decorator
This conditional (only-if-needed) definition of the profile variable (as a pass-through lambda) prevents a NameError when importing a module whose user-defined functions have the @profile decorator applied. If I ever want to use the decorator for profiling, it will still work, not being overwritten (the real decorator already exists in the external kernprof script that injects it).
I find that I am using plenty of context managers in Python. However, I have been testing a number of things using them, and I often end up needing the following:
class MyTestCase(unittest.TestCase):
    def testFirstThing(self):
        with GetResource() as resource:
            u = UnderTest(resource)
            u.doStuff()
            self.assertEqual(u.getSomething(), 'a value')

    def testSecondThing(self):
        with GetResource() as resource:
            u = UnderTest(resource)
            u.doOtherStuff()
            self.assertEqual(u.getSomething(), 'a value')
When this gets to many tests, this is clearly going to get boring, so in the spirit of SPOT/DRY (single point of truth / don't repeat yourself), I'd want to refactor those bits into the test setUp() and tearDown() methods.
However, trying to do that has led to this ugliness:
def setUp(self):
    self._resource = GetResource()
    self._resource.__enter__()

def tearDown(self):
    self._resource.__exit__(None, None, None)
There must be a better way to do this. Ideally, in the setUp()/tearDown() without repetitive bits for each test method (I can see how repeating a decorator on each method could do it).
Edit: Consider the undertest object to be internal, and the GetResource object to be a third party thing (which we aren't changing).
I've renamed GetSlot to GetResource here, since this is more general than my specific case, where context managers are the way the object is intended to be put into a locked state and taken out of it.
How about overriding unittest.TestCase.run() as illustrated below? This approach doesn't require calling any private methods or doing something to every method, which is what the questioner wanted.
from contextlib import contextmanager
import unittest

@contextmanager
def resource_manager():
    yield 'foo'

class MyTest(unittest.TestCase):
    def run(self, result=None):
        with resource_manager() as resource:
            self.resource = resource
            super(MyTest, self).run(result)

    def test(self):
        self.assertEqual('foo', self.resource)

unittest.main()
This approach also allows passing the TestCase instance to the context manager, if you want to modify the TestCase instance there.
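For instance, a minimal sketch of that idea (the resource_manager name follows the example above; the attribute handling is just one way to do it):

from contextlib import contextmanager
import unittest

@contextmanager
def resource_manager(testcase):
    # The context manager receives the TestCase and may configure it.
    testcase.resource = 'foo'
    try:
        yield
    finally:
        del testcase.resource

class MyTest(unittest.TestCase):
    def run(self, result=None):
        with resource_manager(self):
            super(MyTest, self).run(result)

    def test(self):
        self.assertEqual('foo', self.resource)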
Manipulating context managers in situations where you don't want a with statement to clean things up if all your resource acquisitions succeed is one of the use cases that contextlib.ExitStack() is designed to handle.
For example (using addCleanup() rather than a custom tearDown() implementation):
def setUp(self):
    with contextlib.ExitStack() as stack:
        self._resource = stack.enter_context(GetResource())
        self.addCleanup(stack.pop_all().close)
That's the most robust approach, since it correctly handles acquisition of multiple resources:
def setUp(self):
    with contextlib.ExitStack() as stack:
        self._resource1 = stack.enter_context(GetResource())
        self._resource2 = stack.enter_context(GetOtherResource())
        self.addCleanup(stack.pop_all().close)
Here, if GetOtherResource() fails, the first resource will be cleaned up immediately by the with statement, while if it succeeds, the pop_all() call will postpone the cleanup until the registered cleanup function runs.
If you know you're only ever going to have one resource to manage, you can skip the with statement:
def setUp(self):
    stack = contextlib.ExitStack()
    self._resource = stack.enter_context(GetResource())
    self.addCleanup(stack.close)
However, that's a bit more error prone, since if you add more resources to the stack without first switching to the with statement based version, successfully allocated resources may not get cleaned up promptly if later resource acquisitions fail.
You can also write something comparable using a custom tearDown() implementation by saving a reference to the resource stack on the test case:
def setUp(self):
    with contextlib.ExitStack() as stack:
        self._resource1 = stack.enter_context(GetResource())
        self._resource2 = stack.enter_context(GetOtherResource())
        self._resource_stack = stack.pop_all()

def tearDown(self):
    self._resource_stack.close()
Alternatively, you can also define a custom cleanup function that accesses the resource via a closure reference, avoiding the need to store any extra state on the test case purely for cleanup purposes:
def setUp(self):
    with contextlib.ExitStack() as stack:
        resource = stack.enter_context(GetResource())
        def cleanup():
            if necessary:
                one_last_chance_to_use(resource)
            stack.pop_all().close()
        self.addCleanup(cleanup)
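On Python 3.11 and later, unittest bundles this pattern: TestCase.enterContext() enters a context manager and registers its exit with addCleanup, so the recipes above collapse to the following (hedged: requires 3.11+, check the unittest docs for your version):

def setUp(self):
    # Equivalent to the ExitStack + addCleanup recipe above.
    self._resource = self.enterContext(GetResource())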
pytest fixtures are very close to your idea/style, and allow for exactly what you want:
import pytest
from code.to.test import foo

@pytest.fixture(...)
def resource():
    with your_context_manager as r:
        yield r

def test_foo(resource):
    assert foo(resource).bar() == 42
The problem with calling __enter__ and __exit__ as you did is not that you have done so: they can be called outside of a with statement. The problem is that your code has no provision to call the object's __exit__ method properly if an exception occurs.
So, the way to do it is to have a decorator that will wrap the call to your original method in a with statement. A short metaclass can apply the decorator transparently to all methods named test* in the class:
# -*- coding: utf-8 -*-
from functools import wraps
import unittest

def setup_context(method):
    # the 'wraps' decorator preserves the original function name
    # otherwise unittest would not call it, as its name
    # would not start with 'test'
    @wraps(method)
    def test_wrapper(self, *args, **kw):
        with GetSlot() as slot:
            self._slot = slot
            result = method(self, *args, **kw)
        delattr(self, "_slot")
        return result
    return test_wrapper

class MetaContext(type):
    def __new__(mcs, name, bases, dct):
        for key, value in dct.items():
            if key.startswith("test"):
                dct[key] = setup_context(value)
        return type.__new__(mcs, name, bases, dct)

class GetSlot(object):
    def __enter__(self):
        return self

    def __exit__(self, *args, **kw):
        print "exiting object"

    def doStuff(self):
        print "doing stuff"

    def doOtherStuff(self):
        raise ValueError

    def getSomething(self):
        return "a value"

def UnderTest(*args):
    return args[0]

class MyTestCase(unittest.TestCase):
    __metaclass__ = MetaContext

    def testFirstThing(self):
        u = UnderTest(self._slot)
        u.doStuff()
        self.assertEqual(u.getSomething(), 'a value')

    def testSecondThing(self):
        u = UnderTest(self._slot)
        u.doOtherStuff()
        self.assertEqual(u.getSomething(), 'a value')

unittest.main()
(I also included mock implementations of "GetSlot" and the methods and functions in your example so that I myself could test the decorator and metaclass I am suggesting on this answer)
I'd argue you should separate your test of the context manager from your test of the Slot class. You could even use a mock object simulating the initialize/finalize interface of slot to test the context manager object, and then test your slot object separately.
from unittest import TestCase, main

class MockSlot(object):
    initialized = False
    ok_called = False
    error_called = False

    def initialize(self):
        self.initialized = True

    def finalize_ok(self):
        self.ok_called = True

    def finalize_error(self):
        self.error_called = True

class GetSlot(object):
    def __init__(self, slot_factory=MockSlot):
        self.slot_factory = slot_factory

    def __enter__(self):
        s = self.s = self.slot_factory()
        s.initialize()
        return s

    def __exit__(self, type, value, traceback):
        if type is None:
            self.s.finalize_ok()
        else:
            self.s.finalize_error()

class TestContextManager(TestCase):
    def test_getslot_calls_initialize(self):
        g = GetSlot()
        with g as slot:
            pass
        self.assertTrue(g.s.initialized)

    def test_getslot_calls_finalize_ok_if_operation_successful(self):
        g = GetSlot()
        with g as slot:
            pass
        self.assertTrue(g.s.ok_called)

    def test_getslot_calls_finalize_error_if_operation_unsuccessful(self):
        g = GetSlot()
        try:
            with g as slot:
                raise ValueError
        except:
            pass
        self.assertTrue(g.s.error_called)

if __name__ == "__main__":
    main()
This makes the code simpler, prevents mixing of concerns, and allows you to reuse the context manager without having to code it in many places.