Passing arguments to context manager - python

Is there a way to pass arguments to a context manager? Here is what I'm trying to do:
async with self.request.app['expiration_lock']('param'):
But I am getting an error:
TypeError: 'OrderStatusLock' object is not callable
The OrderStatusLock class:
class OrderStatusLock(_ContextManagerMixin, Lock):
    def __init__(self, *args, loop=None):
        print(args)
        self._waiters = None
        self._locked = False
        if loop is None:
            self._loop = events.get_event_loop()
        else:
            self._loop = loop

    async def acquire(self, *args):
        print('acq', args)
        if (not self._locked and (self._waiters is None or
                all(w.cancelled() for w in self._waiters))):
            self._locked = True
            return True
        if self._waiters is None:
            self._waiters = collections.deque()
        fut = self._loop.create_future()
        self._waiters.append(fut)
        try:
            try:
                await fut
            finally:
                self._waiters.remove(fut)
        except CancelledError:
            if not self._locked:
                self._wake_up_first()
            raise
        self._locked = True
        return True
And if it is possible, what issues could I face using this? Thank you very much.

There's a lot going on in your question, and I don't know where your _ContextManagerMixin class comes from. I also don't know much about async.
However, here's a simple (non-async) demonstration of a pattern where an argument passed to a context manager alters how its __enter__ method operates.
Remember: a context manager is, at its heart, just a class that implements an __enter__ method and an __exit__ method. The __enter__ method is called at the start of the with block, and the __exit__ method is called at the end of the with block.
The __call__ method added here in my example class is called immediately before the __enter__ method and, unlike the __enter__ method, can be called with arguments. The __exit__ method takes care to clean up the changes made to the class's internal state by the __call__ method and the __enter__ method.
from threading import Lock

class LockWrapper:
    def __init__(self):
        self.lock = Lock()
        self.food = ''

    def __enter__(self):
        self.lock.acquire()
        print(f'{self.food} for breakfast, please!')
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.lock.release()
        self.food = ''
        return True

    def __call__(self, spam_preferred: bool):
        if spam_preferred:
            self.food = 'Spam'
        else:
            self.food = 'Eggs'
        return self

breakfast_context_manager = LockWrapper()

with breakfast_context_manager(spam_preferred=True):   # prints 'Spam for breakfast, please!'
    pass

with breakfast_context_manager(spam_preferred=False):  # prints 'Eggs for breakfast, please!'
    pass
N.B. My example above will suppress all exceptions that are raised in the body of the with statement. If you don't want to suppress any exceptions, or if you only want to suppress certain kinds of exceptions, you'll need to alter the implementation of the __exit__ method.
Note also that the return values of these functions are quite important. __call__ has to return self if you want the class to be able to then call __enter__. __enter__ has to return self if you want to be able to access the context manager's internal state in the body of the with statement. __exit__ should return True if you want exceptions to be suppressed, and False if you want an encountered exception to continue to propagate outside of the with statement.
You can find a good tutorial on context managers here. The tutorial also contains some info on how you might adapt the above pattern for async code using the __aenter__ and __aexit__ methods.
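For instance, here is a minimal sketch of how the same __call__ pattern might carry over to async code (my own illustration, not the asker's OrderStatusLock; the AsyncLockWrapper name and the param argument are invented), supporting the async with wrapper('param') syntax from the question:

import asyncio

class AsyncLockWrapper:
    def __init__(self):
        self.lock = asyncio.Lock()
        self.param = None

    def __call__(self, param):
        # Stash the argument before 'async with' invokes __aenter__.
        self.param = param
        return self

    async def __aenter__(self):
        await self.lock.acquire()
        print(f'entered with {self.param!r}')
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        self.lock.release()
        self.param = None
        return False  # don't suppress exceptions

async def main():
    wrapper = AsyncLockWrapper()
    async with wrapper('param'):  # mirrors the syntax from the question
        pass

asyncio.run(main())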

Related

AssertionError not caught in unittest in Python try-except clause

I have an object created in a test case, and I want to make assertions inside one of its methods.
But the exception is swallowed by the try-except clause.
I know I could re-raise the exception in run, but that is not what I want. Is there a unittest tool that can handle this?
It seems that the assertTrue method of unittest.TestCase is just a trivial assert.
class TestDemo(unittest.TestCase):
    def test_a(self):
        test_case = self

        class NestedProc:
            def method1(self):
                print("flag show the method is running")
                test_case.assertTrue(False)

            def run(self):
                try:
                    self.method1()
                except:
                    pass  # can raise here to give the exception but not what I want.

        NestedProc().run()  # no exception raised
        # NestedProc().method1()  # exception raised
EDIT
For clarity, I paste my real-world test case here. The trickiest thing here is that ParentProcess will always succeed, so the AssertionError is not correctly propagated to the test function.
class TestProcess(unittest.TestCase):
    @pytest.mark.asyncio
    async def test_process_stack_multiple(self):
        """
        Run multiple and nested processes to make sure the process stack is always correct
        """
        expect_true = []

        def test_nested(process):
            expect_true.append(process == Process.current())

        class StackTest(plumpy.Process):
            def run(self):
                # TODO: unexpected behaviour here
                # if an assertion error happens here, it is not raised;
                # it will be handled by the try-except clause in Process.
                # Is there a better way to handle this?
                expect_true.append(self == Process.current())
                test_nested(self)

        class ParentProcess(plumpy.Process):
            def run(self):
                expect_true.append(self == Process.current())
                proc = StackTest()
                # launch the inner process
                asyncio.ensure_future(proc.step_until_terminated())

        to_run = []
        for _ in range(100):
            proc = ParentProcess()
            to_run.append(proc)

        await asyncio.gather(*[p.step_until_terminated() for p in to_run])

        for proc in to_run:
            self.assertEqual(plumpy.ProcessState.FINISHED, proc.state)
        for res in expect_true:
            self.assertTrue(res)
Any assert* method and even fail() just raises an exception. The easiest approach is probably to manually set a flag and fail() afterwards:
def test_a(self):
    success = True

    class NestedProc:
        def method1(self):
            nonlocal success
            success = False
            raise Exception()
        ...

    NestedProc().run()
    if not success:
        self.fail()
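Put together, a runnable version of that flag pattern looks something like this (the NestedProc body is a stand-in for the asker's real class, with a run() that swallows exceptions the way the question's does):

import unittest

class TestDemo(unittest.TestCase):
    def test_a(self):
        success = True

        class NestedProc:
            def method1(self):
                nonlocal success
                success = False  # record the failure before raising
                raise AssertionError("method1 failed")

            def run(self):
                try:
                    self.method1()
                except Exception:
                    pass  # the exception is swallowed, but the flag survives

        NestedProc().run()
        if not success:
            self.fail("method1 raised inside run()")

if __name__ == "__main__":
    unittest.main()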

Subclass of ContextDecorator doesn't work: 'generator' object has no attribute 'add'

I have a problem with inheriting from the ContextDecorator class. I can't understand why the session_manager() function works:
@contextmanager
def session_manager():
    session = Session()
    yield session
    try:
        session.commit()
    except Exception as e:
        session.rollback()
        raise e
    finally:
        session.close()
But exactly the same code with ContextDecorator successor class gives an error:
class SessionManager(ContextDecorator):
    def __init__(self):
        self.session = Session()

    def __enter__(self):
        try:
            yield self.session
            self.session.commit()
        except Exception as e:
            self.session.rollback()
            raise e

    def __exit__(self, *exc):
        self.session.close()
Exception:
AttributeError: 'generator' object has no attribute 'add'
The documentation and tutorials do not have complex examples (only ones with 'print' statements), and those work great: https://docs.python.org/3/library/contextlib.html
I don't understand why session_manager() works even though it is a generator function:
yield session
Here is some small and simple code I wrote:
https://gist.github.com/tranebaer/46f94263030dd8f7c1bfcf72d0e37610
The __enter__ method is not supposed to be a generator, unless you want to treat the return value as such in the runtime context. It is called when entering the block governed by the with-statement and its return value is bound to the target(s) specified in the as clause, if any. So the attribute error is the result of calling the method add() on the generator inside the block, when you meant it to be the Session object. Possible cleanup and exception handling should take place in the __exit__ method:
from contextlib import closing, ContextDecorator, ExitStack

class SessionManager(ContextDecorator):
    def __init__(self, session_cls=Session):
        self.session = session_cls()

    def __enter__(self):
        return self.session

    def __exit__(self, type, value, tb):
        with closing(self.session), ExitStack() as stack:
            stack.callback(self.session.rollback)
            if not value:
                self.session.commit()
                # If commit raises an exception, then rollback is left
                # in the exit stack.
                stack.pop_all()
Note that you don't need to inherit from ContextDecorator in order to make a context manager. Just implementing __enter__ and __exit__ is enough. In fact in this case it is a bit pointless, because a function decorated with SessionManager has no access to the Session object.
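For illustration, usage would look something like this (a sketch assuming a SQLAlchemy-style Session; session.add(), some_object, and do_work() are hypothetical):

# As a context manager: the 'as' target is the Session object.
with SessionManager() as session:
    session.add(some_object)  # hypothetical ORM call

# As a decorator (what ContextDecorator provides): the wrapped function
# never sees the Session, which is why subclassing buys little here.
@SessionManager()
def do_work():
    ...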

In python is there a way to make a function/class that behaves like a function and a context manager?

Note: I need the function/class to return an object that doesn't have an __exit__ method, and I can't change that object (that's why I am wrapping it).
So just making a class with __enter__ and __exit__ won't work, because I need it to also behave like a function.
I have tried the contextmanager decorator:
@contextmanager
def my_context_man(my_str):
    my_str = 'beginning ' + my_str
    yield my_str + ' after func'
    print('end')
And it worked perfectly within the context manager, but not as a function:
a = 'middle'
old_a = my_context_man(a)
print('old_a', old_a)
with my_context_man(a) as new_a:
    print('new_a', new_a)
The output:
old_a <contextlib._GeneratorContextManager object at 0x0000000004F832E8>
new_a beginning middle after func
end
while the desired output would be:
old_a beginning middle after func
new_a beginning middle after func
end
Edit:
The specific problem that I am having is with the psycopg2 module.
I want to use a different context manager, and it returns a connection object.
def connect(dsn=None, connection_factory=None, cursor_factory=None, **kwargs):
    # *the connection code*
    return conn
I am trying to change it so that people will be able to use it with my context manager, but in a way that will not break existing code.
I cannot change the conn object.
Your __new__ method isn't returning an instance of my_context_man, but an instance of str, and str doesn't have an __enter__ method. It's the return value of __enter__ that gets bound to the name after as in the with statement. You want
class my_context_man:
    def __init__(self, my_str):
        self.msg = my_str
        print("beginning " + my_str)

    def __enter__(self):
        return self.msg

    def __exit__(self, exc_type, exc_val, exc_tb):
        print('end')
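Usage then gives the desired behaviour (a quick sketch using the asker's values, with the printed output noted in comments):

a = 'middle'

old_a = my_context_man(a)         # prints 'beginning middle'
print('old_a', old_a.msg)         # prints 'old_a middle'

with my_context_man(a) as new_a:  # prints 'beginning middle'
    print('new_a', new_a)         # prints 'new_a middle'
                                  # prints 'end' on leaving the block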

python3 singleton pattern with "with" statement

I want to write a class using the singleton pattern to provide some persistent data storage via pickle and a dict:
@singleton
class Pdb:
    def __init__(self):
        self.cache = None
        self.dirty = False
        try:
            with open("data.pck", "rb") as fp:
                self.cache = pickle.load(fp)
        except FileNotFoundError:
            pass
        except pickle.PickleError:
            pass
        if self.cache is None:
            self.cache = {}

    def flush(self):
        if self.dirty:
            try:
                with open("data.pck", "wb") as fp:
                    pickle.dump(self.cache, fp, protocol=4)
            except pickle.PickleError:
                pass
            else:
                self.dirty = False

    def __del__(self):  # PROBLEM HERE
        self.flush()
When I was using Python 2, I could do this by overriding __del__, but that no longer appears to be correct in Python 3. How can I do it?
If I do it by "with" statement, I will need to pass the instance to each function that I call:
def func1(db):
    db.set(...)
    func3(db, x1, x2, ...)

with Pdb() as db:
    func1(db)
    func2(db)
It is complicated. Is there a Pythonic way to do a global-scope "with" statement?
If I do it by "with" statement, I will need to pass the instance to each function that I call:
No, you don't. Just use your singleton:
# global
db = Pdb()

# any other context
with db:
    ...
All that is required is that the expression produces a context manager. Referencing a singleton object with __enter__ and __exit__ methods would satisfy that requirement. You can even ignore the __enter__ return value, as I did above. The global will still be available to all your functions, the only thing that changes is that __enter__ and __exit__ will be called at the appropriate locations.
Note that even in Python 2, you should not rely on __del__ being called. And in the CPython implementation, outside circular references, the rules for when __del__ is called have not changed between Python 2 and 3.
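A runnable sketch of that idea, using a trimmed-down Pdb (the __enter__/__exit__ methods are the addition here; the question's pickle error handling is omitted for brevity):

import pickle

class Pdb:
    def __init__(self):
        self.cache = {}
        self.dirty = False

    def flush(self):
        if self.dirty:
            with open("data.pck", "wb") as fp:
                pickle.dump(self.cache, fp, protocol=4)
            self.dirty = False

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.flush()   # persist the cache whenever a 'with db:' block exits
        return False   # don't suppress exceptions

db = Pdb()  # module-level "singleton"

def func1():
    db.cache['key'] = 'value'
    db.dirty = True

with db:  # functions just use the global; nothing is passed around
    func1()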

In python, is there a good idiom for using context managers in setup/teardown

I am finding that I am using plenty of context managers in Python. However, I have been testing a number of things with them, and I often end up needing the following:
class MyTestCase(unittest.TestCase):
    def testFirstThing(self):
        with GetResource() as resource:
            u = UnderTest(resource)
            u.doStuff()
            self.assertEqual(u.getSomething(), 'a value')

    def testSecondThing(self):
        with GetResource() as resource:
            u = UnderTest(resource)
            u.doOtherStuff()
            self.assertEqual(u.getSomething(), 'a value')
When this gets to many tests, this is clearly going to get boring, so in the spirit of SPOT/DRY (single point of truth / don't repeat yourself), I'd want to refactor those bits into the test setUp() and tearDown() methods.
However, trying to do that has led to this ugliness:
def setUp(self):
    self._resource = GetSlot()
    self._resource.__enter__()

def tearDown(self):
    self._resource.__exit__(None, None, None)
There must be a better way to do this. Ideally, in the setUp()/tearDown() without repetitive bits for each test method (I can see how repeating a decorator on each method could do it).
Edit: Consider the UnderTest object to be internal, and the GetResource object to be a third-party thing (which we aren't changing).
I've renamed GetSlot to GetResource here, since this is more general than my specific case: context managers are the way the object is intended to go into a locked state and out.
How about overriding unittest.TestCase.run() as illustrated below? This approach doesn't require calling any private methods or doing something to every method, which is what the questioner wanted.
from contextlib import contextmanager
import unittest

@contextmanager
def resource_manager():
    yield 'foo'

class MyTest(unittest.TestCase):
    def run(self, result=None):
        with resource_manager() as resource:
            self.resource = resource
            super(MyTest, self).run(result)

    def test(self):
        self.assertEqual('foo', self.resource)

unittest.main()
This approach also allows passing the TestCase instance to the context manager, if you want to modify the TestCase instance there.
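For example, the context manager could receive the test case and set attributes on it directly (a sketch; resource_manager taking the TestCase is my own variation on the answer above):

from contextlib import contextmanager
import unittest

@contextmanager
def resource_manager(test_case):
    test_case.resource = 'foo'  # modify the TestCase instance directly
    try:
        yield
    finally:
        del test_case.resource  # clean up after the test has run

class MyTest(unittest.TestCase):
    def run(self, result=None):
        with resource_manager(self):
            super().run(result)

    def test(self):
        self.assertEqual('foo', self.resource)

unittest.main()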
Manipulating context managers in situations where you don't want a with statement to clean things up if all your resource acquisitions succeed is one of the use cases that contextlib.ExitStack() is designed to handle.
For example (using addCleanup() rather than a custom tearDown() implementation):
def setUp(self):
    with contextlib.ExitStack() as stack:
        self._resource = stack.enter_context(GetResource())
        self.addCleanup(stack.pop_all().close)
That's the most robust approach, since it correctly handles acquisition of multiple resources:
def setUp(self):
    with contextlib.ExitStack() as stack:
        self._resource1 = stack.enter_context(GetResource())
        self._resource2 = stack.enter_context(GetOtherResource())
        self.addCleanup(stack.pop_all().close)
Here, if GetOtherResource() fails, the first resource will be cleaned up immediately by the with statement, while if it succeeds, the pop_all() call will postpone the cleanup until the registered cleanup function runs.
If you know you're only ever going to have one resource to manage, you can skip the with statement:
def setUp(self):
    stack = contextlib.ExitStack()
    self._resource = stack.enter_context(GetResource())
    self.addCleanup(stack.close)
However, that's a bit more error prone, since if you add more resources to the stack without first switching to the with statement based version, successfully allocated resources may not get cleaned up promptly if later resource acquisitions fail.
You can also write something comparable using a custom tearDown() implementation by saving a reference to the resource stack on the test case:
def setUp(self):
    with contextlib.ExitStack() as stack:
        self._resource1 = stack.enter_context(GetResource())
        self._resource2 = stack.enter_context(GetOtherResource())
        self._resource_stack = stack.pop_all()

def tearDown(self):
    self._resource_stack.close()
Alternatively, you can also define a custom cleanup function that accesses the resource via a closure reference, avoiding the need to store any extra state on the test case purely for cleanup purposes:
def setUp(self):
    with contextlib.ExitStack() as stack:
        resource = stack.enter_context(GetResource())
        # pop_all() must run before the with block exits, otherwise the
        # with statement would release the resource at the end of setUp()
        delayed_stack = stack.pop_all()
        def cleanup():
            if necessary:
                one_last_chance_to_use(resource)
            delayed_stack.close()
        self.addCleanup(cleanup)
pytest fixtures are very close to your idea/style, and allow for exactly what you want:
import pytest
from code.to.test import foo

@pytest.fixture(...)
def resource():
    with your_context_manager as r:
        yield r

def test_foo(resource):
    assert foo(resource).bar() == 42
The problem with calling __enter__ and __exit__ as you did, is not that you have done so: they can be called outside of a with statement. The problem is that your code has no provision to call the object's __exit__ method properly if an exception occurs.
So, the way to do it is to have a decorator that will wrap the call to your original method in a with statement. A short metaclass can apply the decorator transparently to all methods named test* in the class:
from functools import wraps
import unittest

def setup_context(method):
    # the 'wraps' decorator preserves the original function name
    # otherwise unittest would not call it, as its name
    # would not start with 'test'
    @wraps(method)
    def test_wrapper(self, *args, **kw):
        with GetSlot() as slot:
            self._slot = slot
            result = method(self, *args, **kw)
            delattr(self, "_slot")
        return result
    return test_wrapper

class MetaContext(type):
    def __new__(mcs, name, bases, dct):
        for key, value in dct.items():
            if key.startswith("test"):
                dct[key] = setup_context(value)
        return type.__new__(mcs, name, bases, dct)

class GetSlot(object):
    def __enter__(self):
        return self

    def __exit__(self, *args, **kw):
        print("exiting object")

    def doStuff(self):
        print("doing stuff")

    def doOtherStuff(self):
        raise ValueError

    def getSomething(self):
        return "a value"

def UnderTest(*args):
    return args[0]

class MyTestCase(unittest.TestCase, metaclass=MetaContext):
    def testFirstThing(self):
        u = UnderTest(self._slot)
        u.doStuff()
        self.assertEqual(u.getSomething(), 'a value')

    def testSecondThing(self):
        u = UnderTest(self._slot)
        u.doOtherStuff()
        self.assertEqual(u.getSomething(), 'a value')

unittest.main()
(I also included mock implementations of "GetSlot" and the methods and functions from your example so that I could test the decorator and metaclass I am suggesting in this answer.)
I'd argue you should separate your test of the context manager from your test of the Slot class. You could even use a mock object simulating the initialize/finalize interface of slot to test the context manager object, and then test your slot object separately.
from unittest import TestCase, main

class MockSlot(object):
    initialized = False
    ok_called = False
    error_called = False

    def initialize(self):
        self.initialized = True

    def finalize_ok(self):
        self.ok_called = True

    def finalize_error(self):
        self.error_called = True

class GetSlot(object):
    def __init__(self, slot_factory=MockSlot):
        self.slot_factory = slot_factory

    def __enter__(self):
        s = self.s = self.slot_factory()
        s.initialize()
        return s

    def __exit__(self, type, value, traceback):
        if type is None:
            self.s.finalize_ok()
        else:
            self.s.finalize_error()

class TestContextManager(TestCase):
    def test_getslot_calls_initialize(self):
        g = GetSlot()
        with g as slot:
            pass
        self.assertTrue(g.s.initialized)

    def test_getslot_calls_finalize_ok_if_operation_successful(self):
        g = GetSlot()
        with g as slot:
            pass
        self.assertTrue(g.s.ok_called)

    def test_getslot_calls_finalize_error_if_operation_unsuccessful(self):
        g = GetSlot()
        try:
            with g as slot:
                raise ValueError
        except:
            pass
        self.assertTrue(g.s.error_called)

if __name__ == "__main__":
    main()
This makes the code simpler, prevents mixing of concerns, and allows you to reuse the context manager without having to code it in many places.
