I'm trying to wrap my mapper/reducer functions with something like:
def log_exceptions_to_sentry(sentry_id, raise_exception):
    """Decorator factory: report any exception raised by the wrapped
    function to Sentry, optionally re-raising it afterwards.

    sentry_id       -- DSN/id handed to the Sentry ``Client``
    raise_exception -- when truthy, re-raise after reporting
    """
    def decorator(fn):
        def wrapper(*args, **kwargs):
            try:
                return fn(*args, **kwargs)
            except Exception:
                # Report with full traceback info.  A bare ``raise``
                # preserves the original traceback (``raise e`` would
                # rebuild it from here).
                client = Client(sentry_id)
                client.captureException(
                    exc_info=sys.exc_info())
                if raise_exception:
                    raise
        return wrapper
    return decorator
and so my mapper/reducer functions look like:
# NOTE(review): the leading "#" was originally the "@" decorator marker
# (garbled by formatting); restored so the decorator actually applies.
@log_exceptions_to_sentry(SENTRY_ID, False)
def my_mapper_fn(item):
    # With raise_exception=False any exception is reported to Sentry
    # and then swallowed by the wrapper.
    logging.info(item)
But it doesn't seem to work. Without the decorator, I'd find INFO logs of item. But if I put the decorator, it seems the mapper/reducer functions don't get called at all.
I was hoping to make it easy to log any errors my functions might have so I can fix them, as trying to track down MapReduce via AppEngine's logs is almost impossible.
I could wrap the entire function body with try ... except block, but a decorator would be cleaner.
I believe you have an issue with the decorator structure. In particular, I think you want to replace
try:
return fn(*args, **kwargs)
with
try:
fn(*args, **kwargs)
I'm missing some of the functions to test this, but you can see simplified decorator examples here if you want to run one: http://simeonfranklin.com/blog/2012/jul/1/python-decorators-in-12-steps/
Try something like this to make sure your code works, then try the more complicated parameterized version after:
# Module-level configuration read by the simplified decorator below.
# NOTE(review): ``id`` here is the builtin -- presumably a placeholder
# for a real Sentry id; confirm before use.
sentry_id = id
raise_exception = 1

def basic_decorator(function):
    """Unparameterized variant: pulls sentry_id/raise_exception from
    module scope instead of taking them as arguments.

    Note the wrapper deliberately discards the wrapped function's
    return value (as suggested earlier in the discussion).
    """
    global sentry_id, raise_exception
    def wrapper(*args, **kwargs):
        try:
            function(*args, **kwargs)
        except Exception:
            client = Client(sentry_id)
            client.captureException(exc_info=sys.exc_info())
            if raise_exception:
                raise

    return wrapper

# "#basic_decorator" was a garbled "@basic_decorator"; restored.
@basic_decorator
def my_mapper_fn(item):
    logging.info(item)
To parameterize sentry_id and raise_exception, wrap the decorator inside another decorator. The idea is that when the basic decorator is defined, sentry_id, raise_exception, and function will be defined ahead of time and enclosed within its scope. This should look something like
def log_exceptions_to_sentry(sentry_id, raise_exception=1):
    """Parameterized version: ``sentry_id`` and ``raise_exception`` are
    captured in the closure of ``basic_decorator`` when the factory is
    called, so each decorated function gets its own settings.

    The wrapper intentionally discards the wrapped function's return
    value, mirroring the simplified example above.
    """
    def basic_decorator(function):
        def wrapper(*args, **kwargs):
            try:
                function(*args, **kwargs)
            except Exception:
                client = Client(sentry_id)
                client.captureException(exc_info=sys.exc_info())
                if raise_exception:
                    raise
        return wrapper
    return basic_decorator
# "#log_exceptions_to_sentry" was a garbled "@" decorator line; restored.
@log_exceptions_to_sentry(SENTRY_ID, RAISE_EXCEPTION)
def my_mapper_fn(item):
    logging.info(item)
I don't know what SENTRY_ID or Client is, since you didn't post it. So I made up my own. Using your code exactly, everything appears to work as expected. I'm not sure what you're seeing that isn't working right.
# Dummy stand-ins so the example runs without the real Sentry SDK.
SENTRY_ID = 1

class Client(object):
    """Minimal fake of the Sentry client: just prints the exc_info."""

    def __init__(self, sentry_id):
        pass

    def captureException(self, **kwargs):
        print('captureException, ', kwargs['exc_info'])
def log_exceptions_to_sentry(sentry_id, raise_exception):
    """Decorator factory: ship exceptions from the wrapped callable to
    Sentry and, when ``raise_exception`` is truthy, re-raise them."""
    def decorator(fn):
        def wrapper(*args, **kwargs):
            try:
                return fn(*args, **kwargs)
            except Exception as e:
                # Report, then optionally propagate the same exception.
                Client(sentry_id).captureException(exc_info=sys.exc_info())
                if raise_exception:
                    raise e
        return wrapper
    return decorator
def fn(item):
    """Undecorated control case: emits the item at three log levels."""
    logging.debug(item)
    logging.info(item)
    logging.error(item)
# Both "#log_exceptions_to_sentry" lines were garbled "@" decorator
# markers; restored so the decorator is actually applied.
@log_exceptions_to_sentry(SENTRY_ID, False)
def my_mapper_fn(item):
    logging.debug(item)
    logging.info(item)
    logging.error(item)
    return 1

@log_exceptions_to_sentry(SENTRY_ID, False)
def my_mapper_fn2(item):
    # Always raises; with raise_exception=False the decorator reports
    # the exception and the call returns None.
    raise Exception()
# Configure root logging so INFO and above are printed.
logging.basicConfig(
    level=logging.INFO,
    format='%(levelname)s:%(name)s:%(message)s',
    #format = '%(message)s',
)

# Exercise the plain function and both decorated mappers.
x = fn({'a':1})
print(x)
x = my_mapper_fn({'b':2})
print(x)
x = my_mapper_fn2({'c':3})
print(x)
Output:
INFO:root:{'a': 1}
ERROR:root:{'a': 1}
None
INFO:root:{'b': 2}
ERROR:root:{'b': 2}
1
captureException, (<type 'exceptions.Exception'>, Exception(), <traceback object at 0x1813cf8>)
None
Related
I want to use a decorator (composer) that receives as a parameter n number of decorators; these decorators will be used to decorate a function. Also I want to pass some parameters from two origins, a parameter named "SKIP" in the composer and another parameter named "parameter" sent by the parameter_sender decorator. Here's what I tried:
def compose(*decorators, SKIP=None):
    """The question's composer, reproduced with formatting restored.

    NOTE(review): this deliberately keeps the bug under discussion --
    rebinding ``func`` inside ``func_wrap`` makes ``func`` local to
    func_wrap, so EVERY path (including SKIP) fails with
    "local variable 'func' referenced before assignment".
    Only the garbled "@wraps" marker and the lost indentation were
    repaired; see the answer below for the actual fix.
    """
    def something(func):
        @wraps(func)
        def func_wrap(parameter=None, **kwargs):
            try:
                if SKIP:
                    print("I'm here")
                    return func(parameter=parameter, **kwargs)
                else:
                    for decorator in reversed(decorators):
                        func = decorator(func, parameter=parameter, **kwargs)  # --------- This line is providing the error ------------------
                    return func
                raise exception
            except Exception as e:
                print(e)
                raise exception
        return func_wrap
    return something
And here is an example of where do I want to use it. In this example I want to SKIP the composing of all the decorators if the variable SKIP is true.
# All three "#..." lines were garbled "@" decorator markers; restored.
@application.route("/function/<id_something>", methods=['GET'])
@parameter_sender
@compose(decorator_1, decorator_2, SKIP=True)
def function(id_something, **kwargs):
    try:
        #TODO:
        return jsonify("ok")
    except Exception as e:
        print(e)
But i've got an error that says this:
>>I'm here
>>local variable 'func' referenced before assignment
Even when the if statement is working. PD: It works without the line indicated in the composer.
The following code should do the thing.
You were trying to set a value for a variable from outer scope. In my example I used separate temp variable composition.
def compose(*decorators, SKIP=None):
    """Apply ``decorators`` around the target unless ``SKIP`` is truthy.

    Fix for the question: the chain is built in a separate local name
    (``composition``) instead of rebinding the enclosing ``func``, which
    had made ``func`` local and caused the UnboundLocalError.
    """
    def something(func):
        @wraps(func)
        def func_wrap(*args, **kwargs):
            try:
                if SKIP:
                    print("I'm here")
                    return func(*args, **kwargs)
                else:
                    composition = func
                    # Reversed so the first listed decorator ends up
                    # outermost, matching stacked-@ semantics.
                    for decorator in reversed(decorators):
                        composition = decorator(composition)
                    return composition(*args, **kwargs)
            except Exception as e:
                print(e)
                raise
        return func_wrap
    return something
I found myself using the following pattern in my tests quite often:
def test(params):
    """Run run_test for every param, deferring failures so that every
    case executes; fail once at the end if anything went wrong."""
    failures = []
    for p in params:
        try:
            run_test(p)  # Or a block of codes that can continue or break
        except Exception as e:
            failures.append(e)
    assert isEmpty(failures), 'error encountered: {}'.format(failures)
I find myself rewriting this pattern quite often, especially with long code block for the loop that has some flow controls with continue and break. I am wondering if there is a python-ic wrapping to this pattern.
I have thought about a wrapper function like this:
def assert_all_tests(test_list):
    """Invoke each zero-argument test, collect any exceptions, and
    assert that none occurred."""
    errors = []
    for runner in test_list:
        try:
            runner()
        except Exception as exc:
            errors.append(exc)
    assert isEmpty(errors), 'error encountered: {}'.format(errors)

def test(params):
    # Bind each param up front; note this trades away continue/break.
    assert_all_tests([functools.partial(run_test, p) for p in params])
But I dislike this approach because it wrapped away the loop. There is no way for callable t to do flow control of the loop with continue or break (there is no loop any more, only a list comprehension).
Another approach is to use a context class like this:
def test(params):
    """Context-manager flavour: each ErrorHandler records errors into a
    class-level list that is asserted empty after the loop."""
    ErrorHandler.clearErrorList()
    for p in params:
        with ErrorHandler():
            run_test(p)  # or code block that can continue or break
    ErrorHandler.assertEmptyErrorList()
I'd like to hear idea of approaches to wrap around this pattern. Thanks.
EDIT
Thank you all for your comments.
New approach inspired by #paul-cornelius's answer
class ResultCollector(object):
    """Collect per-step outcomes and assert they are clean at the end.

    Failures are recorded in ``result_list``; ``assertClean`` fails if
    any non-None entry is present.
    """

    def __init__(self, raise_on_error=True):
        # raise_on_error: propagate non-AssertionError exceptions
        # immediately instead of recording them.
        self.result_list = []
        self.raise_on_error = raise_on_error

    def do(self, func, *args, **kwds):
        '''do can only deal with code block that can be wrapped into a function'''
        try:
            return func(*args, **kwds)
        except Exception as e:
            if not isinstance(e, AssertionError) and self.raise_on_error:
                raise
            # FIX: Python 3 exceptions have no ``.message`` attribute
            # (Py2-only); fall back to the exception object itself.
            self.result_list.append(getattr(e, 'message', None) or e)
        else:
            # NOTE(review): unreachable as written -- the ``return`` in
            # the try block skips the else clause, so successes are
            # never recorded.  Kept to preserve the original behavior.
            self.result_list.append(None)

    def assertClean(self):
        assert not [x for x in self.result_list if x is not None], 'test results: {}'.format(self.result_list)

    def __enter__(self):
        self.result_list = []
        return self

    def __exit__(self, exc_t, exc_i, exc_tb):
        if exc_t:
            # Unexpected exception inside the with-block: propagate.
            return None
        self.assertClean()
        return True
def test():
    """Demo: mixing loop control (continue/break) with ResultCollector."""
    def can_be_refactored_into_func(p):
        assert p % 3, 'failed {}'.format(p)

    def condition_for_skip(p):
        return p % 2

    def condition_for_break(p):
        return p > 5

    with ResultCollector() as rc:
        for p in range(10):
            # Skipped and early-ended iterations are recorded manually
            # so they show up in the final report.
            if condition_for_skip(p):
                rc.result_list.append('skipped {}'.format(p))
                continue
            if condition_for_break(p):
                rc.result_list.append('ended {}'.format(p))
                break
            rc.do(can_be_refactored_into_func, p)
It works pretty well when the code into loop block can be divided up into functions like above.
How about a little class that only does the one thing you find yourself doing over and over:
class TestTracker:
    """Accumulate exceptions from individual test steps.

    Usable two ways: ``do_test`` wraps a single call, or the instance
    serves as a context manager around an arbitrary block.
    """

    def __init__(self):
        self.error_list = []

    def do_test(self, f, p):
        # Run one test callable; record rather than propagate failure.
        try:
            f(p)
        except Exception as e:
            self.error_list.append(e)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # exc_value is None on a clean exit; otherwise record and
        # suppress by returning True.
        if exc_value is not None:
            self.error_list.append(exc_value)
        return True
def test(params):
    """Per-call form: each run_test invocation is wrapped by do_test."""
    tracker = TestTracker()
    for p in params:
        tracker.do_test(run_test, p)
    assert isEmpty(tracker.error_list), 'error encountered: {}'.format(tracker.error_list)

def test2(params):
    """Context-manager form: supports continue/break inside the block."""
    tracker = TestTracker()
    for p in params:
        with tracker:
            # a block of code with loop control statements
            pass
    assert isEmpty(tracker.error_list), 'error encountered: {}'.format(tracker.error_list)
I modified this answer to make the class a context manager. The test2 shows how that can be used with loop control statements. If no exception is raised within the context, the arguments to __exit__ will be None.
You could even mix with statements and calls to do_test.
Python can do anything!
Edits :
Add some convenience to TestTracker
class TestTracker:
    """Variant with conveniences: truthiness reflects success and
    str() renders the collected errors."""

    def __init__(self):
        self.error_list = []

    def do_test(self, f, p):
        # Record failures instead of letting them propagate.
        try:
            f(p)
        except Exception as e:
            self.error_list.append(e)

    def __bool__(self):
        # True only while no errors have been recorded.
        return len(self.error_list) == 0

    def __str__(self):
        return 'error encountered: {}'.format(self.error_list)
def test(params):
    """Run every param through run_test; assert the tracker stayed clean."""
    tracker = TestTracker()
    for p in params:
        tracker.do_test(run_test, p)
    assert tracker, str(tracker)
I use pytest and try to raise an exception at testing phase to test whether the exception could be handled by the method. But seems it always pass, even I delete the try...except... block.
This is the class which has an error and has been handled
class SaltConfig(GridLayout):
    # Quoted verbatim from the question (indentation restored only).
    def check_phone_number_on_first_contact(self, button):
        # NOTE(review): ``tt`` is not defined anywhere in this excerpt,
        # and instanciate_ServerMsg below takes no ``self`` -- both are
        # defects in the question's own code, kept as-is.
        s = self.instanciate_ServerMsg(tt)
        try:
            s.send()
        except HTTPError as err:
            # Error path under test: log the HTTP status and bail out.
            print("[HTTPError] : " + str(err.code))
            return
        # some code when running without error

    def instanciate_ServerMsg():
        return ServerMsg()
This is the helper class which generates the ServerMsg object used by the former class.
class ServerMsg(OrderedDict):
    def send(self, answerCallback=None):
        """Send this message to the server (stubbed in the excerpt)."""
        #send something to server via urllib.urlopen
This is my tests code:
class TestSaltConfig:
    def test_check_phone_number_on_first_contact(self):
        # Build the HTTPError that the mocked send() should raise.
        myError = HTTPError(url="http://127.0.0.1", code=500,
                            msg="HTTP Error Occurs", hdrs="donotknow", fp=None)
        mockServerMsg = mock.Mock(spec=ServerMsg)
        mockServerMsg.send.side_effect = myError
        # NOTE(review): mocking SaltConfig itself means the real method
        # under test never executes -- this is exactly the questioner's
        # problem; the call below just exercises a Mock attribute.
        mockSalt = mock.Mock(spec=SaltConfig)
        mockSalt.instanciate_ServerMsg.return_value = mockServerMsg
        mockSalt.check_phone_number_on_first_contact(self, "2")
I think the above code doesn't make much sense since I'm actually testing on a mockObject, but the reason is that I don't know how to raise an exception when calling a method when the exception is already handled.
How to solve it? Thanks
Here is an example using decorator instead of Mock library to achieve your purpose.
I slightly modified your code to make it runnable in my environment.
import unittest

def exception_function(f, exception_type):
    """Decorator factory whose product replaces the decorated function
    entirely with one that raises ``exception_type`` when called.
    (``f`` is accepted for symmetry but unused.)"""
    def exception_fn(*args, **kwargs):
        raise exception_type
    def fn(*args, **kwargs):
        return exception_fn
    return fn

def wrap(f, exception_type):
    """Return a callable that raises ``exception_type`` instead of
    running ``f`` -- used below to fault-inject ServerMsg.send.
    (The "#exception_function" line was a garbled "@"; restored.)"""
    @exception_function(f, exception_type)
    def fn(*args, **kwargs):
        return f(*args, **kwargs)
    return fn
class ServerMsg():
    """Runnable stand-in for the question's network-facing class."""

    def send(self):
        # Pretend to talk to the server.
        print("send normally")
class SaltConfig():
    """Simplified version of the class under test."""

    def check_phone_number_on_first_contact(self):
        # Exercise ServerMsg.send, handling the injected ValueError.
        msg = ServerMsg()
        try:
            msg.send()
        except ValueError:
            print("raise exception")
class TestSaltConfig(unittest.TestCase):
    def test_check_phone_number_on_first_contact(self):
        s = SaltConfig()
        # Keep a reference so the patched method can be restored.
        original_method = ServerMsg.send
        print (ServerMsg.send) #<unbound method ServerMsg.send>
        s.check_phone_number_on_first_contact() #send normally
        # Monkey-patch send so it raises ValueError, then exercise the
        # error-handling branch of the method under test.
        ServerMsg.send = wrap(ServerMsg.send, ValueError)
        print (ServerMsg.send) #<unbound method ServerMsg.exception_fn>
        s.check_phone_number_on_first_contact() #raise exception
        # Restore the original method so other tests are unaffected.
        ServerMsg.send = original_method
        print (ServerMsg.send) #<unbound method ServerMsg.send>
In Celery, you can retry any task in case of exception. You can do it like so:
# "#task" was a garbled "@task"; also modernized the Python-2-only
# "except ZeroDivisionError, exc" to the as-form (works on 2.6+ and 3).
@task(max_retries=5)
def div(a, b):
    """Celery task: retry (up to 5 times) on division by zero."""
    try:
        return a / b
    except ZeroDivisionError as exc:
        # retry() raises Retry itself; the explicit raise documents that.
        raise div.retry(exc=exc)
In this case, if you try to divide by zero, the task will be retried five times. But you have to check for errors in your code explicitly. The task will not be retried if you skip the try-except block.
I want my functions to look like:
# "#celery.task" was a garbled "@celery.task"; restored.
@celery.task(autoretry_on=ZeroDivisionError, max_retries=5)
def div(a, b):
    return a / b
Celery (since version 4.0) has exactly what you were looking for:
# "#app.task" was a garbled "@app.task"; restored.
@app.task(autoretry_for=(SomeException,))
def my_task():
    ...
See: http://docs.celeryproject.org/en/latest/userguide/tasks.html#automatic-retry-for-known-exceptions
I searched this issue for a while, but found only this feature request.
I decide to write my own decorator for doing auto-retries:
def task_autoretry(*args_task, **kwargs_task):
    """Create a Celery task that automatically retries when the
    exception type passed as ``autoretry_on`` (default: Exception) is
    raised by the task body.  All other arguments are forwarded to
    Celery's ``task`` decorator.

    Fixes vs. the pasted original: restored the garbled "@" markers and
    replaced the Python-2-only "except E, exc" syntax.
    """
    def real_decorator(func):
        @task(*args_task, **kwargs_task)
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                func(*args, **kwargs)
            except kwargs_task.get('autoretry_on', Exception) as exc:
                wrapper.retry(exc=exc)
        return wrapper
    return real_decorator
With this decorator I can rewrite my previous task:
# "#task_autoretry" was a garbled "@task_autoretry"; restored.
@task_autoretry(autoretry_on=ZeroDivisionError, max_retries=5)
def div(a, b):
    return a / b
I've modified your answer to work with the existing Celery API (currently 3.1.17)
class MyCelery(Celery):
    """Celery subclass whose ``task()`` adds an ``autoretry_on`` option
    (back-port of Celery 4's autoretry_for to the 3.1.x API).

    The two "#..." lines before ``wrapper`` were garbled "@" decorator
    markers; restored.
    """

    def task(self, *args_task, **opts_task):
        def real_decorator(func):
            # Bind the parent decorator first; super() cannot be used
            # directly in the decorator expression on Python 2.
            sup = super(MyCelery, self).task

            @sup(*args_task, **opts_task)
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                try:
                    func(*args, **kwargs)
                except opts_task.get('autoretry_on', Exception) as exc:
                    logger.info('Yo! We did it!')
                    wrapper.retry(exc=exc, args=args, kwargs=kwargs)
            return wrapper
        return real_decorator
Then, in your tasks
app = MyCelery()
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)

# "#app.task" was a garbled "@app.task"; restored.
@app.task(autoretry_on=Exception)
def mytask():
    raise Exception('Retrying!')
This allows you to add the autoretry_on functionality to your tasks without having to use a separate decorator to define tasks.
Here is an improved version of the existing answers.
This fully implements the Celery 4.2 behaviour (as documented here) but for Celery 3.1.25.
It also doesn't break the different task decorator forms (with/without parentheses) and returns/raises properly.
import functools
import random
from celery.app.base import Celery as BaseCelery
def get_exponential_backoff_interval(factor, retries, maximum, full_jitter=False):
    """
    Calculate the exponential backoff wait time.
    (taken from Celery 4 `celery/utils/time.py`)
    """
    # factor * 2**retries; zero whenever factor is zero.
    countdown = factor * (2 ** retries)
    # Full jitter according to
    # https://www.awsarchitectureblog.com/2015/03/backoff.html
    if full_jitter:
        countdown = random.randrange(countdown + 1)
    # Clamp into [0, maximum]; the max() guards against negatives.
    return max(0, min(maximum, countdown))
class Celery(BaseCelery):
    def task(self, *args, **opts):
        """
        Overridden to add a back-port of Celery 4's `autoretry_for` task args.

        NOTE(review): the pasted version had every "@" garbled to "#"
        (including inside the two TypeError messages, which mirror
        Celery's own wording); restored here.
        """
        super_method = super(Celery, self).task

        def inner_create_task_cls(*args_task, **opts_task):
            # http://docs.celeryproject.org/en/latest/userguide/tasks.html#Task.autoretry_for
            autoretry_for = tuple(opts_task.get('autoretry_for', ()))  # Tuple[Type[Exception], ...]
            retry_backoff = int(opts_task.get('retry_backoff', False))  # multiplier, default if True: 1
            retry_backoff_max = int(opts_task.get('retry_backoff_max', 600))  # seconds
            retry_jitter = opts_task.get('retry_jitter', True)  # bool
            retry_kwargs = opts_task.get('retry_kwargs', {})

            def real_decorator(func):
                @super_method(*args_task, **opts_task)
                @functools.wraps(func)
                def wrapper(*func_args, **func_kwargs):
                    try:
                        return func(*func_args, **func_kwargs)
                    except autoretry_for as exc:
                        if retry_backoff:
                            retry_kwargs['countdown'] = get_exponential_backoff_interval(
                                factor=retry_backoff,
                                retries=wrapper.request.retries,
                                maximum=retry_backoff_max,
                                full_jitter=retry_jitter,
                            )
                        raise wrapper.retry(exc=exc, **retry_kwargs)
                return wrapper
            return real_decorator

        # handle both `@task` and `@task(...)` decorator forms
        if len(args) == 1:
            if callable(args[0]):
                return inner_create_task_cls(**opts)(*args)
            raise TypeError('argument 1 to @task() must be a callable')
        if args:
            raise TypeError(
                '@task() takes exactly 1 argument ({0} given)'.format(
                    sum([len(args), len(opts)])))
        return inner_create_task_cls(**opts)
I have also written some unit tests for this as am using it in my project.
They can be found in this gist but note they are not easily runnable - treat more as documentation of how the above feature works (and validation that it works properly).
Can you suggest a way to code a drop-in replacement for the "with" statement that will work in Python 2.4?
It would be a hack, but it would allow me to port my project to Python 2.4 more nicely.
EDIT:
Removed irrelevant metaclass sketch
Just use try-finally.
Really, this may be nice as a mental exercise, but if you actually do it in code you care about you will end up with ugly, hard to maintain code.
You could (ab)use decorators to do this, I think. The following works, eg:
def execute_with_context_manager(man):
    """Decorator that immediately runs the decorated function inside
    context manager ``man`` -- a 'with'-statement substitute for
    pre-2.5 Python.  The decorator returns None, so the decorated name
    is consumed rather than kept."""
    def decorator(f):
        target = man.__enter__()
        normal_exit = True
        try:
            try:
                f(target)
            except:
                normal_exit = False
                # Give the manager a chance to suppress the exception.
                if not man.__exit__(*sys.exc_info()):
                    raise
        finally:
            if normal_exit:
                man.__exit__(None, None, None)
        return None
    return decorator
#execute_with_context_manager(open("/etc/motd"))
def inside(motd_file):
for line in motd_file:
print line,
(Well, in Python 2.4 file objects don't have __enter__ and __exit__ methods, but otherwise it works)
The idea is you're replacing the with line in:
# The ordinary 'with' usage that the decorator trick emulates:
with bar() as foo:
    do_something_with(foo)
    do_something_else_with(foo)
    # etc...
with the decorated function "declaration" in:
# "#execute_with_context_manager" was a garbled "@" marker; restored.
@execute_with_context_manager( bar() )
def dummyname( foo ):
    do_something_with(foo)
    do_something_else_with(foo)
    # etc...
but getting the same behaviour (the do_something_... code executed). Note the decorator changes the function declaration into an immediate invocation which is more than a little evil.
Since you need to exit the context manager both during errors and not errors, I don't think it's possible to do a generic usecase with metaclasses, or in fact at all. You are going to need try/finally blocks for that.
But maybe it's possible to do something else in your case. That depends on what you use the context manager for.
Using __del__ can help in some cases, like deallocating resource, but since you can't be sure it gets called, it can only be used of you need to release resources that will be released when the program exits. That also won't work if you are handling exceptions in the __exit__ method.
I guess the cleanest method is to wrap the whole context management in a sort of context managing call, and extract the code block into a method. Something like this (untested code, but mostly stolen from PEP 343):
def call_as_context_manager(mgr, function):
    """Run ``function(value)`` under context manager ``mgr`` exactly as
    a 'with' statement would (inlined expansion per PEP 343)."""
    exit = mgr.__exit__
    value = mgr.__enter__()
    clean = True
    try:
        try:
            function(value)
        except:
            clean = False
            # __exit__ may suppress the exception by returning truthy.
            if not exit(*sys.exc_info()):
                raise
    finally:
        if clean:
            exit(None, None, None)
How about this?
def improvize_context_manager(*args, **kwargs):
    """Decorator-based 'with' substitute.

    Accepts exactly one context manager, either positionally
    (parallels ``with lock:``) or as a keyword, in which case the
    keyword names the parameter the managed value is passed under
    (``my_lock=lock`` parallels ``with lock as my_lock``).  The
    decorated function runs immediately; the decorator returns None.
    """
    assert (len(args) + len(kwargs)) == 1
    if args:
        context_manager = args[0]
        as_ = None
    else:  # It's in kwargs
        # list() so this also works on Python 3, where items() is a view.
        (as_, context_manager) = list(kwargs.items())[0]

    def decorator(f):
        exit_ = context_manager.__exit__  # Not calling it yet
        enter_ = context_manager.__enter__()
        exc = True
        try:
            try:
                if as_:
                    # BUGFIX: was f(*{as_: enter_}), which unpacks the
                    # dict's KEYS as positional args; ** passes enter_
                    # under the requested keyword name.
                    f(**{as_: enter_})
                else:
                    f()
            except:
                exc = False
                if not exit_(*sys.exc_info()):
                    raise
        finally:
            if exc:
                exit_(None, None, None)
        return None
    return decorator
Usage:
# "#improvize_context_manager" was a garbled "@" marker; restored.
@improvize_context_manager(lock)
def null():
    do(stuff)
Which parallels the with keyword without as.
Or:
# "#improvize_context_manager" was a garbled "@" marker; restored.
@improvize_context_manager(my_lock=lock)
def null(my_lock):
    do(stuff_with, my_lock)
Which parallels the with keyword with the as.
If you are OK with using def just to get a block, and decorators that immediately execute, you could use the function signature to get something more natural for the named case.
import sys

# NOTE(review): only valid on Python < 2.6, where 'with' was not yet a
# reserved keyword -- which is the whole point of this 2.4 workaround.
# On any modern Python this is a syntax error; kept verbatim.
def with(func):
    # Immediately-executed decorator: the context managers are smuggled
    # in as the decorated function's default-argument values.
    def decorated(body = func):
        # func_defaults is the Python 2 spelling of __defaults__.
        contexts = body.func_defaults
        try:
            exc = None, None, None
            try:
                # Enter in declaration order, then run the body.
                for context in contexts:
                    context.__enter__()
                body()
            except:
                exc = sys.exc_info()
                raise
        finally:
            # Exit in reverse order, passing the captured exception info
            # (or (None, None, None) on success) to every manager.
            for context in reversed(contexts):
                context.__exit__(*exc)
    decorated()
# Python 2 demo for the 'with'-decorator above (print statements and a
# string exception; will not run on Python 3).  The "#with" lines below
# were "@with" decorator markers garbled by formatting.
class Context(object):
    # Tiny tracing context manager used to demo the decorator.
    def __enter__(self):
        print "Enter %s" % self
    def __exit__(self, *args):
        print "Exit %s(%s)" % (self, args)

x = Context()

#with
def _(it = x):
    print "Body %s" % it

#with
def _(it = x):
    print "Body before %s" % it
    # Py2-only string exception; __exit__ still receives the exc info.
    raise "Nothing"
    print "Body after %s" % it