I'm on python 2.7, tornado 4.5
The following code doesn't work: the except block doesn't get triggered. I don't understand why?
# NOTE(review): code quoted from the question. The "#gen.coroutine" line was a
# mangled "@gen.coroutine" decorator (all "@" became "#" in this paste);
# restored below. `gen` is tornado.gen (Tornado 4.5, Python 2.7).
@gen.coroutine
def co_do_thing():
    yield gen.Task(do_thing)


def do_thing(callback):
    try:
        a, b = ...  # placeholder elision from the question
        result = maybe_throw(a, b, callback)
    except Exception as e:
        # this block is not called
        # NOTE(review): if the "a, b = ..." line itself raised, `a` would be
        # unbound here and `if a` would raise NameError — confirm intent.
        if a:
            raise ApiError("called with A")
        elif b:
            raise ApiError("called with B")
        else:
            raise e


def maybe_throw(arg1, arg2, callback):
    # Fails ~50% of the time; otherwise completes the gen.Task by invoking
    # the tornado-supplied callback.
    if random.random() < 0.5:
        raise AssertionError("yikes")
    callback("done")
Instead, I can catch the exception in co_do_thing around the call to gen.Task; but then I don't have the context of how I called maybe_throw. In my case, it makes more sense for maybe_throw to raise a lower-level exception, and for the caller to convert that to a human-readable error depending on the inputs.
Do I just need to refactor this to call gen.Task at a lower level? That would be annoying :/
As I tested it, it seems to work: an exception is raised. Below is a simple test suite:
import q # q.py is the file with question's code
import unittest
from mock import patch, Mock
from tornado.testing import gen_test, AsyncTestCase
class MyTest(AsyncTestCase):
    """Exercises q.co_do_thing for both the success and the error path.

    NOTE(review): the "#gen_test" lines were mangled "@gen_test" decorators;
    restored below.
    """

    def setUp(self):
        # Patch q.random so maybe_throw is deterministic in each test.
        self.mock_random = patch('q.random').start()
        AsyncTestCase.setUp(self)

    def tearDown(self):
        AsyncTestCase.tearDown(self)
        patch.stopall()

    @gen_test
    def test_no_error(self):
        self.mock_random.return_value = 0.7  # >= 0.5 -> maybe_throw succeeds
        res = yield q.co_do_thing()
        self.assertEqual(res, 'done')

    @gen_test
    def test_exception(self):
        self.mock_random.return_value = 0.1  # < 0.5 -> maybe_throw raises
        with self.assertRaises(Exception) as ctx:
            yield q.co_do_thing()
        # a == 22 is truthy, so do_thing re-raises as "called with A".
        self.assertEqual(ctx.exception.message, 'called with A')
# Standard unittest entry point so the test file can be run directly.
if __name__ == '__main__':
    unittest.main()
And tests passed:
..
----------------------------------------------------------------------
Ran 2 tests in 0.002s
OK
And here is q.py; I've added a return statement to make it meaningfully testable.
from random import random
from tornado import gen
# NOTE(review): "#gen.coroutine" was a mangled "@gen.coroutine"; restored.
@gen.coroutine
def co_do_thing():
    res = yield gen.Task(do_thing)
    # added: to enable to test it meaningfully
    # (Python 2 generators cannot `return res`, hence gen.Return.)
    raise gen.Return(res)
def do_thing(callback):
    """Call maybe_throw and translate any failure into a readable Exception.

    Because a (22) is always truthy, every failure surfaces as
    Exception("called with A").
    """
    try:
        a, b = 22, 33
        result = maybe_throw(a, b, callback)
    except Exception as err:
        if a:
            raise Exception("called with A")
        if b:
            raise Exception("called with B")
        raise err
def maybe_throw(arg1, arg2, callback):
    # Fails randomly (~50% of calls) to exercise the error path; otherwise
    # completes the gen.Task by invoking the tornado-supplied callback.
    if random() < 0.5:
        raise AssertionError("yikes")
    callback("done")
Related
Currently I have a piece of code that operates as such:
# NOTE(review): intentionally invalid code quoted from the question — a bare
# `except:` must be the last handler in a try statement, so the second one
# below is the SyntaxError ("default 'except:' must be last") being asked about.
try:
    function1()
except:
    function2()
except:
    function3()
But I get thrown the error: default 'except:' must be last. Basically if function1 fails, try function2. If function 2 fails, try function 3.
How would I achieve this?
You have to nest the exception handlers to match your handling logic.
# Nested handlers: if function1 fails, try function2; if that also fails,
# fall back to function3.  NOTE(review): a bare `except:` also catches
# KeyboardInterrupt/SystemExit — prefer `except Exception:` in real code.
try:
    function1()
except:
    try:
        function2()
    except:
        function3()
If you control the function code, then I suggest that you have each one return a status code ... say, 0 for success and -1 for failure.
# Try each function in turn, stopping at the first truthy return value.
# NOTE(review): the surrounding text suggests 0 = success and -1 = failure;
# under that convention `if f(): break` stops on *failure*, not success —
# confirm which convention is intended.
for f in [function1, function2, function3]:
    if f(): break
Does that handle your use case? You keep executing functions until one succeeds.
If you would like to create longer chains of error handlers you might consider using a decorator.
def try_except_decorator(except_func):
    """Build a decorator that runs the decorated function and falls back to
    except_func() if it raises.

    Improvements over the original:
    - forwards *args/**kwargs and returns the wrapped function's (or the
      fallback's) result instead of discarding them;
    - catches Exception rather than a bare except, so KeyboardInterrupt and
      SystemExit still propagate;
    - functools.wraps preserves the wrapped function's metadata.
    """
    import functools  # local import: this snippet has no shared import block

    def decorator(try_func):
        @functools.wraps(try_func)
        def wrapped(*args, **kwargs):
            try:
                return try_func(*args, **kwargs)
            except Exception:
                return except_func()
        return wrapped
    return decorator
# NOTE(review): the "#try_except_decorator(...)" lines were mangled
# "@try_except_decorator(...)" decorators; restored below so the chain
# function1 -> function2 -> function3 actually fires.
def function3():
    print('function3')


@try_except_decorator(function3)
def function2():
    print('function2')
    raise ValueError


@try_except_decorator(function2)
def function1():
    print('function1')
    raise ValueError


function1()
Assuming you don't control functionX, you could write a class that generically wraps functions and suppresses exceptions. This is similar to the decorator idea, but if you can change the source to add the decorator... why not just change the function itself?
class Supressor:
    """Wrap a callable and convert listed exception types into a sentinel value.

    (Class name kept exactly as in the original — misspelling included — so
    existing callers keep working.)

    Fixes relative to the original:
    - ``passs`` (a typo that raises NameError at class-definition time) -> ``pass``;
    - ``__call__`` computed ``retval`` but never returned it, so successful
      calls always yielded None; the result is now returned;
    - the manual isinstance loop is replaced by passing the exception tuple
      directly to isinstance, which accepts a tuple of types.
    """

    def __init__(self, func, exceptions=(Exception,), exc_val=None):
        self._func = func          # wrapped callable
        self._exc = exceptions     # exception types to suppress
        self._exc_val = exc_val    # value returned when an exception is suppressed
        try:
            self.__doc__ = func.__doc__
        except AttributeError:
            pass  # func has no __doc__ attribute at all

    def __call__(self, *args, **kw):
        try:
            return self._func(*args, **kw)
        except Exception as e:
            if isinstance(e, self._exc):
                return self._exc_val
            raise
# defined in other module but placed here for test
def function1():
    print('f1')
    raise ValueError()
    return 1  # unreachable: illustrates the "real" return value


def function2():
    print('f2')
    raise KeyError()
    return 2  # unreachable


def function3():
    print('made it')
    return 3


# Rebind each function to a suppressing wrapper; a suppressed failure yields
# None (the default exc_val), which drives the fallback chain below.
# NOTE(review): a successful call whose genuine result is None would be
# indistinguishable from a suppressed failure here.
function1 = Supressor(function1)
function2 = Supressor(function2)
function3 = Supressor(function3)

# Try function1, then function2, then function3, stopping at the first call
# that does not come back as None.
if function1() is None:
    if function2() is None:
        function3()
I have a thread object that I can't distribute across a ProcessPoolExecutor, but would like to return a future. If I already have a future, is there a way to apply a function to its completed value, e.g., `Future a -> (a -> b) -> Future b`?
import concurrent.futures
import threading


def three(x):
    """Toy task submitted to the pool: return x + 2."""
    return 2 + x


if __name__ == '__main__':
    trackedItem = (3, threading.Event())
    pool = concurrent.futures.ProcessPoolExecutor(3)
    # FIX(review): the original called q.submit, but no name `q` exists in
    # this snippet — the executor just created above is `pool`.
    poolJob = (pool.submit(three, trackedItem[0]), trackedItem[1])  # (Future(int), Event)
    # *** something magic goes here ***
    # Trying to transform it into Future(int, Event)
Here's a way which uses a simpler setup code, without threading.Event as that doesn't seem necessary to solve the problem. Basically, you can create future_b as a new Future() yourself, and use the add_done_callback method on future_a to set the result of future_b. Here, func_a is the computation to compute the result of future_a, and func_b is the computation to compute the result of future_b using the result of future_a.
from concurrent.futures import ProcessPoolExecutor, Future
def func_a(x):
    """Computation behind future_a: add 2 to x."""
    shifted = 2 + x
    return shifted
def func_b(x):
    """Computation behind future_b: scale x by 10."""
    scaled = 10 * x
    return scaled
if __name__ == '__main__':
    pool = ProcessPoolExecutor(3)
    future_a = pool.submit(func_a, 3)
    # Create the dependent future by hand (normally only Executors do this —
    # see the docs caveat quoted below in the answer text).
    future_b = Future()
    future_b.set_running_or_notify_cancel()

    def callback(f):
        # Runs when future_a completes; chains its result into future_b.
        x = f.result()
        y = func_b(x)
        future_b.set_result(y)

    future_a.add_done_callback(callback)
    print(future_b.result())  # 50
If you want a helper function to do this, you can write one: map_future takes a future and a mapping function, and returns the new mapped future as required. This version handles an exception in case f.result() or func_b throws one:
def map_future(future_a, func):
    """Return a Future that resolves to func(future_a.result()).

    If future_a failed, or func raises, the returned future carries that
    exception instead of a result.
    """
    mapped = Future()
    mapped.set_running_or_notify_cancel()

    def _propagate(completed):
        try:
            mapped.set_result(func(completed.result()))
        except Exception as exc:
            mapped.set_exception(exc)

    future_a.add_done_callback(_propagate)
    return mapped
Caveats: this goes against the advice in the documentation for the Future class, which says:
Future instances are created by Executor.submit() and should not be created directly except for testing.
Also, if you have any errors which aren't subclasses of Exception in the callback, they will be "logged and ignored" according to the docs. I've chosen to only catch Exception in this code for simplicity, but you might prefer the sys.exc_info()[0] way of catching every possible thing that could be raised.
@kaya3 provided a great answer, but I ran into a problem when adding exception handling to it to close the pool. You can find my example cpchung_example below to see how to compose futures functionally. It still remains to add exception handling, for which I don't have a good solution yet.
For comparison, I put them all into one file:
from concurrent.futures import ProcessPoolExecutor, Future
from concurrent.futures.thread import ThreadPoolExecutor
def map_future(future_a, func):
    """Return a Future resolving to func(future_a.result()).

    Any exception raised by future_a or by func becomes the returned
    future's exception instead of a result.
    """
    future_b = Future()
    future_b.set_running_or_notify_cancel()
    def callback(f):
        # Invoked when future_a completes (add_done_callback below).
        try:
            x = f.result()
            y = func(x)
            future_b.set_result(y)
        except Exception as e:
            future_b.set_exception(e)
    future_a.add_done_callback(callback)
    return future_b
def func_a(x):
    """Add 2 to x (first stage of the example chain)."""
    result = 2 + x
    return result
def func_b(x):
    """Triple x (second stage of the example chain)."""
    result = 3 * x
    return result
def func_c(x):
    # Always fails — used to demonstrate how exceptions propagate through a
    # map_future chain.
    raise NameError('Hi There')
    return 4 * x  # unreachable
def kaya3_example():
    # Reproduction of the accepted answer; relies on the module-global
    # `pool` created under __main__.
    future_a = pool.submit(func_a, 3)
    future_b = Future()
    future_b.set_running_or_notify_cancel()
    def callback(f):
        # Chains future_a's result into future_b via func_b.
        x = f.result()
        y = func_b(x)
        future_b.set_result(y)
    future_a.add_done_callback(callback)
    # NOTE(review): the original "# 50" comment was copied from the answer;
    # here func_b is 3*x, so this actually prints 15.
    print(future_b.result())
def exception_handling():
    # Author-marked WIP: tries to shut the global pool down when the chain
    # fails (func_c always raises NameError).
    # NOTE(review): pool.shutdown() is called both in the handler and again
    # unconditionally afterwards — the cleanup likely belongs in a single
    # finally block instead.
    try:
        future_a = pool.submit(func_a, 3)
        future_b = map_future(future_a, func_b)
        future_c = map_future(future_b, func_c)
        print(future_c.result())
    except Exception as e:
        pool.shutdown()
    pool.shutdown()
def f(x, y):
    """Return the product of the two arguments."""
    product = x * y
    return product
def cpchung_example():
    # NOTE(review): a.result() and b.result() block the submitting thread,
    # so c is only submitted after both finish — this is sequential
    # composition, not callback-based future chaining.
    with ThreadPoolExecutor(max_workers=1) as executor:
        a = executor.submit(f, 2, 3)
        b = executor.submit(f, 4, 5)
        c = executor.submit(f, a.result(), b.result())
        print(c.result())
if __name__ == '__main__':
    # `pool` is a module global used by the example functions above.
    pool = ProcessPoolExecutor(3)
    kaya3_example()
    cpchung_example()
    # exception_handling() # not working, still wip
I'm new to python and trying to remove/trim gevent stacktrace output when an exception is raised. I read somewhere that I can make it happen by using AsyncResult, however it seems like I can't figure out how to use this.
Here is an example I started with and iterated over to make it similar to the real code I'm troubleshooting, but I got stuck in the last phase when I tried to add my_decor to work().
Any help fixing this is much appreciated.
from gevent.event import AsyncResult
import gevent
from functools import wraps
def my_decor(k, *args, **kwargs):
    # NOTE(review): question code, pasted with a mangled decorator — the
    # "#wraps(k)" line below was "@wraps(k)".  As written this is broken:
    # when used as a plain decorator, k is the decorated function, but
    # my_decor spawns wrapper in a greenlet and returns that greenlet
    # instead of a callable (the corrected version appears later in the file).
    #wraps(k)
    def wrapper(*args, **kwargs):
        r = AsyncResult()
        try:
            value = k()
        except Exception as e:
            r.set_exception(e)
        else:
            r.set(value)
        # Returns the captured exception object (if any) rather than raising.
        return r.exception or r.value
    result = gevent.spawn(wrapper, k)
    return result
def f():
    """Factory: return a thunk that always raises Exception('tttttttt')."""
    def inner():
        if True:
            raise Exception('tttttttt')
    return inner
def p():
    """Factory: return a thunk that always raises Exception('ppppppppppppp')."""
    def inner():
        if True:
            raise Exception('ppppppppppppp')
    return inner
#my_decor
def work():
    # NOTE(review): "#my_decor" above was a mangled "@my_decor" decorator
    # from the question; with the question's broken my_decor this decoration
    # is what the asker got stuck on.
    # Spawn the two failing thunks; joinall waits for both, then get()
    # re-raises the first greenlet's stored exception.
    foo1 = gevent.spawn(f())
    bar1 = gevent.spawn(p())
    gevent.joinall([foo1, bar1])
    return foo1.get() or bar1.get()
Found the answer, figured it might be a help to those with the same problem.
from gevent.event import AsyncResult
import gevent
from functools import wraps
def my_decor(k):
    """Decorator: run k, capture its outcome in an AsyncResult, and return
    the stored exception object (if any) or the value — so callers never see
    a traceback.

    NOTE(review): "#wraps(k)" was a mangled "@wraps(k)" decorator; restored
    so the wrapper keeps k's name and docstring.
    """
    @wraps(k)
    def wrapper(*args, **kwargs):
        r = AsyncResult()
        try:
            value = k(*args, **kwargs)
        except Exception as e:
            r.set_exception(e)
        else:
            r.set(value)
        # r.exception is the stored exception or None, so failures are
        # returned (not raised) and successes fall through to r.value.
        return r.exception or r.value
    return wrapper
def f(msg):
    """Return a thunk that raises, wrapped so the exception is captured.

    NOTE(review): "#my_decor" was a mangled "@my_decor" decorator; restored —
    without it the returned thunk raises instead of returning the exception.
    """
    @my_decor
    def foo():
        if True:
            raise Exception('tttttttt %s' % msg)
        # print('test')
    return foo
def p(msg):
    """Return a thunk that raises, wrapped so the exception is captured.

    NOTE(review): "#my_decor" was a mangled "@my_decor" decorator; restored.
    """
    @my_decor
    def bar():
        if True:
            # FIX(review): the original passed msg as a second Exception
            # argument ("... %s", msg) instead of %-formatting it; made
            # consistent with f() above.
            raise Exception('ppppppppppppp %s' % msg)
    return bar
def work():
    test = "test"
    seti = "set"
    foo1 = gevent.spawn(f(test)) # returns a function that coroutine uses
    bar1 = gevent.spawn(p(seti))
    gevent.joinall([foo1, bar1])
    # Each greenlet's get() yields the decorated thunk's return value (the
    # captured exception object), so no gevent traceback is printed.
    return foo1.get() or bar1.get()

res = work()
print res  # NOTE(review): Python 2 print statement
In the following code, I'm trying to create a class 'TimedExecutor' which would stop the execution of the function(bar) passed to its method 'execute' if exceeds a certain time limit. But, the program execution doesn't stop, even though the error message is displayed.
Note: We must not make any changes to the function bar(), as it is provided by an external module.
import signal
import time
class MyError(Exception):
    """Root of this module's exception hierarchy."""


class MyInheritedError(MyError):
    """Raised by the SIGALRM handler when a timeout fires."""
class TimeoutListener(object):
    """Context manager that raises MyInheritedError via SIGALRM if the body
    runs longer than timeout_seconds (Unix-only: relies on signal.alarm).
    """

    def __init__(self, timeout_seconds, error_message="Timeout executing."):
        self.timeout_seconds = timeout_seconds
        self.error_message = error_message
        # Kept for backward compatibility with the original; no longer used.
        self.alarm = None

    def __enter__(self):
        # Install the handler and start the countdown.
        signal.signal(signal.SIGALRM, self._handle_timeout)
        signal.alarm(self.timeout_seconds)
        return self

    def __exit__(self, listener_type, value, traceback):
        # FIX(review): the original tested `if self.alarm` (always None, it
        # was never assigned) and only cancelled the alarm in the dead else
        # branch.  Always cancel any pending alarm on exit.
        signal.alarm(0)

    def _handle_timeout(self, signum, frame):
        print("Got the signum %s with frame: %s" % (signum, frame))
        raise MyInheritedError(self.error_message + "aditya")
class TimedExecutor(object):
    """Runs a callable under a TimeoutListener-enforced time limit."""

    # NOTE(review): "#staticmethod" was a mangled "@staticmethod"; restored.
    @staticmethod
    def execute(timeout_secs, functor, *args, **kwargs):
        """Call functor(*args, **kwargs); raise MyInheritedError if it does
        not finish within timeout_secs seconds (re-raised after logging).
        """
        msg = "Timeout executing method - %s." % functor.__name__
        timeout_signal = TimeoutListener(timeout_secs, error_message=msg)
        try:
            with timeout_signal:
                output = functor(*args, **kwargs)
        except MyInheritedError as ex:
            print("%s did not complete in %s: %s."
                  % (functor.__name__, timeout_secs, repr(ex)))
            raise
        return output
def bar():
    # Simulates the external, unmodifiable workload.  Crucially, its
    # per-iteration except swallows MyInheritedError: the timeout raised
    # inside time.sleep() is printed and the loop simply continues — which
    # is exactly the behaviour the question is about.
    for _ in range(5):
        try:
            time.sleep(1)
            print("SLEEPING")
        except MyInheritedError as ex:
            print ex  # NOTE(review): Python 2 print statement

ob = TimedExecutor.execute(2, bar)
Your functor is swallowing the exception you intend to be fatal.
It is bar()'s except clause in one of its loops that prints and then discards the error raised by the TimeoutListener context manager. Then the loop resumes.
bar() should probably not be aware of the exception your TimedExecutor can raise. Instead, bar()'s caller who invokes .execute() should be aware of it:
from aditya.utils import TimedExecutor, TimeoutException
...
try:
TimedExecutor.execute(2, bar)
except TimeoutException:
print("Timed out executing bar")
I have a callback chain with an errback at the end. If any of the callbacks fail, I need to pass an object to be used on errBack.
How can I pass an object from callback to the errback?
The following code exemplifies what I want to do:
from twisted.internet.defer import FAILURE
from twisted.internet import defer
class CodMsg(object):
    """Value object pairing an error code with its message."""

    def __init__(self, code, msg):
        self.msg = msg
        self.code = code
class Resource(object):
    # NOTE(review): question code, Python 2 (`raise FAILURE, cdm` syntax and
    # print statements).  The "#classmethod" lines were mangled "@classmethod"
    # decorators.  FAILURE from twisted.internet.defer is not an exception
    # class, which is part of what the question is asking about — the answer
    # below raises a custom Exception subclass instead.
    #classmethod
    def checkCondition(cls, result):
        if result == "error":
            cdm = CodMsg(1, 'Error 1')
            raise FAILURE, cdm
        else:
            return "ok"

    #classmethod
    def erBackTst (cls, result):
        ####### How to get the value of cdm here? ######## <<<===
        print 'Error:'
        print result
        return result
# Drive the deferred: checkCondition raises, so erBackTst receives the
# resulting Failure.  (Python 2 print statement below.)
d = defer.Deferred()
d.addCallback(Resource.checkCondition)
d.addErrback(Resource.erBackTst)
d.callback("error")
print d.result
In this case you can just raise an exception, containing all info you need
For example:
from twisted.internet import defer
class MyCustomException(Exception):
    """Exception carrying both a human-readable message and a numeric code,
    so the errback can inspect them via failure.value."""

    def __init__(self, msg, code):
        self.message = msg
        self.code = code
def callback(result):
    print result  # NOTE(review): Python 2 print statement
    # Raising here hands MyCustomException to the deferred's errback, where
    # it is accessible as failure.value.
    raise MyCustomException('Message', 23)
def errback(failure):
    # failure.value is an exception instance that you raised in callback
    # (Python 2 print statements below.)
    print failure.value.message
    print failure.value.code
# Fire the chain: callback raises MyCustomException, so errback prints the
# embedded message and code.
d = defer.Deferred()
d.addCallback(callback)
d.addErrback(errback)
d.callback("error")
Also, for a better understanding of deferreds and async programming, you can read this nice Twisted tutorial: http://krondo.com/an-introduction-to-asynchronous-programming-and-twisted/.
It uses a slightly outdated Twisted version in its examples, but it is still an excellent resource for starting to learn Twisted.