Python AsyncHTTPClient inside Tornado RequestHandler throws exception

I'm trying to call an endpoint with Tornado's AsyncHTTPClient from a RequestHandler, but it throws a runtime exception: This event loop is already running.
class RegistrationHandler(tornado.web.RequestHandler):
    def post(self, *args, **kwargs):
        call_async_register("some params")

def call_async_register(parameters):
    def call():
        http_client = AsyncHTTPClient()
        future = Future()
        http_request = HTTPRequest(url, request_type.name, headers={'X-Peering': '1'}, body=body)

        def handle_future(f: Future):
            future.set_result(f.result())

        fetched_future = http_client.fetch(http_request)
        fetched_future.add_done_callback(handle_future)
        return future

    try:
        instance = io_loop.IOLoop.current()
        response = instance.run_sync(call)
        return response.body.decode()
    except Exception as err:
        self.logger.exception("Account Request Failed: {}".format(err))
        return None

Here's the problem:
instance = io_loop.IOLoop.current()
response = instance.run_sync(call)
run_sync itself tries to start the IOLoop, but, as is apparent from your code, the loop is already running, so you get the error.
If you want to send the value returned by the call() method back to the user, convert your methods to coroutines (use the async/await syntax).
Example:
class RegistrationHandler(tornado.web.RequestHandler):
    async def post(self, *args, **kwargs):
        response = await self.call_async_register("some params")
        self.write(response)

    async def call_async_register(self, parameters):
        http_client = AsyncHTTPClient()
        http_request = HTTPRequest(url, request_type.name, headers={'X-Peering': '1'}, body=body)
        try:
            response = await http_client.fetch(http_request)
            return response.body.decode()
        except Exception as err:
            self.logger.exception("Account Request Failed: {}".format(err))
            return None

Related

Pytest raises not working for custom exception

I have the following defined in an exceptions.py file:
class Error(Exception):
    """Base exception raised by api wrapper"""

    def __init__(self, message: str):
        self.message = message
        super().__init__(self.message)

# HTTP response exceptions
class ApiBadRequestError(Error):
    """Bad Request -- Incorrect parameters."""

    def __init__(self, message: str):
        self.message = message
        super().__init__(self.message)
I then have a function that correctly raises an ApiBadRequestError exception.
In pytest I'm doing the following:
def test_handle_request_response_raises_correct_exception_for_response_code(
    self, status_code, exception_type, client, create_response
):
    response = create_response(status_code=status_code)
    with pytest.raises(ApiBadRequestError) as e:
        a = client._check_response_codes(response)
which fails the test because inside pytest.raises it's doing isinstance(e, ApiBadRequestError), which is returning False. If, however, I change the test to the following:
def test_handle_request_response_raises_correct_exception_for_response_code(
    self, status_code, exception_type, client, create_response
):
    response = create_response(status_code=status_code)
    with pytest.raises(Exception) as e:
        a = client._check_response_codes(response)
it passes, as the raised exception is seen as an instance of Exception even though it is an ApiBadRequestError.
Any help would be really appreciated as I'm completely stumped here.
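For reference, pytest.raises does match subclasses of the requested exception, so a failing isinstance check usually means two distinct class objects are involved (for example, the exceptions module being imported under two different paths). A minimal, self-contained sketch with stand-in names (check_response_codes here is hypothetical) shows the expected behaviour:
# Minimal sketch: pytest.raises accepts the exact exception class and its subclasses.
import pytest

class Error(Exception):
    """Base exception raised by an API wrapper."""

class ApiBadRequestError(Error):
    """Bad Request -- incorrect parameters."""

def check_response_codes(status_code):
    # Hypothetical stand-in for client._check_response_codes from the question.
    if status_code == 400:
        raise ApiBadRequestError("incorrect parameters")

def test_raises_custom_exception():
    # Passes: the raised ApiBadRequestError satisfies pytest.raises(ApiBadRequestError)
    # and would also satisfy pytest.raises(Error) or pytest.raises(Exception).
    with pytest.raises(ApiBadRequestError):
        check_response_codes(400)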

verify_jwt_in_request() returns None when called in custom Flask Decorator

I am trying to create a custom decorator that makes use of verify_jwt_in_request() from the flask-jwt-extended library. My code is laid out as below:
@app.route("/test-auth", methods=["POST"])
@custom_auth_required
def test_auth():
    print(verify_jwt_in_request())
    print(get_jwt_identity())
    return Response(json.dumps({"test": "test"}), status=HTTP_200_OK,
                    mimetype='application/json')

def custom_auth_required(f):
    @wraps(f)
    def decorated(*args, **kwargs):
        params = request.json
        headers = request.headers
        print(verify_jwt_in_request())
        print(get_jwt_identity())
        try:
            if verify_jwt_in_request():
                print("validated")
            else:
                print("invalid")
            return f(*args, **kwargs)
        except KeyError:
            raise AuthError({"code": "something", "description": "something else"}, 401)
    return decorated
For some reason, the prints in both the route and the decorator show None for both verify_jwt_in_request and get_jwt_identity.
Is there something I am missing in my code?
verify_jwt_in_request does not return anything. It will raise an appropriate exception if anything in the token decoding chain fails.
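So treat verify_jwt_in_request() as a check that either succeeds silently or raises, rather than as something whose return value you inspect. A minimal sketch of the decorator along those lines (AuthError here is a stand-in for the question's own exception class, and the broad except is only for illustration):
from functools import wraps

from flask_jwt_extended import get_jwt_identity, verify_jwt_in_request

class AuthError(Exception):
    # Stand-in for the AuthError used in the question.
    def __init__(self, error, status_code):
        super().__init__(error)
        self.error = error
        self.status_code = status_code

def custom_auth_required(f):
    @wraps(f)
    def decorated(*args, **kwargs):
        try:
            # Called for its side effect only: raises if the token is missing or invalid.
            verify_jwt_in_request()
        except Exception as err:  # in practice, catch flask-jwt-extended's specific exceptions
            raise AuthError({"code": "invalid_token", "description": str(err)}, 401)
        # Verification succeeded, so the identity is now available.
        print(get_jwt_identity())
        return f(*args, **kwargs)
    return decorated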

threaded and asyncio API library

In Python I am trying to create an API for a connected device. I want it to be available for both threaded applications (using requests) and async applications (using aiohttp).
What I've come up with is wrapping the get method of both requests and aiohttp in a decorator. The decorator is passed in at init, and API calls are explicitly wrapped with it.
It works, but I'd like to know what others think of this approach. Are there better ways, or will I run into issues later on?
Any help appreciated!
def threaded_gett(function):
    # The threaded decorator
    def wrapper(*args, **kwargs):
        url, params = function(*args)
        response = requests.get(url, params)
        _json = response.json()
        return function.__self__.process_response(_json)
    return wrapper

def async_gett(function):
    # The async decorator
    def wrapper(*args, **kwargs):
        url, params = function(*args)
        try:
            resp = yield from function.__self__.session.get(url, params=params)
        except Exception as ex:
            lgr.exception(ex)
        else:
            _json = yield from resp.json()
            yield from resp.release()
            return function.__self__.process_response(_json)
    # wrapping the wrapper in the asyncio coroutine decorator
    wrapper = asyncio.coroutine(wrapper)
    return wrapper

class ThreadedApi(BaseApi):
    def __init__(self, threaded_gett):
        BaseApi.__init__(self, threaded_gett)

class AsyncApi(BaseApi):
    def __init__(self, async_gett):
        BaseApi.__init__(self, async_gett)

class BaseApi():
    def __init__(self, get_wrapper):
        self.status = get_wrapper(self.status)

    def status(self):
        return <status path>
Your code is not complete but yes, the approach might work in simple cases (when .process_response() is very generic and could be applied to all API calls).
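As a point of comparison, here is a minimal, runnable sketch of the threaded half of that idea, assembled from the question's pieces; the device URL, the status path, and the trivial process_response are all hypothetical:
import requests

def threaded_gett(function):
    # Wraps a path-building method so that calling it performs a blocking GET.
    def wrapper(*args, **kwargs):
        url, params = function(*args)
        response = requests.get(url, params=params)
        # function is a bound method, so __self__ is the API instance.
        return function.__self__.process_response(response.json())
    return wrapper

class BaseApi:
    def __init__(self, get_wrapper):
        # Rebind status on the instance to a wrapped version that performs the request.
        self.status = get_wrapper(self.status)

    def process_response(self, _json):
        # Generic post-processing; the caveat above is that this must suit every call.
        return _json

    def status(self):
        # Hypothetical device URL and query parameters.
        return "http://device.local/status", {}

class ThreadedApi(BaseApi):
    def __init__(self):
        BaseApi.__init__(self, threaded_gett)

# api = ThreadedApi()
# api.status()  # performs the GET and returns the processed JSON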

Python Tornado gen.engine exception handling

I am using Tornado 2.4, and I am trying to integrate an async call.
Let's say I need to access a remote resource through an HTTP call, so I made this function in a tornado.web.RequestHandler:
@tornado.web.asynchronous
def get(self, *args):
    try:
        self.remote_call()
        return 'OK'
    except Exception, e:
        self.handle_exception(e)

@gen.engine
def remote_call(self):
    http_client = httpclient.AsyncHTTPClient()
    response = yield gen.Task(http_client.fetch, 'http://google.com')
    self.process(response)
So my problem is: since remote_call yields a Task, it obviously exits the remote_call function and get continues. Then, when the task completes, the engine processes the response.
But if an error happens in self.process(response), it will not be caught by my except, since that part of the code is not actually called here but inside the engine, over which I have no control.
So my question is: can I get some control over this engine? Can I handle errors, and can I ask it to perform some specific task at the end of the function?
I could do this directly in the function, like this:
@tornado.web.asynchronous
def get(self, *args):
    self.remote_call()
    return 'OK'

@gen.engine
def remote_call(self):
    http_client = httpclient.AsyncHTTPClient()
    response = yield gen.Task(http_client.fetch, 'http://google.com')
    try:
        self.process(response)
    except Exception, e:
        self.handle_exception(e)
But I want to make the exception handling generic and not copy-paste this into every one of my handlers.
So, is there a way to get access to Tornado's engine?
Note that I am using Tornado 2.4 but I can migrate to 3.0 if needed.
Thanks
You can handle it in 2.4 by decorating your get call with @gen.engine, wrapping the call to self.remote_call in a gen.Task, and then yielding from that:
@tornado.web.asynchronous
@gen.engine
def get(self, *args):
    try:
        yield gen.Task(self.remote_call)
    except Exception, e:
        self.handle_exception(e)
    self.finish()  # Make sure you call this when `get` is asynchronous.

@gen.engine
def remote_call(self):
    http_client = httpclient.AsyncHTTPClient()
    response = yield gen.Task(http_client.fetch, 'http://google.com')
    self.process(response)
This will allow you to handle the exception in get, though you'll still see a traceback from the exception being raised in remote_call.
However, I highly recommend you upgrade. Tornado is now on version 4.0. With 3.0 or later, you can use gen.coroutine instead of gen.engine and web.asynchronous:
@gen.coroutine
def get(self, *args):
    try:
        yield self.remote_call()
    except Exception, e:
        self.handle_exception(e)
    self.finish()

@gen.coroutine
def remote_call(self):
    http_client = httpclient.AsyncHTTPClient()
    response = yield http_client.fetch('http://google.com')
    self.process(response)
coroutine properly suppresses the traceback from any exception thrown in remote_call, as well as letting you handle it in get.
OK, thanks, it works. I had to do this, however:
@tornado.web.asynchronous
@gen.engine
def get(self, *args):
    try:
        yield gen.Task(lambda cb: self.remote_call())
    except Exception, e:
        self.handle_exception(e)
    self.finish()  # Make sure you call this when `get` is asynchronous.

@gen.engine
def remote_call(self):
    http_client = httpclient.AsyncHTTPClient()
    response = yield gen.Task(http_client.fetch, 'http://google.com')
    self.process(response)

Refactoring a Tornado Request Handler

So I have repeated code that I use for many GETs -- checking whether the response was cached previously and returning it if it is available.
The code I'd like to get working looks like this:
class Handler(web.RequestHandler):
    @gen.coroutine
    def get_cache(self):
        try:
            response = yield gen.Task(get_redis)
        except:
            logging.log()
        if response:
            self.finish(response)
            raise gen.Return()

    @gen.coroutine
    @asynchronous
    def get(self):
        self.get_cache()
        response = do_sql_get()
        self.set_cache(key, response)
        self.finish(response)
What's happening now is that it serves from the cache if the entry is there, but then keeps running the rest of the code in self.get. That it does this makes sense to me, but I'm not sure how to refactor it properly so that it stops as soon as self.finish is called in the self.get_cache method.
get_cache should return a value that indicates whether it finished the request or not (or it should return the cached data and leave it to the caller to finish the request). I would do one of the following:
@gen.coroutine
def serve_from_cache(self):
    response = yield gen.Task(get_redis)
    if response:
        self.finish(response)
        raise gen.Return(True)
    else:
        raise gen.Return(False)

@gen.coroutine
def get(self):
    if (yield self.serve_from_cache()):
        return
    # do work
    yield self.set_cache(...)
or
@gen.coroutine
def get_cache(self):
    response = yield gen.Task(get_redis)
    raise gen.Return(response)

@gen.coroutine
def get(self):
    resp = yield self.get_cache()
    if resp:
        self.finish(resp)
        return
    # do work...
