I start a long file-based DB search which should run asynchronously and leave the server free to handle other browser requests, but it seems to block. What is the problem?
import logging

import tornado.web
from tornado import gen


class Handler(tornado.web.RequestHandler):
    def initialize(self, param):
        self.db = param

    @tornado.web.asynchronous
    @gen.engine
    def post(self):
        try:
            self.set_status(200)
            response = yield gen.Task(self.handleSearch, self.request.arguments)
            self.finish(response)
        except BaseException as s:
            logging.exception(s)
            self.finish("Error tonight, cause: %s" % s)

    def handleSearch(self, request, callback):
        return callback(self.db.createList(request))
To use Tornado's async features, your own functions need to be asynchronous too; otherwise nothing is really async. There are a few async libraries for Tornado out there (check the lists of async client libraries), but if you can't find one for your backend, another solution is to use futures.
Using futures in Python, your code would look like this:
from concurrent.futures import ThreadPoolExecutor

import tornado.web
from tornado import gen


class Handler(tornado.web.RequestHandler):
    def initialize(self, param):
        self.db = param

    @gen.coroutine
    def post(self):
        self.set_status(200)
        with ThreadPoolExecutor(1) as executor:
            r = yield executor.submit(self.handleSearch, param=self.request.arguments)
        self.finish(r)

    def handleSearch(self, param):
        try:
            return self.db.createList(param)  # or anything that blocks, e.g. time.sleep(4)
        except Exception:
            return False
I've already tested it and it works. It's fully compatible with Tornado, so you're not going to face any issues.
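A note on the design: creating a ThreadPoolExecutor per request works, but you can also share one pool across requests. Here is a minimal sketch of the same idea using Tornado's run_on_executor decorator (the pool size of 4 is an arbitrary choice of mine):

from concurrent.futures import ThreadPoolExecutor

import tornado.web
from tornado import gen
from tornado.concurrent import run_on_executor


class Handler(tornado.web.RequestHandler):
    # one pool shared by all requests, so threads are reused
    executor = ThreadPoolExecutor(max_workers=4)

    def initialize(self, param):
        self.db = param

    @gen.coroutine
    def post(self):
        self.set_status(200)
        r = yield self.handleSearch(self.request.arguments)
        self.finish(r)

    @run_on_executor  # runs on self.executor, off the IOLoop
    def handleSearch(self, param):
        return self.db.createList(param)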
I'm trying to implement accept / error logging for an asynchronous gRPC client with the gRPC AsyncIO API. I would like to handle common errors (like StatusCode.UNAVAILABLE) in one place instead of in every request.
It's easy for the synchronous version with response.exception():
class LoggingClientInterceptor(grpc.UnaryUnaryClientInterceptor):
    def __init__(self, logger: Logger):
        self.logger = logger

    def intercept_unary_unary(self, continuation, client_call_details, request):
        self.logger.debug(f"{request=}")
        response = continuation(client_call_details, request)
        if response.exception():
            self.logger.exception(f"{response.code()}")
        return response
But things get more complicated when using an asynchronous interceptor.
I tried try / except, expecting await to return a response, but that did not lead anywhere, because awaiting the continuation returns a not-yet-done UnaryUnaryCall, which has no .exception() method:
# this does not work
class LoggingClientInterceptor(grpc.aio.UnaryUnaryClientInterceptor):
    def __init__(self, logger: Logger):
        self.logger = logger

    async def intercept_unary_unary(self, continuation, client_call_details, request):
        self.logger.debug(f"{request=}")
        try:
            response = await continuation(client_call_details, request)
            return response
        except Exception as exc:
            self.logger.exception(f"{exc}")
I can await the response code and compare it with OK, and then raise an exception, but that seems like the wrong way: what if I want to add another interceptor?

code = await response.code()
if code != grpc.StatusCode.OK:
    raise SmthException
I have searched extensively, including the code in the official repository, but have not found good examples of asynchronous interceptors.
I would be glad if someone could show me a reference example.
I had a similar problem and adjusted the code a little bit.
class LoggingClientInterceptor(grpc.aio.UnaryUnaryClientInterceptor):
    def __init__(self, logger: Logger):
        self.logger = logger

    async def intercept_unary_unary(self, continuation, client_call_details, request):
        self.logger.debug(f"{request=}")
        try:
            undone_call = await continuation(client_call_details, request)
            response = await undone_call  # awaiting the call itself yields the actual response
            return response
        except Exception as exc:
            self.logger.exception(f"{exc}")
            raise
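For completeness, attaching the interceptor looks something like this (the target address is just a placeholder):

import logging

import grpc

logger = logging.getLogger(__name__)

# interceptors are passed when the channel is created
channel = grpc.aio.insecure_channel(
    "localhost:50051",  # placeholder target
    interceptors=[LoggingClientInterceptor(logger)],
)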
There is a tricky POST handler; sometimes it can take a lot of time (depending on the input values), sometimes not.
What I want is to write back as soon as 1 second passes, choosing the response dynamically.
def post(self):
    def callback():
        self.write('too-late')
        self.finish()

    timeout_obj = IOLoop.current().add_timeout(
        dt.timedelta(seconds=1),
        callback,
    )

    # some asynchronous operations

    if not self.request.connection.stream.closed():
        self.write('here is your response')
        self.finish()
        IOLoop.current().remove_timeout(timeout_obj)
Turns out I can't do much from within the callback.
Even raising an exception is suppressed by the inner context and won't propagate out of the post method.
Any other ways to achieve the goal?
Thank you.
UPD 2020-05-15:
I found a similar question.
Thanks @ionut-ticus, using with_timeout() is much more convenient.
After some tries, I think I came really close to what I'm looking for:
def wait(fn):
    @gen.coroutine
    @wraps(fn)
    def wrap(*args):
        try:
            result = yield gen.with_timeout(
                dt.timedelta(seconds=20),
                IOLoop.current().run_in_executor(None, fn, *args),
            )
            raise gen.Return(result)
        except gen.TimeoutError:
            logging.error('### TOO LONG')
            raise gen.Return('Next time, bro')
    return wrap

@wait
def blocking_func(item):
    time.sleep(30)
    # this is not a subprocess;
    # it is file IO and DB work
    return 'we are done here'
Still not sure: should the wait() decorator be wrapped in a coroutine?
Sometimes, in the chain of calls inside blocking_func(), there can be another ThreadPoolExecutor. My concern: would this work without making mine global and passing it to Tornado's run_in_executor()?
Tornado: v5.1.1
An example of the usage of tornado.gen.with_timeout. Keep in mind the task needs to be asynchronous, or the IOLoop will be blocked and won't be able to process the timeout:
@gen.coroutine
def async_task(self):
    # some async code, e.g.:
    yield gen.sleep(2)

@gen.coroutine
def get(self):
    delta = datetime.timedelta(seconds=1)
    try:
        task = self.async_task()
        result = yield gen.with_timeout(delta, task)
        self.write("success")
    except gen.TimeoutError:
        self.write("timeout")
I'd advise using https://github.com/aio-libs/async-timeout:
import asyncio

import async_timeout


async def post(self):
    try:
        async with async_timeout.timeout(1):
            # some asynchronous operations
            if not self.request.connection.stream.closed():
                self.write('here is your response')
                self.finish()
            # no manual remove_timeout needed: the context manager handles the deadline
    except asyncio.TimeoutError:
        self.write('too-late')
        self.finish()
I am trying to understand how to handle a gRPC API with bidirectional streaming (using the Python API).
Say I have the following simple server definition:
syntax = "proto3";
package simple;
service TestService {
rpc Translate(stream Msg) returns (stream Msg){}
}
message Msg
{
string msg = 1;
}
Say that the messages sent from the client come asynchronously (as a consequence of the user selecting some UI elements).
The generated Python stub for the client will contain a method Translate that accepts a generator function and returns an iterator.
What is not clear to me is how I would write the generator function that returns messages as they are created by the user. Sleeping on a thread while waiting for messages doesn't sound like the best solution.
This is a bit clunky right now, but you can accomplish your use case as follows:
#!/usr/bin/env python
from __future__ import print_function

import collections
import threading
from concurrent import futures

import grpc
from translate_pb2 import Msg
from translate_pb2_grpc import TestServiceStub
from translate_pb2_grpc import TestServiceServicer
from translate_pb2_grpc import add_TestServiceServicer_to_server


def translate_next(msg):
    return ''.join(reversed(msg))


class Translator(TestServiceServicer):
    def Translate(self, request_iterator, context):
        for req in request_iterator:
            print("Translating message: {}".format(req.msg))
            yield Msg(msg=translate_next(req.msg))


class TranslatorClient(object):
    def __init__(self):
        self._stop_event = threading.Event()
        self._request_condition = threading.Condition()
        self._response_condition = threading.Condition()
        self._requests = collections.deque()
        self._last_request = None
        self._expected_responses = collections.deque()
        self._responses = {}

    def _next(self):
        with self._request_condition:
            while not self._requests and not self._stop_event.is_set():
                self._request_condition.wait()
            if len(self._requests) > 0:
                return self._requests.popleft()
            else:
                raise StopIteration()

    def next(self):
        return self._next()

    def __next__(self):
        return self._next()

    def add_response(self, response):
        with self._response_condition:
            request = self._expected_responses.popleft()
            self._responses[request] = response
            self._response_condition.notify_all()

    def add_request(self, request):
        with self._request_condition:
            self._requests.append(request)
            with self._response_condition:
                self._expected_responses.append(request.msg)
            self._request_condition.notify()

    def close(self):
        self._stop_event.set()
        with self._request_condition:
            self._request_condition.notify()

    def translate(self, to_translate):
        self.add_request(to_translate)
        with self._response_condition:
            while True:
                self._response_condition.wait()
                if to_translate.msg in self._responses:
                    return self._responses[to_translate.msg]


def _run_client(address, translator_client):
    with grpc.insecure_channel(address) as channel:
        stub = TestServiceStub(channel)
        responses = stub.Translate(translator_client)
        for resp in responses:
            translator_client.add_response(resp)


def main():
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    add_TestServiceServicer_to_server(Translator(), server)
    server.add_insecure_port('[::]:50054')
    server.start()
    translator_client = TranslatorClient()
    client_thread = threading.Thread(
        target=_run_client, args=('localhost:50054', translator_client))
    client_thread.start()

    def _translate(to_translate):
        return translator_client.translate(Msg(msg=to_translate)).msg

    translator_pool = futures.ThreadPoolExecutor(max_workers=4)
    to_translate = ("hello", "goodbye", "I", "don't", "know", "why",)
    translations = translator_pool.map(_translate, to_translate)
    print("Translations: {}".format(list(zip(to_translate, translations))))
    translator_client.close()
    client_thread.join()
    server.stop(None)


if __name__ == "__main__":
    main()
The basic idea is to have an object called TranslatorClient running on a separate thread, correlating requests and responses. It expects that responses will return in the order that requests were sent out. It also implements the iterator interface so that you can pass it directly to an invocation of the Translate method on your stub.
We spin up a thread running _run_client which pulls responses out of TranslatorClient and feeds them back in the other end with add_response.
The main function I included here is really just a strawman since I don't have the particulars of your UI code. I'm running _translate in a ThreadPoolExecutor to demonstrate that, even though translator_client.translate is synchronous, it yields, allowing you to have multiple in-flight requests at once.
We recognize that this is a lot of code to write for such a simple use case. Ultimately, the answer will be asyncio support. We have plans for this in the not-too-distant future. But for the moment, this sort of solution should keep you going whether you're running python 2 or python 3.
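For readers landing here later: the asyncio support mentioned above has since shipped as grpc.aio, where a stream-stream call exposes a reader/writer API and no request generator is needed. A rough sketch under that assumption, reusing the generated stubs and the server address from above:

import asyncio

import grpc
from translate_pb2 import Msg
from translate_pb2_grpc import TestServiceStub


async def run():
    async with grpc.aio.insecure_channel('localhost:50054') as channel:
        stub = TestServiceStub(channel)
        call = stub.Translate()  # stream-stream call object
        for text in ("hello", "goodbye"):
            # writes can happen whenever the UI produces a message...
            await call.write(Msg(msg=text))
            # ...and reads are awaited independently
            response = await call.read()
            print(response.msg)
        await call.done_writing()


if __name__ == "__main__":
    asyncio.run(run())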
In Python I am trying to create an API for a connected device. I want it to be available for both threaded (using requests) and async applications (using aiohttp).
What I've come up with is wrapping the get methods of both requests and aiohttp in a decorator. The decorator is passed at init, and API calls are explicitly wrapped using it.
It works, but I'd like to know what others think of this approach. Are there better ways, or will I run into issues later on?
Any help appreciated!
def threaded_gett(function):
    # The threaded decorator
    def wrapper(*args, **kwargs):
        url, params = function(*args)
        response = requests.get(url, params)
        _json = response.json()
        return function.__self__.process_response(_json)
    return wrapper


def async_gett(function):
    # The async decorator
    def wrapper(*args, **kwargs):
        url, params = function(*args)
        try:
            resp = yield from function.__self__.session.get(url, params=params)
        except Exception as ex:
            lgr.exception(ex)
        else:
            _json = yield from resp.json()
            yield from resp.release()
            return function.__self__.process_response(_json)
    # wrapping the wrapper in the asyncio coroutine decorator
    wrapper = asyncio.coroutine(wrapper)
    return wrapper


class BaseApi():
    def __init__(self, get_wrapper):
        self.status = get_wrapper(self.status)

    def status(self):
        return <status path>


class ThreadedApi(BaseApi):
    def __init__(self, threaded_gett):
        BaseApi.__init__(self, threaded_gett)


class AsyncApi(BaseApi):
    def __init__(self, async_gett):
        BaseApi.__init__(self, async_gett)
Your code is not complete, but yes, the approach might work in simple cases (when .process_response() is very generic and can be applied to all API calls).
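If it helps, here is one way the split could look without rebinding methods at init time; this is purely an illustrative sketch, and every name in it (build_status, the device URL) is mine:

import requests


class BaseApi:
    def build_status(self):
        # returns (url, params) for the status endpoint; placeholder path
        return ("http://device.local/status", {})

    def process_response(self, _json):
        return _json


class ThreadedApi(BaseApi):
    def status(self):
        url, params = self.build_status()
        resp = requests.get(url, params=params)
        return self.process_response(resp.json())


class AsyncApi(BaseApi):
    def __init__(self, session):
        self.session = session  # an aiohttp.ClientSession

    async def status(self):
        url, params = self.build_status()
        async with self.session.get(url, params=params) as resp:
            return self.process_response(await resp.json())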
Here is my code:
# /test
class Test(tornado.web.RequestHandler):
    @tornado.web.asynchronous
    @tornado.gen.coroutine
    def get(self):
        res = yield self.inner()
        self.write(res)

    @tornado.gen.coroutine
    def inner(self):
        import time
        time.sleep(15)
        raise tornado.gen.Return('hello')


# /test_1
class Test1(tornado.web.RequestHandler):
    @tornado.web.asynchronous
    @tornado.gen.coroutine
    def get(self):
        res = yield self.inner()
        self.write(res)

    @tornado.gen.coroutine
    def inner(self):
        raise tornado.gen.Return('hello test1')
When I fetch /test and then fetch /test_1, /test_1 does not respond until /test has responded. How do I fix this?
Don't use time.sleep(): it blocks the IOLoop. Instead, use:

yield tornado.gen.Task(tornado.ioloop.IOLoop.instance().add_timeout,
                       time.time() + sleep_seconds)
You've hit both of the frequently-asked questions:
http://www.tornadoweb.org/en/stable/faq.html
First, please don't use time.sleep() in a Tornado application, use gen.sleep() instead. Second, be aware that most browsers won't fetch two pages from the same domain simultaneously: use "curl" or "wget" to test your application instead.
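A minimal rewrite of the /test handler along those lines (a sketch; gen.sleep() yields to the IOLoop instead of blocking it):

import tornado.web
from tornado import gen


class Test(tornado.web.RequestHandler):
    @gen.coroutine
    def get(self):
        res = yield self.inner()
        self.write(res)

    @gen.coroutine
    def inner(self):
        yield gen.sleep(15)  # non-blocking; other handlers keep running
        raise gen.Return('hello')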