I am trying to call multiple pykafka consumer functions using asyncio. However, the first consumer function blocks the others from running.
The QueueConsumer lib:
import json
from pykafka import KafkaClient
import configparser
import asyncio

class QueueConsumer(object):
    def __init__(self):
        config = configparser.ConfigParser()
        config.read('config.ini')
        self.config = config

    async def test(self):
        defaultTopic = 'test'
        client = KafkaClient(hosts=self.config['kafka']['host'])
        topic = client.topics[defaultTopic.encode('utf-8')]
        consumer = topic.get_simple_consumer()
        # msg = next(consumer)
        for message in consumer:
            print(defaultTopic + ' ' + message.value.decode("utf-8"))

    async def coba(self):
        defaultTopic = 'coba'
        client = KafkaClient(hosts=self.config['kafka']['host'])
        topic = client.topics[defaultTopic.encode('utf-8')]
        consumer = topic.get_simple_consumer()
        # msg = next(consumer)
        for message in consumer:
            print(defaultTopic + ' ' + message.value.decode("utf-8"))
Then I call those functions using:
import asyncio

queueConsumer = QueueConsumer()
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.gather(
    queueConsumer.test(),
    queueConsumer.coba(),
))
loop.close()
The result only returns queue messages from topic 'test'.
Edit:
I tried adding another function:
async def factorial(self, name, number):
    f = 1
    for i in range(2, number+1):
        print("Task %s: Compute factorial(%s)..." % (name, i))
        await asyncio.sleep(1)
        f *= i
    print("Task %s: factorial(%s) = %s" % (name, number, f))
And then called them like:
queueConsumer.test(),
queueConsumer.coba(),
queueConsumer.factorial('a',3),
queueConsumer.factorial('b',5),
queueConsumer.factorial('c',7),
Some prints from the factorial function are executed, but as soon as a print from either test or coba runs, it stops the others.
SimpleConsumer.consume is a blocking call, so you'll need to adjust your code to periodically poll for new messages while relinquishing control between polls for other asynchronous operations to take over. One way this could be accomplished is with the use_greenlets=True kwarg on KafkaClient, relying on gevent to handle control flow between multiple asynchronous operations.
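For illustration, here is a minimal sketch of the polling approach without gevent, using SimpleConsumer.consume(block=False) so that each poll returns immediately and the coroutine can yield control between polls. The hosts string and the 0.5 s interval are placeholders, not values from the question:

import asyncio
from pykafka import KafkaClient

async def consume_topic(hosts, topic_name):
    client = KafkaClient(hosts=hosts)
    topic = client.topics[topic_name.encode('utf-8')]
    consumer = topic.get_simple_consumer()
    while True:
        # Non-blocking poll: returns None when no message is ready
        message = consumer.consume(block=False)
        if message is not None:
            print(topic_name + ' ' + message.value.decode('utf-8'))
        else:
            # Yield control so the other consumer coroutines can run
            await asyncio.sleep(0.5)

loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.gather(
    consume_topic('127.0.0.1:9092', 'test'),
    consume_topic('127.0.0.1:9092', 'coba'),
))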
Related
I have some HTML pages that I am trying to extract the text from using asynchronous web requests through aiohttp and asyncio; after extracting the text, I save the files locally. I am using BeautifulSoup (in extract_text()) to process the text from the response and extract the relevant text within the HTML page (excluding code, etc.), but I am facing an issue where the synchronous version of my script is faster than the asynchronous + multiprocessing version.
As I understand it, calling the BeautifulSoup function blocks the main event loop within parse(), so based on these two StackOverflow questions [0, 1], I figured the best thing to do was to run extract_text() in its own process (as it is a CPU-bound task), which should prevent the event loop from blocking.
Instead, this results in the script taking 1.5x longer than the synchronous version (with no multiprocessing).
To confirm that this was not an issue with my implementation of the asynchronous code, I removed the use of extract_text() and instead saved the raw text from the response object. Doing this resulted in my asynchronous code being much faster, showing that the issue stems purely from extract_text() being run in a separate process.
Am I missing some important detail here?
import asyncio
from asyncio import Semaphore
import json
import logging
from pathlib import Path
from typing import List, Optional
import aiofiles
from aiohttp import ClientSession
import aiohttp
from bs4 import BeautifulSoup
import concurrent.futures
import functools

def extract_text(raw_text: str) -> str:
    return " ".join(BeautifulSoup(raw_text, "html.parser").stripped_strings)

async def fetch_text(
    url: str,
    session: ClientSession,
    semaphore: Semaphore,
    **kwargs: dict,
) -> str:
    async with semaphore:
        response = await session.request(method="GET", url=url, **kwargs)
        response.raise_for_status()
        logging.info("Got response [%s] for URL: %s", response.status, url)
        text = await response.text(encoding="utf-8")
        return text

async def parse(
    url: str,
    session: ClientSession,
    semaphore: Semaphore,
    **kwargs,
) -> Optional[str]:
    try:
        text = await fetch_text(
            url=url,
            session=session,
            semaphore=semaphore,
            **kwargs,
        )
    except (
        aiohttp.ClientError,
        aiohttp.http_exceptions.HttpProcessingError,
    ) as e:
        logging.error(
            "aiohttp exception for %s [%s]: %s",
            url,
            getattr(e, "status", None),
            getattr(e, "message", None),
        )
    except Exception as e:
        logging.exception(
            "Non-aiohttp exception occurred: %s",
            getattr(e, "__dict__", None),
        )
    else:
        loop = asyncio.get_running_loop()
        with concurrent.futures.ProcessPoolExecutor() as pool:
            extract_text_ = functools.partial(extract_text, text)
            text = await loop.run_in_executor(pool, extract_text_)
        logging.info("Found text for %s", url)
        return text

async def process_file(
    url: dict,
    session: ClientSession,
    semaphore: Semaphore,
    **kwargs: dict,
) -> None:
    category = url.get("category")
    link = url.get("link")
    if category and link:
        text = await parse(
            url=f"{URL}/{link}",
            session=session,
            semaphore=semaphore,
            **kwargs,
        )
        if text:
            save_path = await get_save_path(
                link=link,
                category=category,
            )
            await write_file(html_text=text, path=save_path)
        else:
            logging.warning("Text for %s not found, skipping it...", link)

async def process_files(
    html_files: List[dict],
    semaphore: Semaphore,
) -> None:
    async with ClientSession() as session:
        tasks = [
            process_file(
                url=file,
                session=session,
                semaphore=semaphore,
            )
            for file in html_files
        ]
        await asyncio.gather(*tasks)

async def write_file(
    html_text: str,
    path: Path,
) -> None:
    # Write to file using aiofiles
    ...

async def get_save_path(link: str, category: str) -> Path:
    # return path to save
    ...

async def main_async(
    num_files: Optional[int],
    semaphore_count: int,
) -> None:
    html_files = ...  # get all the files to process
    semaphore = Semaphore(semaphore_count)
    await process_files(
        html_files=html_files,
        semaphore=semaphore,
    )

if __name__ == "__main__":
    NUM_FILES = ...  # passed through CLI args
    SEMAPHORE_COUNT = ...  # passed through CLI args
    asyncio.run(
        main_async(
            num_files=NUM_FILES,
            semaphore_count=SEMAPHORE_COUNT,
        )
    )
SnakeViz charts across 1000 samples (figures omitted):
- Async version with extract_text and multiprocessing
- Async version without extract_text
- Sync version with extract_text (notice how the html_parser from BeautifulSoup takes up the majority of the time here)
- Sync version without extract_text
Here is roughly what your asynchronous program does:
- Launch num_files parse() tasks concurrently
- Each parse() task creates its own ProcessPoolExecutor and asynchronously awaits extract_text (which is executed in the previously created process pool).
This is suboptimal for several reasons:
- It creates num_files process pools, which are expensive to create and take up memory
- Each pool is only used for one single operation, which is counterproductive: as many concurrent operations as possible should be submitted to a given pool
You are creating a new ProcessPoolExecutor each time the parse() function is called. You could try to instantiate it once (as a global, for instance, or passed through a function argument):
from concurrent.futures import ProcessPoolExecutor

async def parse(loop, executor, ...):
    ...
    text = await loop.run_in_executor(executor, extract_text)

# and then in `process_file` (or `process_files`):

async def process_file(...):
    ...
    loop = asyncio.get_running_loop()
    with ProcessPoolExecutor() as executor:
        ...
        await parse(loop, executor, ...)
I benchmarked the overhead of creating a ProcessPoolExecutor on my old MacBook Air 2015, and it shows that it is quite slow (almost 100 ms for pool creation, opening, submit, and shutdown):
from time import perf_counter
from concurrent.futures import ProcessPoolExecutor

def noop():
    # A module-level function: lambdas cannot be pickled for a process pool
    return None

def main_1():
    """Pool created once"""
    reps = 100
    t1 = perf_counter()
    with ProcessPoolExecutor() as executor:
        for _ in range(reps):
            executor.submit(noop)
    t2 = perf_counter()
    print(f"{(t2 - t1) / reps * 1_000} ms")  # 2 ms/it

def main_2():
    """Pool created at each iteration"""
    reps = 100
    t1 = perf_counter()
    for _ in range(reps):
        with ProcessPoolExecutor() as executor:
            executor.submit(noop)
    t2 = perf_counter()
    print(f"{(t2 - t1) / reps * 1_000} ms")  # 100 ms/it

if __name__ == "__main__":
    main_1()
    main_2()
You may again hoist it up into the process_files function, which avoids recreating the pool for each file; a sketch follows.
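A minimal sketch of that hoisting, reusing the names from your code (process_file would need to accept and forward the loop and executor down to parse, which is an assumption on my part):

import asyncio
from concurrent.futures import ProcessPoolExecutor
from aiohttp import ClientSession

async def process_files(html_files, semaphore):
    loop = asyncio.get_running_loop()
    # One pool and one session shared by every task
    with ProcessPoolExecutor() as executor:
        async with ClientSession() as session:
            tasks = [
                process_file(
                    url=file,
                    session=session,
                    semaphore=semaphore,
                    loop=loop,
                    executor=executor,
                )
                for file in html_files
            ]
            await asyncio.gather(*tasks)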
Also, try to inspect your first SnakeViz chart more closely to find out what exactly in process.py:submit is taking that much time.
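If it helps, a generic way to capture a profile that SnakeViz can open (standard cProfile usage, not specific to your script; the entry-point arguments are placeholders):

import cProfile

# Dump stats to a file, then open it with: snakeviz async_version.prof
cProfile.run(
    'asyncio.run(main_async(num_files=None, semaphore_count=10))',
    'async_version.prof',
)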
One last thing: be careful about the semantics of using a context manager on an executor:
from concurrent.futures import ProcessPoolExecutor

with ProcessPoolExecutor() as executor:
    for i in range(100):
        executor.submit(some_work, i)
Not only does this create an executor and submit work to it, it also waits for all submitted work to finish before exiting the with statement.
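If that implicit wait is not what you want, one alternative (a sketch, reusing the hypothetical some_work from above) is to manage the executor's lifetime explicitly:

from concurrent.futures import ProcessPoolExecutor

executor = ProcessPoolExecutor()
futures_ = [executor.submit(some_work, i) for i in range(100)]
# ... do other things while the work runs in the background ...
executor.shutdown(wait=True)  # this is what the with-statement does on exit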
I want to use grpc-python in the following scenario, but I don't know how to implement it.
The scenario is that, on the Python server, a class calculates and updates an instance's state, then sends that state to the corresponding client; on the client side, more than one client needs to communicate with the server, each getting its own result without interference from the others.
Specifically, suppose there is a class with initial value self.i = 0; each time a client calls the class's update function, it does self.i = self.i + 1 and returns self.i. Two clients may call the update function concurrently, e.g. while client1 calls update for the third time, client2 calls it for the first time.
I think this might be solved by creating a thread for each client to avoid conflicts: if a new client calls, a new thread is created; if an existing client calls, its existing thread is used. But I don't know how to implement this.
Hope you can help me. Thanks in advance.
I think I solved this problem by myself. If you have any better solutions, you can post them here.
I edited the helloworld example from the grpc-python introduction to illustrate my goal.
For helloworld.proto
syntax = "proto3";
option java_multiple_files = true;
option java_package = "io.grpc.examples.helloworld";
option java_outer_classname = "HelloWorldProto";
option objc_class_prefix = "HLW";
package helloworld;
// The greeting service definition.
service Greeter {
// Sends a greeting
rpc SayHello (HelloRequest) returns (HelloReply) {}
rpc Unsubscribe (HelloRequest) returns (HelloReply) {}
}
// The request message containing the user's name.
message HelloRequest {
string name = 1;
}
// The response message containing the greetings
message HelloReply {
string message = 1;
}
I added an Unsubscribe function to allow a specific client to disconnect from the server.
In hello_server.py
import grpc
import helloworld_pb2
import helloworld_pb2_grpc
import threading
from threading import RLock
import time
from concurrent import futures
import logging

class Calculate:
    def __init__(self):
        self.i = 0

    def add(self):
        self.i += 1
        return self.i

class PeerSet(object):
    def __init__(self):
        self._peers_lock = RLock()
        self._peers = {}
        self.instances = {}

    def connect(self, peer):
        with self._peers_lock:
            if peer not in self._peers:
                print("Peer {} connecting".format(peer))
                self._peers[peer] = 1
                a = Calculate()
                self.instances[peer] = a
                output = a.add()
                return output
            else:
                self._peers[peer] += 1
                a = self.instances[peer]
                output = a.add()
                return output

    def disconnect(self, peer):
        print("Peer {} disconnecting".format(peer))
        with self._peers_lock:
            if peer not in self._peers:
                raise RuntimeError("Tried to disconnect peer '{}' but it was never connected.".format(peer))
            del self._peers[peer]
            del self.instances[peer]

    def peers(self):
        with self._peers_lock:
            return self._peers.keys()

class Greeter(helloworld_pb2_grpc.GreeterServicer):
    def __init__(self):
        self._peer_set = PeerSet()

    def _record_peer(self, context):
        return self._peer_set.connect(context.peer())

    def SayHello(self, request, context):
        output = self._record_peer(context)
        print("[thread {}] Peers: {}, output: {}".format(
            threading.current_thread().ident, self._peer_set.peers(), output))
        time.sleep(1)
        return helloworld_pb2.HelloReply(message='Hello, {}, {}!'.format(request.name, output))

    def Unsubscribe(self, request, context):
        self._peer_set.disconnect(context.peer())
        return helloworld_pb2.HelloReply(message='{} disconnected!'.format(context.peer()))

def serve():
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)
    server.add_insecure_port('[::]:50051')
    server.start()
    server.wait_for_termination()

if __name__ == '__main__':
    logging.basicConfig()
    serve()
The use of context.peer() is adapted from Richard Belleville's answer in this post. You can change the add() function to any other function that updates the instance's state.
In hello_client.py
from __future__ import print_function
import logging
import grpc
import helloworld_pb2
import helloworld_pb2_grpc

def run():
    # NOTE(gRPC Python Team): .close() is possible on a channel and should be
    # used in circumstances in which the with statement does not fit the needs
    # of the code.
    with grpc.insecure_channel('localhost:50051') as channel:
        stub = helloworld_pb2_grpc.GreeterStub(channel)
        response = stub.SayHello(helloworld_pb2.HelloRequest(name='you'))
        print("Greeter client received: " + response.message)
        response = stub.SayHello(helloworld_pb2.HelloRequest(name='Tom'))
        print("Greeter client received: " + response.message)
        response = stub.SayHello(helloworld_pb2.HelloRequest(name='Jerry'))
        print("Greeter client received: " + response.message)
        stub.Unsubscribe(helloworld_pb2.HelloRequest(name="end"))

if __name__ == '__main__':
    logging.basicConfig()
    run()
If we run several hello_client.py processes simultaneously, the server can distinguish the different clients and send the correct corresponding info to each of them.
I have an async receive method for Event Hub developed in Python, with a checkpoint store attached; the code was taken from the official Event Hub samples GitHub repo.
Problem: Using the above-mentioned code, I am only able to receive 20-35 messages per minute if I keep the receiver on for the whole day, whereas my Event Hub has a lot of stream data ingested (~200 messages per minute). The enqueued time at the Event Hub for a message now lags by 90 minutes due to poor throughput at the receiver's end, which means that data enqueued at minute X is pulled out at minute X+90.
Investigation: I looked at the receive subclass in the Event Hub Python SDK and came across a prefetch parameter (line 318), which is set to 300 by default. If this is already set to 300, then I should be able to pull more than 30-35 messages by default.
Any idea on how I can increase my pull capacity? I'm stuck at this point with no direction forward; any help is highly appreciated.
EDIT 1: I'm attaching my Python code below.
import asyncio
import json
import logging
import os
import sys
import time
from datetime import date
import requests
from azure.eventhub.aio import EventHubConsumerClient
from azure.eventhub.extensions.checkpointstoreblobaio import BlobCheckpointStore
import log_handler
import threading
import traceback

try:
    ## Set env variables
    CONNECTION_STR = os.environ["ECS"].strip()
    EVENTHUB_NAME = os.environ['EN'].strip()
    EVENTHUB_CONSUMER = os.environ["EC"].strip()
    API = os.environ['API_variable'].strip()
    AZURE_BLOB_CONNECTION_STR = os.environ["ACS"].strip()
    BLOB_CONTAINER_NAME = os.environ["BCN"].strip()
    BLOB_ACCOUNT_URL = os.environ["BAU"].strip()
    PREFETCH_COUNT = int(os.environ["PREFETCH_COUNT"])
    MAX_WAIT_TIME = float(os.environ["MAX_WAIT_TIME"])
except Exception as exception:
    logging.debug(traceback.format_exc())
    logging.warning(
        "*** Please check the environment variables for {}".format(str(exception)))
    sys.exit()

def API_CALL(event_data):
    """
    Sends the request to the API
    """
    try:
        url = event_data['image_url']
        payload = {"url": url}
        ## API call to the server
        service_response = requests.post(API, json=payload)
        logging.info(f"*** service_response.status_code : {service_response.status_code}")
        cloud_response = json.loads(
            service_response.text) if service_response.status_code == 200 else None
        today = date.today()
        response_date = today.strftime("%B %d, %Y")
        response_time = time.strftime("%H:%M:%S", time.gmtime())
        response_data = {
            "type": "response_data",
            "consumer_group": EVENTHUB_CONSUMER,
            'current_date': response_date,
            'current_time': response_time,
            'image_url': url,
            'status_code': service_response.status_code,
            'response': cloud_response,
            'api_response_time': int(service_response.elapsed.total_seconds()*1000),
            "eventhub_data": event_data
        }
        logging.info(f"*** response_data {json.dumps(response_data)}")
        logging.debug(f"*** response_data {json.dumps(response_data)}")
    except Exception as exception:
        logging.debug(traceback.format_exc())
        logging.error(
            "**** RaiseError: Failed request url %s, Root Cause of error: %s", url, exception)

async def event_operations(partition_context, event):
    start_time = time.time()
    data_ = event.body_as_str(encoding='UTF-8')
    json_data = json.loads(data_)
    ## forming data payload
    additional_data = {
        "type": "event_data",
        "consumer_group": EVENTHUB_CONSUMER,
        "image_name": json_data["image_url"].split("/")[-1]
    }
    json_data.update(additional_data)
    logging.info(f"*** Data fetched from EH : {json_data}")
    logging.debug(f"*** Data fetched from EH : {json_data}")
    API_CALL(json_data)
    logging.info(f"*** time taken to process an event(ms): {(time.time()-start_time)*1000}")

def between_callback(partition_context, event):
    """
    Loop to create threads
    """
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    loop.run_until_complete(event_operations(partition_context, event))
    loop.close()

async def on_event(partition_context, event):
    """
    Put your code here.
    Do some sync or async operations.
    If the operation is i/o intensive, async will have better performance.
    """
    t1 = time.time()
    _thread = threading.Thread(target=between_callback, args=(partition_context, event))
    _thread.start()
    logging.info(f"*** time taken to start a thread(ms): {(time.time()-t1)*1000}")
    logging.info("*** Fetching the next event")
    ## Update checkpoint per event
    t2 = time.time()
    await partition_context.update_checkpoint(event)
    logging.info(f"*** time taken to update checkpoint(ms): {(time.time()-t2)*1000}")

async def main(client):
    """
    Run the on_event method for each event received

    Args:
        client ([type]): Azure Eventhub listener client
    """
    try:
        async with client:
            # Call the receive method. Only read current data (#latest)
            logging.info("*** Listening to event")
            await client.receive(on_event=on_event,
                                 prefetch=PREFETCH_COUNT,
                                 max_wait_time=MAX_WAIT_TIME)
    except KeyboardInterrupt:
        print("*** Stopped receiving due to keyboard interrupt")
    except Exception as err:
        logging.debug(traceback.format_exc())
        print("*** some error occurred :", err)

if __name__ == '__main__':
    ## Checkpoint initialization
    checkpoint_store = BlobCheckpointStore(
        blob_account_url=BLOB_ACCOUNT_URL,
        container_name=BLOB_CONTAINER_NAME,
        credential=AZURE_BLOB_CONNECTION_STR
    )
    ## Client initialization
    client = EventHubConsumerClient.from_connection_string(
        CONNECTION_STR,
        consumer_group=EVENTHUB_CONSUMER,
        eventhub_name=EVENTHUB_NAME,
        checkpoint_store=checkpoint_store,  # COMMENT TO RUN WITHOUT CHECKPOINT
        logging_enable=True,
        on_partition_initialize=on_partition_initialize,  # defined elsewhere in the original code
        on_partition_close=on_partition_close,  # defined elsewhere in the original code
        idle_timeout=10,
        on_error=on_error,  # defined elsewhere in the original code
        retry_total=3
    )
    logging.info("Connecting to eventhub {} consumer {}".format(
        EVENTHUB_NAME, EVENTHUB_CONSUMER))
    logging.info("Registering receive callback.")
    loop = asyncio.get_event_loop()
    try:
        loop.run_until_complete(main(client))
    except KeyboardInterrupt as exception:
        pass
    finally:
        loop.stop()
Execution flow: main() --> on_event() --> Thread(between_callback --> API_CALL) --> update_checkpoint
Change the receiver function to the format below, so that events are received and processed until they complete.
import asyncio
from azure.eventhub.aio import EventHubConsumerClient
from azure.eventhub.extensions.checkpointstoreblobaio import BlobCheckpointStore

async def on_event(partition_context, event):
    # Print the event data.
    print("Received the event: \"{}\" from the partition with ID: \"{}\"".format(
        event.body_as_str(encoding='UTF-8'), partition_context.partition_id))
    # Update the checkpoint so that the program doesn't read the events
    # that it has already read when you run it next time.
    await partition_context.update_checkpoint(event)

async def main():
    # Create an Azure blob checkpoint store to store the checkpoints.
    checkpoint_store = BlobCheckpointStore.from_connection_string(
        "AZURE STORAGE CONNECTION STRING", "BLOB CONTAINER NAME")
    # Create a consumer client for the event hub.
    client = EventHubConsumerClient.from_connection_string(
        "EVENT HUBS NAMESPACE CONNECTION STRING",
        consumer_group="$Default",
        eventhub_name="EVENT HUB NAME",
        checkpoint_store=checkpoint_store)
    async with client:
        # Call the receive method. Read from the beginning of the partition (starting_position: "-1")
        await client.receive(on_event=on_event, starting_position="-1")

if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    # Run the main method.
    loop.run_until_complete(main())
Also, as suggested in the comments, call the API on an interval basis rather than once per event; a sketch follows.
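A minimal sketch of that interval idea, reusing API_CALL from the question; the 5-second interval, the buffering, and running the flusher with asyncio.gather are my own assumptions, not part of the SDK:

import asyncio
import json

event_buffer = []

async def on_event(partition_context, event):
    # Buffer the payload instead of calling the API per event
    event_buffer.append(json.loads(event.body_as_str(encoding='UTF-8')))
    await partition_context.update_checkpoint(event)

async def flush_periodically(interval=5.0):
    while True:
        await asyncio.sleep(interval)
        batch = event_buffer[:]
        del event_buffer[:]
        for payload in batch:
            # API_CALL is the blocking helper from the question; for a fully
            # async version, run it in an executor instead
            API_CALL(payload)

# inside `async with client:`:
#     await asyncio.gather(
#         client.receive(on_event=on_event),
#         flush_periodically(),
#     )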
I tried to create a Celery task that returns the roles from my Discord guild (proof of concept):
...
from asgiref.sync import async_to_sync
from celery import shared_task, Task
from discord.http import HTTPClient

class ChatTask(Task):
    _client = None

    @property
    def client(self) -> HTTPClient:
        if self._client is None:
            client = HTTPClient()
            result = async_to_sync(
                lambda: client.static_login(settings.BOT_TOKEN, bot=True)
            )()
            self._client = client
        return self._client

@shared_task(base=ChatTask, bind=True)
def get_roles(self):
    # Still testing!
    client: HTTPClient = self.client
    print(client)  # Seems to be fine: `<discord.http.HTTPClient object at 0x7fcc6728dbb0>`
    roles = async_to_sync(lambda: async_get_roles(client))()  # ERROR!: `RuntimeError('Event loop is closed')`
    print(roles)
    return roles
However, I keep getting RuntimeError('Event loop is closed').
Comment: I do know that Celery v5 will support async/await, but I do not want to wait until the release at the end of the year. ;)
How can I fix this code to get rid of the error and actually see the roles printed to the console?
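One possible direction, as a hedged sketch rather than a confirmed fix: async_to_sync creates and closes a fresh event loop on each call, while the aiohttp session inside HTTPClient stays bound to the loop it was first used on, so the second call can see a closed loop. Keeping one long-lived loop per worker and running every client call on it avoids that mismatch; settings, BOT_TOKEN, and async_get_roles are the names from the question above:

import asyncio
from celery import shared_task, Task
from discord.http import HTTPClient

class ChatTask(Task):
    _client = None
    _loop = None

    @property
    def loop(self) -> asyncio.AbstractEventLoop:
        # One persistent loop per worker process, never closed between tasks
        if self._loop is None:
            self._loop = asyncio.new_event_loop()
        return self._loop

    @property
    def client(self) -> HTTPClient:
        if self._client is None:
            client = HTTPClient()
            # Log in on the same loop that later calls will use
            self.loop.run_until_complete(
                client.static_login(settings.BOT_TOKEN, bot=True))
            self._client = client
        return self._client

@shared_task(base=ChatTask, bind=True)
def get_roles(self):
    # Run the coroutine on the persistent loop instead of async_to_sync
    return self.loop.run_until_complete(async_get_roles(self.client))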
I am trying to understand how to handle a grpc api with bidirectional streaming (using the Python API).
Say I have the following simple server definition:
syntax = "proto3";
package simple;
service TestService {
rpc Translate(stream Msg) returns (stream Msg){}
}
message Msg
{
string msg = 1;
}
Say that the messages to be sent from the client arrive asynchronously (as a consequence of the user selecting some UI elements).
The generated Python stub for the client will contain a method Translate that accepts a request iterator (such as a generator) and returns a response iterator.
What is not clear to me is how I would write the generator that yields messages as they are created by the user. Sleeping on the thread while waiting for messages doesn't sound like the best solution.
This is a bit clunky right now, but you can accomplish your use case as follows:
#!/usr/bin/env python

from __future__ import print_function

import time
import random
import collections
import threading

from concurrent import futures
from concurrent.futures import ThreadPoolExecutor

import grpc
from translate_pb2 import Msg
from translate_pb2_grpc import TestServiceStub
from translate_pb2_grpc import TestServiceServicer
from translate_pb2_grpc import add_TestServiceServicer_to_server

def translate_next(msg):
    return ''.join(reversed(msg))

class Translator(TestServiceServicer):
    def Translate(self, request_iterator, context):
        for req in request_iterator:
            print("Translating message: {}".format(req.msg))
            yield Msg(msg=translate_next(req.msg))

class TranslatorClient(object):
    def __init__(self):
        self._stop_event = threading.Event()
        self._request_condition = threading.Condition()
        self._response_condition = threading.Condition()
        self._requests = collections.deque()
        self._last_request = None
        self._expected_responses = collections.deque()
        self._responses = {}

    def _next(self):
        with self._request_condition:
            while not self._requests and not self._stop_event.is_set():
                self._request_condition.wait()
            if len(self._requests) > 0:
                return self._requests.popleft()
            else:
                raise StopIteration()

    def next(self):
        return self._next()

    def __next__(self):
        return self._next()

    def add_response(self, response):
        with self._response_condition:
            request = self._expected_responses.popleft()
            self._responses[request] = response
            self._response_condition.notify_all()

    def add_request(self, request):
        with self._request_condition:
            self._requests.append(request)
            with self._response_condition:
                self._expected_responses.append(request.msg)
            self._request_condition.notify()

    def close(self):
        self._stop_event.set()
        with self._request_condition:
            self._request_condition.notify()

    def translate(self, to_translate):
        self.add_request(to_translate)
        with self._response_condition:
            while True:
                self._response_condition.wait()
                if to_translate.msg in self._responses:
                    return self._responses[to_translate.msg]

def _run_client(address, translator_client):
    with grpc.insecure_channel(address) as channel:
        stub = TestServiceStub(channel)
        responses = stub.Translate(translator_client)
        for resp in responses:
            translator_client.add_response(resp)

def main():
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    add_TestServiceServicer_to_server(Translator(), server)
    server.add_insecure_port('[::]:50054')
    server.start()
    translator_client = TranslatorClient()
    client_thread = threading.Thread(
        target=_run_client, args=('localhost:50054', translator_client))
    client_thread.start()

    def _translate(to_translate):
        return translator_client.translate(Msg(msg=to_translate)).msg

    translator_pool = futures.ThreadPoolExecutor(max_workers=4)
    to_translate = ("hello", "goodbye", "I", "don't", "know", "why",)
    translations = translator_pool.map(_translate, to_translate)
    print("Translations: {}".format(zip(to_translate, translations)))
    translator_client.close()
    client_thread.join()
    server.stop(None)

if __name__ == "__main__":
    main()
The basic idea is to have an object called TranslatorClient running on a separate thread, correlating requests and responses. It expects that responses will return in the order that requests were sent out. It also implements the iterator interface so that you can pass it directly to an invocation of the Translate method on your stub.
We spin up a thread running _run_client, which pulls responses off the stub's response iterator and feeds them back into TranslatorClient with add_response.
The main function I included here is really just a strawman since I don't have the particulars of your UI code. I'm running _translate in a ThreadPoolExecutor to demonstrate that, even though translator_client.translate is synchronous, it yields, allowing you to have multiple in-flight requests at once.
We recognize that this is a lot of code to write for such a simple use case. Ultimately, the answer will be asyncio support. We have plans for this in the not-too-distant future. But for the moment, this sort of solution should keep you going whether you're running python 2 or python 3.
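For reference, a minimal sketch of what this can look like with the asyncio API (grpc.aio), where an asyncio.Queue replaces the condition-variable machinery; the Msg and TestServiceStub names come from the example above, and the sentinel-based shutdown is my own convention:

import asyncio
import grpc
from translate_pb2 import Msg
from translate_pb2_grpc import TestServiceStub

async def run():
    queue = asyncio.Queue()

    async def request_stream():
        # Yield messages as the UI (or any producer) puts them on the queue
        while True:
            msg = await queue.get()
            if msg is None:  # sentinel: end the request stream
                return
            yield msg

    async with grpc.aio.insecure_channel('localhost:50054') as channel:
        stub = TestServiceStub(channel)
        call = stub.Translate(request_stream())
        await queue.put(Msg(msg='hello'))
        await queue.put(None)
        async for response in call:
            print(response.msg)

asyncio.run(run())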