I'm trying to create a client for testing my gRPC server. The server has an RPC NotificationStreaming() that streams notifications (unary-stream), plus a bunch of synchronous RPC methods (unary-unary).
In main() below, I first establish a connection for the streaming in a separate process, and then I perform the unary-unary RPC requests sequentially. After each unary-unary request I receive notifications via NOTIFICATION_QUEUE. The stream stays empty until I call the first unary-unary method, create_project(stub), so I expect to receive the first notification during that call.
The problem is that if I remove sleep(5), my program gets stuck at this line.
Does anyone have ideas for a more robust approach?
import multiprocessing as mp
from time import sleep

import grpc
from google.protobuf import json_format
from google.protobuf.empty_pb2 import Empty

import main_pb2_grpc  # generated gRPC stubs
import misc           # project-local logging helper
import settings       # project-local configuration


def _notification_stream(notification_queue):
    with grpc.insecure_channel(settings.GRPC_PORT) as channel:
        stub = main_pb2_grpc.MyAPIStub(channel)
        try:
            response_stream = stub.NotificationStreaming(Empty())
            for notification in response_stream:
                r = json_format.MessageToDict(notification, preserving_proto_field_name=True,
                                              including_default_value_fields=True)
                notification_queue.put(r['message'])
        except grpc.RpcError as e:
            misc.log(f"ERROR notification stream: {e}")


def notification_streaming(notification_queue):
    _process = mp.Process(target=_notification_stream, daemon=True,
                          kwargs={"notification_queue": notification_queue})
    _process.start()
    return _process.pid
def main():
    NOTIFICATION_QUEUE = mp.Queue()
    # start listening to the notification stream
    notification_streaming(NOTIFICATION_QUEUE)
    sleep(5)
    with grpc.insecure_channel(settings.GRPC_PORT) as channel:
        stub = main_pb2_grpc.MyAPIStub(channel)
        create_project(stub)
        while not NOTIFICATION_QUEUE.empty():
            misc.log(f"\tnotification: {NOTIFICATION_QUEUE.get(block=True)}")
        close_project(stub)
        while not NOTIFICATION_QUEUE.empty():
            misc.log(f"\tnotification: {NOTIFICATION_QUEUE.get(block=True)}")
        load_project(stub)
        while not NOTIFICATION_QUEUE.empty():
            misc.log(f"\tnotification: {NOTIFICATION_QUEUE.get(block=True)}")
        save_project(stub)
        while not NOTIFICATION_QUEUE.empty():
            misc.log(f"\tnotification: {NOTIFICATION_QUEUE.get(block=True)}")
        ...


if __name__ == '__main__':
    main()
The idea is to make sure that the streaming channel is established before creating the channel for the unary calls. To do this, I use channel.subscribe(wait_for_ready_partial, try_to_connect=True) to watch the ChannelConnectivity state and continue only once it reaches grpc.ChannelConnectivity.READY.
import functools

NOTIFICATION_QUEUE = mp.Queue()


def _notification_stream(notification_queue):
    def wait_for_ready(channel_connectivity, _notification_queue=None):
        if channel_connectivity is grpc.ChannelConnectivity.READY:
            _notification_queue.put(settings.STOP_WORD)

    with grpc.insecure_channel(settings.GRPC_PORT) as channel:
        wait_for_ready_partial = functools.partial(wait_for_ready, _notification_queue=notification_queue)
        channel.subscribe(wait_for_ready_partial, try_to_connect=True)
        stub = main_pb2_grpc.MyAPIStub(channel)
        response_stream = stub.NotificationStreaming(Empty())
        for notification in response_stream:
            r = json_format.MessageToDict(notification, preserving_proto_field_name=True,
                                          including_default_value_fields=True)
            notification_queue.put(r['message'])


def notification_streaming(notification_queue):
    _process = mp.Process(target=_notification_stream, daemon=True,
                          kwargs={"notification_queue": notification_queue})
    _process.start()
    # block until the child signals that its channel is READY
    while True:
        if notification_queue.get(block=True) == settings.STOP_WORD:
            break


def main():
    # establish the channel for notification streaming (unary-stream)
    notification_streaming(NOTIFICATION_QUEUE)
    # the main gRPC unary-unary interactions
    with grpc.insecure_channel(settings.GRPC_PORT) as channel:
        stub = main_pb2_grpc.MyAPIStub(channel)
        create_project(stub)
        close_project(stub)
        load_project(stub)
        ...


if __name__ == '__main__':
    main()
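As a side note, gRPC also ships a small helper that blocks until a channel is ready, which can replace the manual connectivity subscription. A minimal sketch (reusing settings.GRPC_PORT and misc.log from above):

import grpc

channel = grpc.insecure_channel(settings.GRPC_PORT)
try:
    # Block for up to 10 seconds until the channel reaches READY.
    grpc.channel_ready_future(channel).result(timeout=10)
except grpc.FutureTimeoutError:
    misc.log("ERROR: channel not READY within 10 seconds")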
I am running into some trouble with Azure Event Hub with Python. Below is my starter code for the connection (taken from the Microsoft docs):
import asyncio

from azure.eventhub.aio import EventHubConsumerClient
from azure.eventhub.extensions.checkpointstoreblobaio import BlobCheckpointStore


async def on_event(partition_context, event):
    # Print the event data.
    print("Received the event: \"{}\" from the partition with ID: \"{}\"".format(
        event.body_as_str(encoding='UTF-8'), partition_context.partition_id))
    # Update the checkpoint so that the program doesn't read the events
    # that it has already read when you run it next time.
    await partition_context.update_checkpoint(event)


async def main():
    # Create an Azure blob checkpoint store to store the checkpoints.
    checkpoint_store = BlobCheckpointStore.from_connection_string(
        "AZURE STORAGE CONNECTION STRING", "BLOB CONTAINER NAME")
    # Create a consumer client for the event hub.
    client = EventHubConsumerClient.from_connection_string(
        "EVENT HUBS NAMESPACE CONNECTION STRING", consumer_group="$Default",
        eventhub_name="EVENT HUB NAME", checkpoint_store=checkpoint_store)
    async with client:
        # Call the receive method. Read from the beginning of the partition (starting_position: "-1").
        await client.receive(on_event=on_event, starting_position="-1")


if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    # Run the main method.
    loop.run_until_complete(main())
Here, the receiver/consumer keeps listening; if I remove any of the awaits, the consumer throws an error.
Does anyone know how to stop the consumer after it has run for some time (i.e. a timeout)?
@Abhishek, there are two options here:
You could stop listening after a certain period of inactivity.
You could stop listening after a fixed duration.
Both are detailed in the steps below.
OPTION 1
You could use the max_wait_time parameter to stop listening when there is no activity for a certain time.
I spun up a simple example of this; you could optimize it further.
import asyncio

from azure.eventhub.aio import EventHubConsumerClient

event_hub_connection_str = '<CON_STR>'
eventhub_name = '<EventHub_NAME>'

consumer = EventHubConsumerClient.from_connection_string(
    conn_str=event_hub_connection_str,
    consumer_group='$Default',
    eventhub_name=eventhub_name  # EventHub name should be specified if it doesn't show up in the connection string.
)

receive = True  # flag flipped by on_event once max_wait_time is exceeded


# This callback gets called when a message is received or when max_wait_time is clocked.
async def on_event(partition_context, event):
    print(event)  # Optional - to see output
    # event is None when this callback fires because max_wait_time was crossed.
    if event is not None:
        print("Received the event: \"{}\" from the partition with ID: \"{}\"".format(
            event.body_as_str(encoding='UTF-8'), partition_context.partition_id))
        # You can add other code here, such as updating the blob store.
    else:
        print("Timeout is hit")
        # Flip the flag so the main loop cancels the receive task.
        global receive
        receive = False


async def close():
    print("Closing the client.")
    await consumer.close()
    print("Closed")


async def main():
    global receive
    recv_task = asyncio.ensure_future(consumer.receive(on_event=on_event, max_wait_time=15))
    while True:  # poll the flag every 3 seconds
        await asyncio.sleep(3)
        if receive is not True:
            print("Cancelling the task")
            recv_task.cancel()  # stop receiving by cancelling the task
            break
        receive = True


asyncio.run(main())
asyncio.run(close())  # closing the client
With regard to the above code: if there is no activity for 15 seconds, the async task gets cancelled and the consumer client gets closed. The program then exits gracefully.
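For reference, here is a variant of the same idea that avoids the module-level receive flag and the 3-second polling loop by using an asyncio.Event instead. This is a sketch under the same consumer assumptions as above:

import asyncio


async def main():
    stop = asyncio.Event()

    async def on_event(partition_context, event):
        if event is None:  # max_wait_time elapsed with no traffic
            stop.set()
        else:
            print(event.body_as_str(encoding='UTF-8'))

    recv_task = asyncio.ensure_future(consumer.receive(on_event=on_event, max_wait_time=15))
    await stop.wait()  # wakes up as soon as the timeout branch fires
    recv_task.cancel()
    await consumer.close()


asyncio.run(main())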
OPTION 2
If you would like the client to listen for a fixed time, such as one hour, you could refer to the code below.
Reference Code
import asyncio

from azure.eventhub.aio import EventHubConsumerClient

event_hub_connection_str = '<>'
eventhub_name = '<>'

consumer = EventHubConsumerClient.from_connection_string(
    conn_str=event_hub_connection_str,
    consumer_group='$Default',
    eventhub_name=eventhub_name  # EventHub name should be specified if it doesn't show up in the connection string.
)


async def on_event(partition_context, event):
    # Put your code here.
    # If the operation is I/O intensive, async will have better performance.
    print("Received event from partition: {}".format(partition_context.partition_id))


# The receive method is a coroutine which will block when awaited.
# It can be executed in an async task for non-blocking behavior, and combined with the 'close' method.
async def main():
    recv_task = asyncio.ensure_future(consumer.receive(on_event=on_event))
    await asyncio.sleep(15)  # keep receiving for 15 seconds
    recv_task.cancel()  # stop receiving


async def close():
    print("Closing.....")
    await consumer.close()
    print("Closed")


asyncio.run(main())
asyncio.run(close())  # closing the client
The lines below are what control how long the client listens:

recv_task = asyncio.ensure_future(consumer.receive(on_event=on_event))
await asyncio.sleep(3)  # keep receiving for 3 seconds
recv_task.cancel()
You could increase the time as per your need.
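The same fixed-duration cutoff can also be written with asyncio.wait_for, which cancels the receive task for you once the timeout expires. A sketch, with consumer and on_event as defined above:

import asyncio


async def main():
    try:
        # wait_for cancels consumer.receive(...) when the timeout expires.
        await asyncio.wait_for(consumer.receive(on_event=on_event), timeout=3600)
    except asyncio.TimeoutError:
        print("Listening window elapsed")
    finally:
        await consumer.close()


asyncio.run(main())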
@Satya V, I tried option 2, but I am seeing the error:
There is no current event loop in thread 'MainThread'.
However, your code helped me a lot. I configured it with a Storage Account checkpoint:
import asyncio

from azure.eventhub.aio import EventHubConsumerClient
from azure.eventhub.extensions.checkpointstoreblobaio import BlobCheckpointStore

CONNECTION_STR = ''
EVENTHUB_NAME = ''
STORAGE_CONNECTION_STR = ''
BLOB_CONTAINER_NAME = ""


async def on_event(partition_context, event):
    print("Received event from partition: {}.".format(partition_context.partition_id))
    await partition_context.update_checkpoint(event)


async def receive(client):
    await client.receive(
        on_event=on_event,
        starting_position="-1",  # "-1" is from the beginning of the partition.
    )


async def main():
    checkpoint_store = BlobCheckpointStore.from_connection_string(STORAGE_CONNECTION_STR, BLOB_CONTAINER_NAME)
    client = EventHubConsumerClient.from_connection_string(
        CONNECTION_STR,
        consumer_group="$Default",
        eventhub_name=EVENTHUB_NAME,
        checkpoint_store=checkpoint_store,  # For load-balancing and checkpoint. Leave None for no load-balancing.
    )
    async with client:
        recv_task = asyncio.ensure_future(receive(client))
        await asyncio.sleep(4)  # keep receiving for 4 seconds
        recv_task.cancel()  # stop receiving
        await client.close()


async def close():
    print("Closing.....")
    print("Closed")


if __name__ == '__main__':
    asyncio.run(main())
    asyncio.run(close())  # closing the client
The goal is to allocate a thread and wait for a callback. A single thread running the while loop would spin forever. The difficulty is that we are not directly calling or controlling the callback, and we do not know in advance how long the remote server will take to invoke it.
I've tried to find a solution in the asyncio module using asyncio.Future, but without success.
from a_module import Server  # <a_module> is fictitious
import time


class App(Server):
    def __init__(self):
        self.response = None

    def send_requests(self):
        """Send request to remote server."""
        self.send_number_to_server(42)  # inherited from Server
        # This is going to loop forever. We should "suspend" the
        # current thread, allocate a new thread to wait for the
        # callback and then come back here to return the (not None)
        # response.
        while self.response is None:
            # Wait for the callback before terminating this method.
            time.sleep(1)  # seconds
        return self.response

    def callback(self, message):
        """Inherited from parent class 'Server'. When the request sent
        with App.send_requests has been processed by the remote server,
        this function is invoked in the background."""
        self.response = message


if __name__ == '__main__':
    app = App()
    response = app.send_requests()
    print(response)
Since callback is "invoked in the background", Server is presumably already running a background thread. In that case, you want your main thread to run the event loop, and the server's background thread to notify you when it is done. Assuming send_number_to_server is not blocking, you could do it like this:
import asyncio


class App(Server):
    def __init__(self):
        self._loop = asyncio.get_event_loop()
        self._future_resp = None

    async def send_requests(self):
        self.send_number_to_server(42)
        self._future_resp = asyncio.Future()
        resp = await self._future_resp
        self._future_resp = None
        return resp

    def callback(self, message):
        # called from a different thread
        self._loop.call_soon_threadsafe(self._future_resp.set_result, message)


async def main():
    app = App()
    response = await app.send_requests()
    print(response)


if __name__ == '__main__':
    asyncio.get_event_loop().run_until_complete(main())
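If the rest of the application is synchronous and the goal is only to get rid of the polling loop, a plain threading.Event does the same job without asyncio. A minimal sketch, assuming Server invokes callback on its own background thread:

import threading


class App(Server):
    def __init__(self):
        self._response = None
        self._done = threading.Event()

    def send_requests(self):
        self.send_number_to_server(42)  # inherited from Server
        self._done.wait()               # sleep until callback() fires
        return self._response

    def callback(self, message):
        self._response = message
        self._done.set()                # wake up send_requests()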
While using a websocket client to send test messages to a Django server, I cannot get a script that both sends and receives messages to work.
The following Python script is what I have attempted:
import websocket
import threading
import json
from time import sleep


# handle message event
def on_message(ws, message):
    print("message received: %s" % message)


# handle close event
def on_close(ws):
    print("channel closed")


# execute as main script
if __name__ == "__main__":
    websocket.enableTrace(True)

    # new app object connecting to headstation
    ws = websocket.WebSocketApp("ws://192.168.0.106:8000/?testI123",
                                on_message=on_message, on_close=on_close)

    # run in a new thread - kill if script ends
    ws_listener = threading.Thread(target=ws.run_forever())
    ws_listener.daemon = True

    # start second thread
    ws_listener.start()

    # attempt connection 5 times
    timeout = 5
    while not ws.sock.connected and timeout:
        sleep(1)
        timeout -= 1

    # error on timeout
    if timeout == 0:
        print("Connection to server timed out")

    print("test 1")

    # periodically send test message to server
    message_num = 0
    while ws.sock.connected:
        # send node id and message
        message = 'hello %d' % message_num
        ws.send(message)
        sleep(1)
        message_num += 1
This connects successfully (as indicated by the server) and receives messages sent from the server, but it does not send anything.
Periodically, something like this is displayed in the terminal:
send: b'\x8a\x84\xe2\xe9\xa8\xe2\x8f\xdc\xe2\x84'
If I simply use
ws = websocket.WebSocket()
ws.connect(url)
ws.send("hello")
then it works perfectly, suggesting that something is wrong with my little Python script displayed above.
Found the problem, stupid mistake of course:
ws_listener = threading.Thread(target=ws.run_forever())
should be:
ws_listener = threading.Thread(target=ws.run_forever)
without parentheses.
The first passes the result of calling ws.run_forever() as the target; the second sets ws.run_forever itself as the target, which was the intended outcome.
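The difference is easy to reproduce in isolation:

import threading
import time


def work():
    time.sleep(1)


t_wrong = threading.Thread(target=work())  # work() runs here and blocks; Thread gets None as its target
t_right = threading.Thread(target=work)    # the new thread itself will call work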
Let's say I want to create a chat-like application. A client can send text to the server and vice versa. The order of text exchanges can be arbitrary.
The server depends on another stream which controls the server response stream.
The gRPC stream is exposed as a Python generator. How can the server wait for client input and for input on the other stream at the same time? Normally one would use something like select(), but here we have generators.
I have some example code which implements the desired behavior, but it requires an additional thread on both the client and the server side. How can I achieve the same result without that thread?
Proto:
syntax = 'proto3';
service Scenario {
rpc Chat(stream DPong) returns (stream DPong) {}
}
message DPong {
string name = 1;
}
Server:
import random
import string
import threading
import time
from concurrent import futures

import grpc
import scenario_pb2
import scenario_pb2_grpc


class Scenario(scenario_pb2_grpc.ScenarioServicer):
    def Chat(self, request_iterator, context):
        def stream():
            while 1:
                time.sleep(1)
                yield random.choice(string.ascii_letters)

        output_stream = stream()

        def read_incoming():
            while 1:
                received = next(request_iterator)
                print('received: {}'.format(received))

        # consume the request stream on a separate thread
        thread = threading.Thread(target=read_incoming)
        thread.daemon = True
        thread.start()

        while 1:
            yield scenario_pb2.DPong(name=next(output_stream))


if __name__ == '__main__':
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    scenario_pb2_grpc.add_ScenarioServicer_to_server(Scenario(), server)
    server.add_insecure_port('[::]:50052')
    server.start()
    print('listening ...')
    while 1:
        time.sleep(1)
Client:
import threading
import time

import grpc
import scenario_pb2
import scenario_pb2_grpc


def run():
    channel = grpc.insecure_channel('localhost:50052')
    stub = scenario_pb2_grpc.ScenarioStub(channel)
    print('client connected')

    def stream():
        while 1:
            yield scenario_pb2.DPong(name=input('$ '))

    input_stream = stub.Chat(stream())

    def read_incoming():
        while 1:
            print('received: {}'.format(next(input_stream).name))

    # print server responses on a separate thread
    thread = threading.Thread(target=read_incoming)
    thread.daemon = True
    thread.start()

    while 1:
        time.sleep(1)


if __name__ == '__main__':
    print('client starting ...')
    run()
It is not currently possible to do this without spending the threads that you're spending. We're thinking about implementing enhancements that would allow implementations to avoid taking another thread, but those would be months away at the earliest.
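In the meantime, one way to keep the servicer logic in a single loop (while still spending the reader thread) is to funnel both event sources into one queue. This is only a sketch, not part of the gRPC API; the source tags and the one-second tick are illustrative:

import queue
import threading


def merged_events(request_iterator):
    """Yield ('client', msg) for incoming messages and ('tick', None) once per idle second."""
    events = queue.Queue()

    def read_incoming():
        for received in request_iterator:
            events.put(('client', received))

    threading.Thread(target=read_incoming, daemon=True).start()

    while True:
        try:
            yield events.get(timeout=1.0)
        except queue.Empty:
            yield ('tick', None)  # no client input for a second; emit server-driven output instead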
I'm currently playing with aiohttp to see how it performs as a server application for a mobile app with a websocket connection.
Here is a simple "Hello world" example (as a gist here):
import asyncio
import aiohttp
from aiohttp import web


class WebsocketEchoHandler:

    @asyncio.coroutine
    def __call__(self, request):
        ws = web.WebSocketResponse()
        ws.start(request)

        print('Connection opened')
        try:
            while True:
                msg = yield from ws.receive()
                ws.send_str(msg.data + '/answer')
        except:
            pass
        finally:
            print('Connection closed')
        return ws


if __name__ == "__main__":
    app = aiohttp.web.Application()
    app.router.add_route('GET', '/ws', WebsocketEchoHandler())

    loop = asyncio.get_event_loop()
    handler = app.make_handler()
    f = loop.create_server(
        handler,
        '127.0.0.1',
        8080,
    )
    srv = loop.run_until_complete(f)
    print("Server started at {sock[0]}:{sock[1]}".format(
        sock=srv.sockets[0].getsockname()))
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    finally:
        loop.run_until_complete(handler.finish_connections(1.0))
        srv.close()
        loop.run_until_complete(srv.wait_closed())
        loop.run_until_complete(app.finish())
        loop.close()
The problem
Now I would like to use the structure described below (node server = python aiohttp). More specifically, I want to use the Redis Pub/Sub mechanism with asyncio-redis to read from and write to both the websocket connection and Redis in my WebsocketEchoHandler.
WebsocketEchoHandler is a dead simple loop, so I'm not sure how this should be done. Using Tornado and brükva I would just use callbacks.
Extra (offtopic perhaps) question
Since I'm using Redis already, which of the two approaches should I take:
Like in a "classic" web app, have a controller/view for everything and use Redis just for messaging etc.
The web app should be just a layer between the client and Redis, with Redis also used as a task queue (the simplest being Python RQ). Every request would be delegated to workers.
EDIT
Image from http://goldfirestudios.com/blog/136/Horizontally-Scaling-Node.js-and-WebSockets-with-Redis
EDIT 2
It seems that I need to clarify.
The websocket-only handler is shown above.
A Redis Pub/Sub handler might look like this:
class WebsocketEchoHandler:

    @asyncio.coroutine
    def __call__(self, request):
        ws = web.WebSocketResponse()
        ws.start(request)

        connection = yield from asyncio_redis.Connection.create(host='127.0.0.1', port=6379)
        subscriber = yield from connection.start_subscribe()
        yield from subscriber.subscribe(['ch1', 'ch2'])

        print('Connection opened')
        try:
            while True:
                msg = yield from subscriber.next_published()
                ws.send_str(msg.value + '/answer')
        except:
            pass
        finally:
            print('Connection closed')
        return ws
This handler just subscribes to the Redis channels ch1 and ch2 and sends every message received from those channels to the websocket.
I want to have this handler:
class WebsocketEchoHandler:

    @asyncio.coroutine
    def __call__(self, request):
        ws = web.WebSocketResponse()
        ws.start(request)

        connection = yield from asyncio_redis.Connection.create(host='127.0.0.1', port=6379)
        subscriber = yield from connection.start_subscribe()
        yield from subscriber.subscribe(['ch1', 'ch2'])

        print('Connection opened')
        try:
            while True:
                # If a message is received from Redis OR from the websocket
                msg_ws = yield from ws.receive()
                msg_redis = yield from subscriber.next_published()
                if msg_ws:
                    # push to redis / do something else
                    self.on_msg_from_ws(msg_ws)
                if msg_redis:
                    self.on_msg_from_redis(msg_redis)
        except:
            pass
        finally:
            print('Connection closed')
        return ws
But the following code is always executed sequentially, so reading from the websocket blocks reading from Redis:

msg_ws = yield from ws.receive()
msg_redis = yield from subscriber.next_published()

I want the reads to be event-driven, where an event is a message received from either of the two sources.
You should use two while loops - one that handles messages from the websocket, and one that handles messages from redis. Your main handler can just kick off two coroutines, one handling each loop, and then wait on both of them:
class WebsocketEchoHandler:

    @asyncio.coroutine
    def __call__(self, request):
        ws = web.WebSocketResponse()
        ws.start(request)

        connection = yield from asyncio_redis.Connection.create(host='127.0.0.1', port=6379)
        subscriber = yield from connection.start_subscribe()
        yield from subscriber.subscribe(['ch1', 'ch2'])

        print('Connection opened')
        try:
            # Kick off both coroutines in parallel, and then block
            # until both are completed.
            yield from asyncio.gather(self.handle_ws(ws), self.handle_redis(subscriber))
        except Exception as e:  # Don't do except: pass
            import traceback
            traceback.print_exc()
        finally:
            print('Connection closed')
        return ws

    @asyncio.coroutine
    def handle_ws(self, ws):
        while True:
            msg_ws = yield from ws.receive()
            if msg_ws:
                self.on_msg_from_ws(msg_ws)

    @asyncio.coroutine
    def handle_redis(self, subscriber):
        while True:
            msg_redis = yield from subscriber.next_published()
            if msg_redis:
                self.on_msg_from_redis(msg_redis)
This way you can read from either of the two sources without having to care about the other.
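If you instead want a single loop that reacts to whichever source fires first, here is a sketch using asyncio.wait with FIRST_COMPLETED (written in modern async/await syntax; ws and subscriber are as above):

import asyncio


async def handle_both(ws, subscriber):
    ws_task = asyncio.ensure_future(ws.receive())
    redis_task = asyncio.ensure_future(subscriber.next_published())
    while True:
        done, pending = await asyncio.wait(
            {ws_task, redis_task}, return_when=asyncio.FIRST_COMPLETED)
        if ws_task in done:
            msg_ws = ws_task.result()
            # ... handle the websocket message, then re-arm the reader
            ws_task = asyncio.ensure_future(ws.receive())
        if redis_task in done:
            msg_redis = redis_task.result()
            # ... handle the Redis message, then re-arm the reader
            redis_task = asyncio.ensure_future(subscriber.next_published())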
More recently, we can use async/await in Python 3.5 and above:
import asyncio
import json

from aiohttp import WSMsgType

# ws, ch1 and the r_cons token->websocket registry come from the surrounding app.


async def task1(ws):
    async for msg in ws:
        if msg.type == WSMsgType.TEXT:
            data = msg.data
            print(data)
            if data:
                await ws.send_str('pong')


# ch is a redis channel
async def task2(ch):
    async for msg in ch.iter(encoding="utf-8", decoder=json.loads):
        print("receiving", msg)
        user_token = msg['token']
        if user_token in r_cons.keys():
            _ws = r_cons[user_token]
            await _ws.send_json(msg)


coroutines = list()
coroutines.append(task1(ws))
coroutines.append(task2(ch1))

await asyncio.gather(*coroutines)
This is what I do when the websocket needs to wait for messages from multiple sources. The main point is using asyncio.gather to run the two coroutines together, as @dano mentioned.