Python: Asyncio NATS.io blocking

I'm having trouble making Python asyncio NATS.io run sequentially. I have two classes: Account and Bridge.
Account holds the application logic and communicates with an external service via NATS.io through Bridge.
Main file:
loop = asyncio.get_event_loop()
account = Account(loop, options)
asyncio.async(account.start())
loop.run_forever()
Account class:
class Account:
    bridge = Bridge()

    def connect(self):
        result = self.bridge.connect(self.id)
        return result
Bridge class:
def connect(self, account_id):
    data = None
    try:
        response = yield from self.nc.timed_request("bank.account.connect",
                                                    BankRequest(
                                                        method="connect",
                                                        data={...}
                                                    ), 10)
        data = json.loads(response.data.decode())
    except ErrTimeout:
        status = Messages.REQUEST_TIMED_OUT
    return Result(data=data)
I need to call account.connect() from anywhere inside the Account class and get the result of the connection (sequentially). Right now I'm getting a generator object instead.

Your connect() methods should probably be coroutines:
class Account:
    bridge = Bridge()  # you probably want to put this in `def __init__(self)`!

    @asyncio.coroutine
    def connect(self):
        result = yield from self.bridge.connect(self.id)
        return result

class Bridge:
    @asyncio.coroutine
    def connect(self, account_id):
        data = None
        try:
            response = yield from self.nc.timed_request("bank.account.connect",
                                                        BankRequest(
                                                            method="connect",
                                                            data={...}
                                                        ), 10)
            data = json.loads(response.data.decode())
        except ErrTimeout:
            status = Messages.REQUEST_TIMED_OUT
        return Result(data=data)
and:
resp = yield from account.connect()
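For reference, on Python 3.5+ the same answer reads more naturally with async/await; a minimal sketch, assuming the same Bridge and self.id as above:
class Account:
    def __init__(self):
        self.bridge = Bridge()  # per-instance, as suggested above

    async def connect(self):
        # await, like yield from, runs the coroutine to completion and
        # hands back its return value instead of a generator object
        return await self.bridge.connect(self.id)
and the call site becomes resp = await account.connect().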

How to create multiple asyncio tasks and get their return values

I'm beginning with asyncio and I'm struggling to await multiple return values from different functions.
I want to do something like threads but using asyncio, since I'm using async libraries from Azure.
I need to launch some functions from my Thread class, run them concurrently, await their results, and check that the results are good (I'm in a Django test).
Here is my current code:
class DeviceThread(threading.Thread):
    def __init__(self, test_case, device_id, module_id, cert_prim, cert_sec):
        threading.Thread.__init__(self)
        self.test_case = test_case
        self.device_id = device_id
        self.module_id = module_id
        self.cert_prim = cert_prim
        self.cert_sec = cert_sec

    async def get_twin(self, device):
        try:
            # 0 - Get the twin for the device
            twin = await device.get_twin()
            info(twin)
            return twin['desired']['test'] == "test1device"
        except Exception as e:
            info(format_exc())
            return False

    async def receive_message_cloud(self, device):
        try:
            # 1 - Waiting for the first message from the server (blocking call)
            message = await device.receive_message_cloud()
            return message.data.decode() == "testmessage1"
        except Exception as e:
            info(format_exc())
            return False

    async def send_message_cloud(self, device):
        try:
            # 2 - Sending a message to the backend
            await device.send_message_cloud("testmessage1")
            return True
        except Exception as e:
            info(format_exc())
            return False

    async def receive_direct_call_and_respond(self, device):
        try:
            # 3 - Receiving a direct method call from the backend
            method_request = await device.receive_method_request()
            # 4 - Sending the result of the direct call
            payload = {"test": "testok"}
            status = 200
            await device.send_method_response(method_request, status, payload)
            return (method_request.name == "testmethod1") and (method_request.payload == "testpayload1")
        except Exception as e:
            info(format_exc())
            return False

    async def update_twin_properties(self, device):
        try:
            # 5 - Updating twin properties
            new_properties = {'test', 'test2device'}
            device.patch_twin_reported_properties(new_properties)
            return True
        except Exception as e:
            info(format_exc())
            return False

    async def perform(self, device):
        # Creating the tasks that will execute in parallel
        twin_get_res = asyncio.create_task(self.get_twin(device))
        rec_mess_cloud = asyncio.create_task(self.receive_message_cloud(device))
        send_mess_cloud = asyncio.create_task(self.send_message_cloud(device))
        rec_dir_call = asyncio.create_task(self.receive_direct_call_and_respond(device))
        up_twin_prop = asyncio.create_task(self.update_twin_properties(device))
        # Verify the execution of the routines when done
        self.test_case.assertTrue(await twin_get_res)
        self.test_case.assertTrue(await rec_mess_cloud)
        self.test_case.assertTrue(await send_mess_cloud)
        self.test_case.assertTrue(await rec_dir_call)
        self.test_case.assertTrue(await up_twin_prop)

    def run(self):
        try:
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
            # Writing the cert content to disk
            open("cert.pem", "w").write(self.cert_prim)
            # Creating a device
            device = IOTHubDevice()
            device.authenticate_device(self.device_id, "cert.pem")
            # Removing the cert previously created
            remove("cert.pem")
            asyncio.run(self.perform(device))
        except Exception as e:
            info(format_exc())
            self.test_case.assertFalse(True)
As you've seen, I'm in a thread and I want to verify some functions of Azure IoT Hub.
But I get this error:
RuntimeError: Task got Future attached to a different loop
So my question is: how can I get all these tasks running in the loop and get their individual results?
Thanks for your answer!
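One likely cause: asyncio.run() creates a fresh event loop, while IOTHubDevice here is constructed when the loop from asyncio.new_event_loop() is current, so any futures the device creates internally bind to a loop other than the one perform() runs on. A minimal sketch of one fix, assuming the device binds to whichever loop is current at construction time, is to build the device inside the coroutine passed to asyncio.run():
def run(self):
    try:
        open("cert.pem", "w").write(self.cert_prim)

        async def main():
            # Created inside asyncio.run(), so the device's futures
            # attach to the loop that perform() actually runs on
            device = IOTHubDevice()
            device.authenticate_device(self.device_id, "cert.pem")
            remove("cert.pem")
            await self.perform(device)

        asyncio.run(main())
    except Exception:
        info(format_exc())
        self.test_case.assertFalse(True)
Alternatively, keep the explicit loop and call loop.run_until_complete(self.perform(device)) instead of asyncio.run().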

Python3 asyncio - callback for add_done_callback does not update self variable in server class

I have two servers, created with asyncio.start_server:
asyncio.start_server(self.handle_connection, host = host, port = port)
and running in one loop:
loop.run_until_complete(asyncio.gather(server1, server2))
loop.run_forever()
I'm using an asyncio.Queue to communicate between the servers. Messages from Server2, added via queue.put(msg), are successfully received by queue.get() in Server1. I'm running queue.get() via asyncio.ensure_future and using the method below as the callback for add_done_callback in Server1:
def callback(self, future):
    msg = future.result()
    self.msg = msg
But this callback is not working as expected - self.msg does not update. What am I doing wrong?
UPDATED with additional code to show a fuller example:
class Queue(object):
    def __init__(self, loop, maxsize: int):
        self.instance = asyncio.Queue(loop = loop, maxsize = maxsize)

    async def put(self, data):
        await self.instance.put(data)

    async def get(self):
        data = await self.instance.get()
        self.instance.task_done()
        return data

    @staticmethod
    def get_instance():
        return Queue(loop = asyncio.get_event_loop(), maxsize = 10)
Server class:
class BaseServer(object):
    def __init__(self, host, port):
        self.instance = asyncio.start_server(self.handle_connection, host = host, port = port)

    async def handle_connection(self, reader: StreamReader, writer: StreamWriter):
        pass

    def get_instance(self):
        return self.instance

    @staticmethod
    def create():
        return BaseServer(None, None)
Next I run the servers:
loop.run_until_complete(asyncio.gather(server1.get_instance(), server2.get_instance()))
loop.run_forever()
In the handle_connection of server2 I call queue.put(msg); in the handle_connection of server1 I register queue.get() as a task:
task_queue = asyncio.ensure_future(queue.get())
task_queue.add_done_callback(self.process_queue)
The process_queue method of server1:
def process_queue(self, future):
    msg = future.result()
    self.msg = msg
The handle_connection method of server1:
async def handle_connection(self, reader: StreamReader, writer: StreamWriter):
    task_queue = asyncio.ensure_future(queue.get())
    task_queue.add_done_callback(self.process_queue)
    while self.msg != SPECIAL_VALUE:
        # doing something
Although task_queue completes and self.process_queue is called, self.msg never updates.
Basically, since you are using an asynchronous structure, you can directly await the result:
async def handle_connection(self, reader: StreamReader, writer: StreamWriter):
    msg = await queue.get()
    process_queue(msg)  # change it to accept the real value instead of a future
    # do something
Note also that the while loop in your handle_connection contains no await: if nothing in that loop yields control back to the event loop, the pending queue.get() task and its done callback never get a chance to run. Awaiting the queue directly sidesteps that problem.
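A fuller sketch of that approach, assuming the queue wrapper and SPECIAL_VALUE from the question:
async def handle_connection(self, reader: StreamReader, writer: StreamWriter):
    while True:
        msg = await queue.get()  # suspends here, letting other tasks run
        self.msg = msg
        if msg == SPECIAL_VALUE:
            break
        # doing something with msg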

Gradually create async tasks and wait for all of them to complete

I am trying to write a program that makes a lot of websocket connections to a server I've created:
class WebSocketClient():
    @asyncio.coroutine
    def run(self):
        print(self.client_id, 'Connecting')
        ws = yield from aiohttp.ws_connect(self.url)
        print(self.client_id, 'Connected')
        print(self.client_id, 'Sending the message')
        ws.send_str(self.make_new_message())
        while not ws.closed:
            msg = yield from ws.receive()
            if msg.tp == aiohttp.MsgType.text:
                print(self.client_id, 'Received the echo')
                yield from ws.close()
                break
        print(self.client_id, 'Closed')

@asyncio.coroutine
def make_clients():
    for client_id in range(args.clients):
        yield from WebSocketClient(client_id, WS_CHANNEL_URL.format(client_id=client_id)).run()

event_loop.run_until_complete(make_clients())
The problem is that all the clients do their jobs one after another:
0 Connecting
0 Connected
0 Sending the message
0 Received the echo
0 Closed
1 Connecting
1 Connected
1 Sending the message
1 Received the echo
1 Closed
...
I've tried to use asyncio.wait, but then all the clients start together. I want them to be created gradually, each one connecting to the server as soon as it is created, while new clients keep being created.
What approach should I apply to accomplish this?
Using asyncio.wait is a good approach. (In your version, make_clients yields from each run() in turn, so one client must finish before the next starts.) You can combine it with asyncio.ensure_future and asyncio.sleep to create tasks gradually:
@asyncio.coroutine
def make_clients(nb_clients, delay):
    futures = []
    for client_id in range(nb_clients):
        url = WS_CHANNEL_URL.format(client_id=client_id)
        coro = WebSocketClient(client_id, url).run()
        futures.append(asyncio.ensure_future(coro))
        yield from asyncio.sleep(delay)
    yield from asyncio.wait(futures)
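For example, to start ten clients half a second apart (reusing event_loop from the question):
event_loop.run_until_complete(make_clients(10, 0.5))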
EDIT: I implemented a FutureSet class that should do what you want. This set can be filled with futures and removes them automatically when they're done. It is also possible to wait for all the futures to complete.
class FutureSet:
    def __init__(self, maxsize, *, loop=None):
        self._set = set()
        self._loop = loop
        self._maxsize = maxsize
        self._waiters = []

    @asyncio.coroutine
    def add(self, item):
        if not asyncio.iscoroutine(item) and \
           not isinstance(item, asyncio.Future):
            raise ValueError('Expecting a coroutine or a Future')
        if item in self._set:
            return
        while len(self._set) >= self._maxsize:
            waiter = asyncio.Future(loop=self._loop)
            self._waiters.append(waiter)
            yield from waiter
        item = asyncio.ensure_future(item, loop=self._loop)
        self._set.add(item)
        item.add_done_callback(self._remove)

    def _remove(self, item):
        if not item.done():
            raise ValueError('Cannot remove a pending Future')
        self._set.remove(item)
        if self._waiters:
            waiter = self._waiters.pop(0)
            waiter.set_result(None)

    @asyncio.coroutine
    def wait(self):
        # 'return asyncio.wait(...)' would hand back an unawaited coroutine,
        # so it has to be yielded from here
        return (yield from asyncio.wait(self._set))
Example:
@asyncio.coroutine
def make_clients(nb_clients, limit):
    futures = FutureSet(maxsize=limit)
    for client_id in range(nb_clients):
        url = WS_CHANNEL_URL.format(client_id=client_id)
        client = WebSocketClient(client_id, url)
        yield from futures.add(client.run())
    yield from futures.wait()
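For what it's worth, on Python 3.5+ the same bounded concurrency can be sketched without a custom class, using asyncio.Semaphore (an alternative, not part of the original answer):
async def make_clients(nb_clients, limit):
    sem = asyncio.Semaphore(limit)

    async def bounded_run(client_id):
        async with sem:  # at most `limit` clients running at once
            url = WS_CHANNEL_URL.format(client_id=client_id)
            await WebSocketClient(client_id, url).run()

    await asyncio.gather(*(bounded_run(i) for i in range(nb_clients)))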

Consuming on multiple topics with pika and TornadoConnection

I have a class for a pika consumer client that's based on pika's example code for the TornadoConnection. I'm trying to consume from a topic queue. The problem is that since the connection is established in an asynchronous way, there is no way for me to know when the channel is established or when the queue is declared. My class:
class PikaClient(object):
    """ Based on:
    http://pika.readthedocs.org/en/latest/examples/tornado_consumer.html
    https://reminiscential.wordpress.com/2012/04/07/realtime-notification-delivery-using-rabbitmq-tornado-and-websocket/
    """
    def __init__(self, exchange, exchange_type):
        self._connection = None
        self._channel = None
        self._closing = False
        self._consumer_tag = None
        self.exchange = exchange
        self.exchange_type = exchange_type
        self.queue = None
        self.event_listeners = set([])

    def connect(self):
        logger.info('Connecting to RabbitMQ')
        cred = pika.PlainCredentials('guest', 'guest')
        param = pika.ConnectionParameters(
            host='localhost',
            port=5672,
            virtual_host='/',
            credentials=cred,
        )
        return pika.adapters.TornadoConnection(param,
                                               on_open_callback=self.on_connection_open)

    def close_connection(self):
        logger.info('Closing connection')
        self._connection.close()

    def on_connection_closed(self, connection, reply_code, reply_text):
        self._channel = None
        if not self._closing:
            logger.warning('Connection closed, reopening in 5 seconds: (%s) %s',
                           reply_code, reply_text)
            self._connection.add_timeout(5, self.reconnect)

    def on_connection_open(self, connection):
        logger.info('Connected to RabbitMQ')
        self._connection.add_on_close_callback(self.on_connection_closed)
        self._connection.channel(self.on_channel_open)

    def reconnect(self):
        if not self._closing:
            # Create a new connection
            self._connection = self.connect()

    def on_channel_closed(self, channel, reply_code, reply_text):
        logger.warning('Channel %i was closed: (%s) %s',
                       channel, reply_code, reply_text)
        self._connection.close()

    def on_channel_open(self, channel):
        logger.info('Channel open, declaring exchange')
        self._channel = channel
        self._channel.add_on_close_callback(self.on_channel_closed)
        self._channel.exchange_declare(self.on_exchange_declareok,
                                       self.exchange,
                                       self.exchange_type,
                                       passive=True,
                                       )

    def on_exchange_declareok(self, unused_frame):
        logger.info('Exchange declared, declaring queue')
        self._channel.queue_declare(self.on_queue_declareok,
                                    exclusive=True,
                                    auto_delete=True,
                                    )

    def on_queue_declareok(self, method_frame):
        self.queue = method_frame.method.queue

    def bind_key(self, routing_key):
        logger.info('Binding %s to %s with %s',
                    self.exchange, self.queue, routing_key)
        self._channel.queue_bind(self.on_bindok, self.queue,
                                 self.exchange, routing_key)

    def add_on_cancel_callback(self):
        logger.info('Adding consumer cancellation callback')
        self._channel.add_on_cancel_callback(self.on_consumer_cancelled)

    def on_consumer_cancelled(self, method_frame):
        logger.info('Consumer was cancelled remotely, shutting down: %r',
                    method_frame)
        if self._channel:
            self._channel.close()

    def on_message(self, unused_channel, basic_deliver, properties, body):
        logger.debug('Received message # %s from %s',
                     basic_deliver.delivery_tag, properties.app_id)
        #self.notify_listeners(body)

    def on_cancelok(self, unused_frame):
        logger.info('RabbitMQ acknowledged the cancellation of the consumer')
        self.close_channel()

    def stop_consuming(self):
        if self._channel:
            logger.info('Sending a Basic.Cancel RPC command to RabbitMQ')
            self._channel.basic_cancel(self.on_cancelok, self._consumer_tag)

    def start_consuming(self):
        logger.info('Issuing consumer related RPC commands')
        self.add_on_cancel_callback()
        self._consumer_tag = self._channel.basic_consume(self.on_message, no_ack=True)

    def on_bindok(self, unused_frame):
        logger.info('Queue bound')
        self.start_consuming()

    def close_channel(self):
        logger.info('Closing the channel')
        self._channel.close()

    def open_channel(self):
        logger.info('Creating a new channel')
        self._connection.channel(on_open_callback=self.on_channel_open)

    def run(self):
        self._connection = self.connect()

    def stop(self):
        logger.info('Stopping')
        self._closing = True
        self.stop_consuming()
        logger.info('Stopped')
An example of code using it (inside a WebSocketHandler.open):
self.pc = PikaClient('agents', 'topic')
self.pc.run()
self.pc.bind_key('mytopic.*')
When trying to run this, bind_key throws an exception because _channel is still None. But I haven't found a way to block until the channel and queue are established. Is there any way to do this with a dynamic list of topics (one that might change after the consumer starts running)?
You actually do have a way to know when the queue is established - the method on_queue_declareok(). That callback will be executed once the self.queue_declare call has finished, and self.queue_declare is executed once _channel.exchange_declare has finished, etc. You can follow the chain all the way back to your run method:
run -> connect -> on_connection_open -> _connection.channel -> on_channel_open -> _channel.exchange_declare -> on_exchange_declareok -> _channel.queue_declare -> on_queue_declareok
So, you just add your call(s) to bind_key to on_queue_declareok, and that will trigger a call to on_bindok, which will call start_consuming. At that point your client is actually listening for messages. If you want to be able to dynamically provide topics, just take them in the constructor of PikaClient. Then you can call bind_key on each inside on_queue_declareok. You'd also need to add a flag to indicate you've already started consuming, so you don't try to do that twice.
Something like this (assume all methods not shown below stay the same):
def __init__(self, exchange, exchange_type, topics=None):
    self._topics = [] if topics is None else topics
    self._connection = None
    self._channel = None
    self._closing = False
    self._consumer_tag = None
    self._consuming = False
    self.exchange = exchange
    self.exchange_type = exchange_type
    self.queue = None
    self.event_listeners = set([])

def on_queue_declareok(self, method_frame):
    self.queue = method_frame.method.queue
    for topic in self._topics:
        self.bind_key(topic)

def start_consuming(self):
    if self._consuming:
        return
    logger.info('Issuing consumer related RPC commands')
    self.add_on_cancel_callback()
    self._consumer_tag = self._channel.basic_consume(self.on_message, no_ack=True)
    self._consuming = True
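With the topics passed up front, the calling code from the question no longer needs to call bind_key itself:
self.pc = PikaClient('agents', 'topic', topics=['mytopic.*'])
self.pc.run()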

How to get client connection object in tornado websocket, Python

I am using Tornado websockets for some simple test code.
In the test code, I want to get hold of the tornado.websocket.WebSocketHandler instance.
For example, I tried the approach below.
class ConvPlayerInterface(object):
    class WebsocketHandler(tornado.websocket.WebSocketHandler):
        client = None
        queue = ipcQueue.IpcQueue()

        def open(self):
            print 'new connection'
            self.client = self  # in my simple code, it handles only one client.
            self.write_message("Connection Open")

        def on_message(self, message):
            self.queue.put(message)

        def on_close(self):
            print 'connection closed'

    def __init__(self, url='/ws'):
        self.application = tornado.web.Application([(url, self.WebsocketHandler),])
        self.httpServer = tornado.httpserver.HTTPServer(self.application)
        self.httpServer.listen(8888)
        self.queue = self.WebsocketHandler.queue
        self.ioLoop = threading.Thread(target = tornado.ioloop.IOLoop.instance().start)

    def start(self):
        self.ioLoop.start()

    def get(self):
        return self.queue.get()

    def put(self, command):
        self.WebsocketHandler.client.write_message(command)
But at the point where put() calls self.WebsocketHandler.client.write_message(command), Python says client is None.
Any advice?
And what is the usual way to get hold of the client connection handler object in Tornado?
In this part of your code:
def put(self, command):
    self.WebsocketHandler.client.write_message(command)
you are accessing the WebsocketHandler class, not an instance of it, and the class attribute "client" of WebsocketHandler is None, as expected: the assignment self.client = self in open() creates an instance attribute on one handler rather than updating the class attribute.
A WebsocketHandler instance is created for each request Tornado accepts, so there can be several websocket handlers alive simultaneously.
If you really want to handle only one connection, you can do something like this:
class ConvPlayerInterface(object):
    the_only_handler = None

    class WebsocketHandler(tornado.websocket.WebSocketHandler):
        client = None
        queue = ipcQueue.IpcQueue()

        def open(self):
            print 'new connection'
            ConvPlayerInterface.the_only_handler = self
            self.write_message("Connection Open")

        def on_message(self, message):
            self.queue.put(message)

        def on_close(self):
            ConvPlayerInterface.the_only_handler = None
            print 'connection closed'

    def __init__(self, url='/ws'):
        self.application = tornado.web.Application([(url, self.WebsocketHandler),])
        self.httpServer = tornado.httpserver.HTTPServer(self.application)
        self.httpServer.listen(8888)
        self.queue = self.WebsocketHandler.queue
        self.ioLoop = threading.Thread(target = tornado.ioloop.IOLoop.instance().start)

    def start(self):
        self.ioLoop.start()

    def get(self):
        return self.queue.get()

    def put(self, command):
        if self.the_only_handler is not None:
            self.the_only_handler.write_message(command)
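One caveat with this design: the IOLoop runs in its own thread, and write_message is not thread-safe, so a put() coming from another thread should hand the write over to the loop with add_callback (the method Tornado documents as safe to call from other threads). A sketch:
def put(self, command):
    if self.the_only_handler is not None:
        tornado.ioloop.IOLoop.instance().add_callback(
            self.the_only_handler.write_message, command)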
