I am writing a program that accepts RPC requests over AMQP for executing network requests (CoAP). When processing RPC requests, the aioamqp callback generates tasks that are responsible for network IO. These can be considered background tasks that will run indefinitely, streaming network responses over AMQP (in this case one RPC request triggers an RPC response plus data streaming).
I noticed that in my original code the network task would be destroyed after seemingly random time intervals (before it was finished); asyncio would then print the warning "Task was destroyed but it is pending!". This issue is similar to the one described here: https://bugs.python.org/issue21163.
For now I have circumvented the issue by storing a hard reference in a module-level list, which prevents the GC from destroying the task object. However, I was wondering if there is a better workaround? Ideally I would want to await the task in the RPC callback, but I noticed that this prevents any further AMQP operations from completing: creating a new AMQP channel stalls, and receiving RPC requests over AMQP also stalls. I am unsure what is causing this stalling, however (since the callback is itself a coroutine, I would not expect awaiting inside it to stall the entire aioamqp library).
I am posting the source below for the RPC client and server; both are based on the aioamqp/aiocoap examples. In the server, on_rpc_request is the AMQP RPC callback and send_coap_obs_request is the networking coroutine that gets destroyed when the 'obs_tasks.append(task)' statement is removed.
client.py:
"""
CoAP RPC client, based on aioamqp implementation of RPC examples from RabbitMQ tutorial
"""
import base64
import json
import uuid
import asyncio
import aioamqp
class CoAPRpcClient(object):
def __init__(self):
self.transport = None
self.protocol = None
self.channel = None
self.callback_queue = None
self.waiter = asyncio.Event()
async def connect(self):
""" an `__init__` method can't be a coroutine"""
self.transport, self.protocol = await aioamqp.connect()
self.channel = await self.protocol.channel()
result = await self.channel.queue_declare(queue_name='', exclusive=True)
self.callback_queue = result['queue']
await self.channel.basic_consume(
self.on_response,
no_ack=True,
queue_name=self.callback_queue,
)
async def on_response(self, channel, body, envelope, properties):
if self.corr_id == properties.correlation_id:
self.response = body
self.waiter.set()
async def call(self, n):
if not self.protocol:
await self.connect()
self.response = None
self.corr_id = str(uuid.uuid4())
await self.channel.basic_publish(
payload=str(n),
exchange_name='',
routing_key='coap_request_rpc_queue',
properties={
'reply_to': self.callback_queue,
'correlation_id': self.corr_id,
},
)
await self.waiter.wait()
await self.protocol.close()
return json.loads(self.response)
async def rpc_client():
coap_rpc = CoAPRpcClient()
request_dict = {}
request_dict_json = json.dumps(request_dict)
print(" [x] Send RPC coap_request({})".format(request_dict_json))
response_dict = await coap_rpc.call(request_dict_json)
print(" [.] Got {}".format(response_dict))
asyncio.get_event_loop().run_until_complete(rpc_client())
server.py:
"""
CoAP RPC server, based on aioamqp implementation of RPC examples from RabbitMQ tutorial
"""
import base64
import json
import socket
import sys
import logging
import warnings
import asyncio
import aioamqp
import aiocoap
amqp_protocol = None
coap_client_context = None
obs_tasks = []
AMQP_COAP_NOTIFICATIONS_EXCHANGE_NAME = 'topic_coap'
AMQP_COAP_NOTIFICATIONS_TOPIC_NAME = 'topic'
AMQP_COAP_NOTIFICATIONS_ROUTING_KEY = 'coap.response'
def create_response_dict(coap_request, coap_response):
response_dict = {'request_uri': "", 'code': 0}
response_dict['request_uri'] = coap_request.get_request_uri()
response_dict['code'] = coap_response.code
if len(coap_response.payload) > 0:
response_dict['payload'] = base64.b64encode(coap_response.payload).decode('utf-8')
return response_dict
async def handle_coap_response(amqp_envelope, amqp_properties, coap_request, coap_response):
# create response dict:
response_dict = create_response_dict(coap_request, coap_response)
message = json.dumps(response_dict)
# create new channel:
global amqp_protocol
amqp_channel = await amqp_protocol.channel()
await amqp_channel.basic_publish(
payload=message,
exchange_name='',
routing_key=amqp_properties.reply_to,
properties={
'correlation_id': amqp_properties.correlation_id,
},
)
await amqp_channel.basic_client_ack(delivery_tag=amqp_envelope.delivery_tag)
print(" [.] handle_coap_response() published response: {}".format(response_dict))
def incoming_observation(coap_request, coap_response):
    asyncio.ensure_future(handle_coap_notification(coap_request, coap_response))
async def handle_coap_notification(coap_request, coap_response):
# create response dict:
response_dict = create_response_dict(coap_request, coap_response)
message = json.dumps(response_dict)
# create new channel:
global amqp_protocol
amqp_channel = await amqp_protocol.channel()
await amqp_channel.exchange(AMQP_COAP_NOTIFICATIONS_EXCHANGE_NAME, AMQP_COAP_NOTIFICATIONS_TOPIC_NAME)
await amqp_channel.publish(message, exchange_name=AMQP_COAP_NOTIFICATIONS_EXCHANGE_NAME, routing_key=AMQP_COAP_NOTIFICATIONS_ROUTING_KEY)
print(" [.] handle_coap_notification() published response: {}".format(response_dict))
async def send_coap_obs_request(amqp_envelope, amqp_properties, request_dict, coap_request):
observation_is_over = asyncio.Future()
try:
global coap_client_context
requester = coap_client_context.request(coap_request)
requester.observation.register_errback(observation_is_over.set_result)
requester.observation.register_callback(lambda data, coap_request=coap_request: incoming_observation(coap_request, data))
try:
print(" [..] Sending CoAP obs request: {}".format(request_dict))
coap_response = await requester.response
except socket.gaierror as e:
print("Name resolution error:", e, file=sys.stderr)
return
except OSError as e:
print("Error:", e, file=sys.stderr)
return
if coap_response.code.is_successful():
print(" [..] Received CoAP response: {}".format(coap_response))
await handle_coap_response(amqp_envelope, amqp_properties, coap_request, coap_response)
else:
print(coap_response.code, file=sys.stderr)
if coap_response.payload:
print(coap_response.payload.decode('utf-8'), file=sys.stderr)
sys.exit(1)
exit_reason = await observation_is_over
print("Observation is over: %r"%(exit_reason,), file=sys.stderr)
finally:
if not requester.response.done():
requester.response.cancel()
if not requester.observation.cancelled:
requester.observation.cancel()
async def on_rpc_request(amqp_channel, amqp_body, amqp_envelope, amqp_properties):
print(" [.] on_rpc_request(): received RPC request: {}".format(amqp_body))
request_dict = {} # hardcoded to vdna.be for SO example
aiocoap_code = aiocoap.GET
aiocoap_uri = "coap://vdna.be/obs"
aiocoap_payload = ""
# as we are ready to send the CoAP request, ack the client already indicating we have received the RPC request
await amqp_channel.basic_client_ack(delivery_tag=amqp_envelope.delivery_tag)
coap_request = aiocoap.Message(code=aiocoap_code, uri=aiocoap_uri, payload=aiocoap_payload)
coap_request.opt.observe = 0
task = asyncio.ensure_future(send_coap_obs_request(amqp_envelope, amqp_properties, request_dict, coap_request))
    # we have to keep a hard ref to this task, otherwise the Python garbage collector destroys the task before it is completed. See https://bugs.python.org/issue21163
# this is apparent from the "Task was destroyed but it is pending" exception thrown after random (lengthy) time intervals, probably the time interval is related to when the gc is triggered
# await task # this does not seem to work, as it prevents new amqp operations from executing (e.g. amqp channels do not get created)
# we are actually not interested in waiting for the task anyway, so instead just keep a hard ref to the task in the obs_tasks list
obs_tasks.append(task) # TODO: when do we remove the task from the list?
async def amqp_connect():
try:
(transport, protocol) = await aioamqp.connect('localhost', 5672)
print(" [x] Connected to AMQP broker")
return (transport, protocol)
except aioamqp.AmqpClosedConnection as ex:
print("closed connections: {}".format(ex))
raise ex
async def main():
"""Open AMQP connection to broker, subscribe to coap_request_rpc_queue and setup aiocoap client context """
try:
global amqp_protocol
(amqp_transport, amqp_protocol) = await amqp_connect()
channel = await amqp_protocol.channel()
await channel.queue_declare(queue_name='coap_request_rpc_queue')
await channel.basic_qos(prefetch_count=10, prefetch_size=0, connection_global=False)
await channel.basic_consume(on_rpc_request, queue_name='coap_request_rpc_queue')
print(" [x] Awaiting CoAP request RPC requests")
except aioamqp.AmqpClosedConnection as ex:
print("amqp_connect: closed connections: {}".format(ex))
exit()
global coap_client_context
coap_client_context = await aiocoap.Context.create_client_context()
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.set_debug(True)
    asyncio.ensure_future(main())
loop.run_forever()
When a task is scheduled, its _step callback is scheduled in the loop. That callback maintains a reference to the task through self. I have not checked the code, but I have high confidence that the loop maintains a reference to its callbacks. However, when a task awaits some awaitable or future, the _step callback is not scheduled. In that case, the task adds a done callback (which retains a reference to the task) to the future it is waiting on, but the loop does not retain references to tasks that are waiting for futures.
So long as something retains a reference to the future that the task is waiting on, all is well. However, if nothing retains a hard reference to the future, then the future can get garbage collected, and when that happens the task can get garbage collected.
So, I'd look for things that your task calls where the future the task is waiting on might not be referenced.
In general the future needs to be referenced so someone can set its result eventually, so it is very likely a bug if you have unreferenced futures.
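If a module-level list feels too ad hoc, a common variant of the same idea (a minimal sketch, not specific to aioamqp) is to keep the strong references in a set and have each task remove itself when it finishes, so the collection does not grow forever:

import asyncio

# Strong references to in-flight background tasks; this is what prevents
# the tasks from being garbage collected while they are still pending.
background_tasks = set()

def spawn_background_task(coro):
    # Schedule coro as a task and keep a hard reference until it completes.
    task = asyncio.ensure_future(coro)
    background_tasks.add(task)
    # discard() drops the reference once the task is done, so the set does
    # not grow without bound (unlike an append-only list).
    task.add_done_callback(background_tasks.discard)
    return task

In on_rpc_request this would replace the obs_tasks.append(task) line and also answers the "when do we remove the task from the list?" TODO.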
I have multiple coroutines, each of which waits for content in a queue before it starts processing.
The content for the queues is populated by channel subscribers whose only job is to receive messages and push an item into the appropriate queue.
After the data is consumed by one queue processor and new data is generated, it is dispatched to the appropriate message channel, where this process is repeated until the data is ready to be relayed to the API that provisions it.
import asyncio
from random import randint
from Models.ConsumerStrategies import Strategy
from Helpers.Log import Log
import Connectors.datastore as ds
import json
__name__ = "Consumer"
MIN = 1
MAX = 4
async def consume(configuration: dict, queue: str, processor: Strategy) -> None:
"""Consumes new items in queue and publish a message into the appropriate channel with the data generated for the next consumer,
if no new content is available sleep for a random number of seconds between MIN and MAX global variables
Args:
configuration (dict): configuration dictionary
queue (str): queue being consumed
processor (Strategy): consumer strategy
"""
logger = Log().get_logger(processor.__name__, configuration['logFolder'], configuration['logFormat'], configuration['USE'])
while True:
try:
ds_handle = await ds.get_datastore_handle(ds.get_uri(conf=configuration))
token = await ds_handle.lpop(queue)
if token is not None:
result = await processor.consume(json.loads(token), ds_handle)
status = await processor.relay(result, ds_handle)
logger.debug(status)
else:
wait_for = randint(MIN,MAX)
logger.debug(f'queue: {queue} empty waiting: {wait_for} before retry')
await asyncio.sleep(wait_for)
ds_handle.close()
except Exception as e:
logger.error(f"{e}")
logger.error(f"{e.with_traceback}")
What I'm noticing is that after a 24h run I'm getting these errors:
Task was destroyed but it is pending!
task: <Task pending name='Task-2' coro=<consume() running at Services/Consumer.py:26> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f86bc29cbe0>()]> cb=[_chain_future.<locals>._call_set_state() at asyncio/futures.py:391]>
Task was destroyed but it is pending!
task: <Task pending name='Task-426485' coro=<RedisConnection._read_data() done, defined at aioredis/connection.py:180> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f86bc29ccd0>()]> cb=[RedisConnection.__init__.<locals>.<lambda>() at aioredis/connection.py:168]>
I'm not sure how to interpret, resolve, or recover from these errors; my assumption is that I should probably switch to Redis streams instead of using channels and queues.
But, going back to this scenario: I have channel subscribers in different processes, while the consumers run in the same process as different tasks in the loop.
What I'm assuming is happening here is that, since the consumer is basically polling a queue, at some point the connection pool manager or Redis itself starts hanging up on the consumer's open connection, and the task gets cancelled.
I'm not seeing any further messages from that queue processor, but I also see that pending wait_for future, which I suspect may come from the subscriber's ensure_future on the message reader:
import asyncio
from multiprocessing import process
from Helpers.Log import Log
import Services.Metas as metas
import Models.SubscriberStrategies as processor
import Connectors.datastore as ds_linker
import Models.Exceptions as Exceptions
async def subscriber(conf: dict, channel: str, processor: processor.Strategy) -> None:
"""Subscription handler. Receives the channel name, datastore connection and a parsing strategy.
Creates a task that listens on the channel and process every message and processing strategy for the specific message
Args:
conf (dict): configuration dictionary
channel (str): channel to subscribe to
ds (aioredis.connection): connection handler to datastore
processor (processor.Strategy): processor message handler
"""
async def reader(ch):
while await ch.wait_message():
msg = await ch.get_json()
await processor.handle_message(msg=msg)
ds_uri = ds_linker.get_uri(conf=conf)
ds = await ds_linker.get_datastore_handle(ds_uri)
pub = await ds.subscribe(channel)
ch = pub[0]
tsk = asyncio.ensure_future(reader(ch))
await tsk
I could use some help sorting this out and properly understanding what's happening under the hood. Thanks.
It took a few days just to reproduce the issue; I found people with the same problem in the issues of the aioredis GitHub repo.
So I had to go through all the places where connections to Redis are opened and closed and make sure I added:
ds_handle.close()
await ds_handle.wait_closed()
I also proceeded to improve the exception management in the consumer:
while True:
try:
ds_handle = await ds.get_datastore_handle(ds.get_uri(conf=configuration))
token = await ds_handle.lpop(queue)
if token is not None:
result = await processor.consume(json.loads(token), ds_handle)
status = await processor.relay(result, ds_handle)
logger.debug(status)
else:
wait_for = randint(MIN,MAX)
logger.debug(f'queue: {queue} empty waiting: {wait_for} before retry')
await asyncio.sleep(wait_for)
except Exception as e:
logger.error(f"{e}")
logger.error(f"{traceback.print_exc()}")
finally:
ds_handle.close()
await ds_handle.wait_closed()
and the same for the producer:
try:
async def reader(ch):
while await ch.wait_message():
msg = await ch.get_json()
await processor.handle_message(msg=msg)
ds_uri = ds_linker.get_uri(conf=conf)
ds = await ds_linker.get_datastore_handle(ds_uri)
pub = await ds.subscribe(channel)
ch = pub[0]
tsk = asyncio.ensure_future(reader(ch))
await tsk
except Exception as e:
logger.debug(f'{e}')
logger.error(f'{traceback.format_exc()}')
finally:
ds.close()
await ds.wait_closed()
so there are never connections left open with redis that might end up killing one of the processor's coroutines as time goes by.
For me this solved the issue: at the time I'm writing this it has been more than 2 weeks of uptime with no further incidents of the same kind.
Anyway, there is also a new aioredis major release; it's really recent news (this was on 1.3.1, and 2.0.0 should work using the same model as redis-py, so things have changed by this time).
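For reference, a rough sketch of what the open/use/close cycle looks like on the 2.x API (hedged: I am assuming the redis-py-style from_url entry point that aioredis 2.0 adopted, and 'my_queue' is just an illustrative name):

import aioredis  # aioredis >= 2.0, redis-py style API

async def pop_one(queue: str = "my_queue"):
    # from_url returns a client backed by a managed connection pool,
    # so there is no separate wait_closed() step as in 1.3.x.
    redis = aioredis.from_url("redis://localhost")
    try:
        return await redis.lpop(queue)
    finally:
        await redis.close()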
Binance API & python-binance offers async functionality for non-blocking execution as per discussed in Async basics for Binance.
I am using BinanceSocketManager listening (async non-blocking) to live data via websocket.
In scenarios like network intermittent connection lost, I wish to add an auto-reconnect feature to my project. But I can't seems to find any info with BinanceSocketManager. I was only able to find a guide which uses ThreadedWebsocketManager, but it was not an async implementation.
Does anyone know how to implement a Binance websocket disconnect detection and auto-reconnect mechanism?
Here is some code of what I have so far:
import asyncio
from binance import AsyncClient, BinanceSocketManager
async def main():
client = await AsyncClient.create()
await kline_listener(client)
async def kline_listener(client):
bm = BinanceSocketManager(client)
async with bm.kline_socket(symbol='BTCUSDT') as stream:
while True:
res = await stream.recv()
print(res)
# a way detect websocket error/disconnect, callback 'disconnect_callback'
async def disconnect_callback():
await client.close_connection()
await main() # restart client and kline socket
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
In case someone else is looking at this: you should be looking at BinanceAPIException. The code could then look something like this:
import asyncio
from datetime import datetime

from binance import AsyncClient, BinanceSocketManager
from binance.exceptions import BinanceAPIException
async def main():
client = await AsyncClient.create()
bm = BinanceSocketManager(client, user_timeout=60)
# start any sockets here, i.e a trade socket
kline_candles = bm.kline_socket('BNBUSDT', interval=client.KLINE_INTERVAL_1MINUTE)
# start receiving messages
try:
status = await client.get_system_status()
print(status['msg'])
async with kline_candles as stream:
for _ in range(5):
res = await stream.recv() # create/await response
await process_message(msg=res, client=client) # process message
except BinanceAPIException as e:
print(e)
await disconnect_callback(client=client)
async def disconnect_callback(client):
await client.close_connection() # close connection
    await asyncio.sleep(60)  # wait a minute before restarting, without blocking the loop
await main() # restart client and kline socket
async def process_message(msg, client):
if msg['e'] == 'error':
await disconnect_callback(client=client)
print('ERROR OCCURED')
else:
candle = msg['k'] # get only the candle info within the general dict
start_time = datetime.utcfromtimestamp(candle['t']/1000).strftime('%Y-%m-%d %H:%M:%S')
close_time = datetime.utcfromtimestamp(candle['T']/1000).strftime('%Y-%m-%d %H:%M:%S')
print(f'__ start: {start_time}, close: {close_time}')
print(msg)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
The disconnect has not been tested yet, but I assume this will work. If anyone has any additional notes, just let me know.
I have tested the above code and it proves rather stable.
Here are some improvements I have made.
I'm not sure what happens if your internet connection is completely gone when this line is executed:
client = await AsyncClient.create()
This could probably be solved like this (I'm open to better ideas):
while True:
try:
client = await AsyncClient.create()
except Exception as error_msg:
print(f"error: {error_msg}")
# should we add a sleep here?
# time.sleep(3)
else:
print("finally got through the loop")
break
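A non-blocking variant of that retry loop (a minimal sketch; the backoff values are arbitrary and create_client_with_retry is just an illustrative name) could use asyncio.sleep so the event loop keeps running while waiting:

import asyncio
from binance import AsyncClient

async def create_client_with_retry(max_delay: int = 60) -> AsyncClient:
    # Keep retrying AsyncClient.create() until the connection comes back.
    delay = 1
    while True:
        try:
            return await AsyncClient.create()
        except Exception as error_msg:
            print(f"error: {error_msg}, retrying in {delay}s")
            await asyncio.sleep(delay)         # non-blocking wait
            delay = min(delay * 2, max_delay)  # exponential backoff, capped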
Surrounding this with a try/except is a good idea:
bm = BinanceSocketManager(client, user_timeout=60)
The call to stream.recv() should be wrapped in asyncio.wait_for() to cover the situation where no data comes in for a longer period of time, which usually means something is wrong.
async with kline_candles as stream:
for _ in range(5):
try:
res = await asyncio.wait_for(stream.recv(), timeout=60) # create/await response
await process_message(msg=res, client=client) # process message
except (asyncio.TimeoutError, websockets.exceptions.ConnectionClosed, asyncio.exceptions.CancelledError, asyncio.exceptions.TimeoutError) as error_msg_1:
print(f"Error! in main loop 1:\n{error_msg_1}")
await disconnect_callback(client=client)
Quart is a Python web framework that re-implements the Flask API on top of Python's asyncio coroutine system. In my particular case, I have a Quart websocket endpoint that is supposed to have not just one source of incoming events, but two possible sources of events, both of which are supposed to continue the asynchronous loop.
An example with one event source:
from quart import Quart, websocket
app = Quart(__name__)
#app.websocket("/echo")
def echo():
while True:
incoming_message = await websocket.receive()
await websocket.send(incoming_message)
Taken from https://pgjones.gitlab.io/quart/
This example has one source: the incoming message stream. But what is the correct pattern if I had two possible sources, one being await websocket.receive() and the other being something along the lines of await system.get_next_external_notification()?
If either of them arrives, I'd like to send a websocket message.
I think I'll have to use asyncio.wait(..., return_when=FIRST_COMPLETED), but how do I make sure that I miss no data (i.e. in the race condition where websocket.receive() and system.get_next_external_notification() both finish almost exactly at the same time)? What's the correct pattern in this case?
One idea is to use a Queue to join the events from the different sources, and then have an async function listening to that queue in the background for requests. Something like this might get you started:
import asyncio
from quart import Quart, websocket
app = Quart(__name__)
@app.before_serving
async def startup():
print(f'starting')
app.q = asyncio.Queue(1)
asyncio.ensure_future(listener(app.q))
async def listener(q):
while True:
returnq, msg = await q.get()
print(msg)
await returnq.put(f'hi: {msg}')
#app.route("/echo/<message>")
async def echo(message):
while True:
returnq = asyncio.Queue(1)
await app.q.put((returnq, message))
response = await returnq.get()
return response
#app.route("/echo2/<message>")
async def echo2(message):
while True:
returnq = asyncio.Queue(1)
await app.q.put((returnq, message))
response = await returnq.get()
return response
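Regarding the asyncio.wait(..., return_when=FIRST_COMPLETED) idea from the question: the usual way to avoid losing data in the race is to keep the pending tasks alive and only re-create a task for the source that actually completed. A minimal sketch (system.get_next_external_notification() is the hypothetical second source named in the question; multiplex is just an illustrative name):

import asyncio
from quart import websocket

async def multiplex(system):
    # One long-lived task per event source; pending tasks are kept, never
    # cancelled, so a result that completed in the same iteration is not lost.
    recv_task = asyncio.ensure_future(websocket.receive())
    notify_task = asyncio.ensure_future(system.get_next_external_notification())
    while True:
        done, _pending = await asyncio.wait(
            {recv_task, notify_task}, return_when=asyncio.FIRST_COMPLETED
        )
        if recv_task in done:
            await websocket.send(recv_task.result())
            recv_task = asyncio.ensure_future(websocket.receive())
        if notify_task in done:
            await websocket.send(str(notify_task.result()))
            notify_task = asyncio.ensure_future(system.get_next_external_notification())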
I'm trying to write some tests for some asynchronous Python code that uses the aioamqp library, but pytest and callbacks fail me.
Simply put, when the aioamqp basic_consume() function receives a message and calls the assigned asynchronous callback, inside the callback I can do whatever I like -- reference unassigned variables, assert something outrageous -- and pytest happily passes the test. Clearly an exception gets raised under the hood and the test is interrupted, since the callback function never runs further than the first failing line, but the failure never rises all the way to pytest.
Here's a code snippet to demonstrate:
import aioamqp
import asyncio
import pytest
MQ_HOST = '0.0.0.0'
MQ_PORT = 5672
MQ_LOGIN = 'login'
MQ_PASSWORD = 'password'
class MockMQ:
def __init__(self):
self.loop = asyncio.get_event_loop()
self.transport = None
self.protocol = None
async def connect(self):
try:
self.transport, self.protocol = await aioamqp.connect(
host=MQ_HOST, port=MQ_PORT, login=MQ_LOGIN, password=MQ_PASSWORD
)
self.channel = await self.protocol.channel()
except aioamqp.AmqpClosedConnection:
print('closed connection')
return
async def close(self):
await self.protocol.close()
self.transport.close()
async def publish(self, data, queue_name, exchange='', properties=None):
queue = await self.channel.queue_declare(queue_name)
await self.channel.publish(data, exchange, queue_name, properties=properties)
async def consume(self, callback, queue_name):
await self.channel.basic_consume(callback, queue_name=queue_name)
@pytest.mark.asyncio
async def test_mq():
"""Basic ping-pong test for RabbitMQ."""
QUEUE_NAME = 'my_queue'
    @pytest.mark.asyncio
async def callback(channel, body, envelope, properties):
"""This is the callback called when a MQ message is consumed."""
print('we are here')
await channel.basic_client_ack(envelope.delivery_tag)
print(body) # this gets printed as well
foo = bar * 2 # this is where we fail
assert body == b'bar'
print('we never arrive here')
mq = MockMQ()
await mq.connect()
await mq.consume(callback, QUEUE_NAME)
await mq.publish(b'foo', QUEUE_NAME)
await asyncio.sleep(1.0)
await mq.close()
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(test_mq())
Running this via the main program with IPython results correctly in an exception, since it doesn't get swallowed by pytest.
What is the proper way of writing tests for pytest in this case? pytest-asyncio does not seem to affect this issue in the least.
EDIT: I might as well add that my dev environment uses Django and pytest-django, but removing it doesn't change the result either.
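One way to make such failures visible to pytest (a sketch reusing the MockMQ helper from the snippet above): keep the callback dumb and let it hand the received body, or any exception, over through a future that the test itself awaits; the assertion then runs inside the test coroutine, where pytest can see it fail:

import asyncio
import pytest

@pytest.mark.asyncio
async def test_mq_pingpong():
    # Ping-pong test where the assert happens in the test, not the callback.
    received = asyncio.get_event_loop().create_future()

    async def callback(channel, body, envelope, properties):
        # No asserts in here; just deliver the body (or an error) to the test.
        try:
            await channel.basic_client_ack(envelope.delivery_tag)
            received.set_result(body)
        except Exception as exc:
            received.set_exception(exc)

    mq = MockMQ()
    await mq.connect()
    await mq.consume(callback, 'my_queue')
    await mq.publish(b'foo', 'my_queue')

    body = await asyncio.wait_for(received, timeout=5.0)  # fails loudly on timeout
    assert body == b'foo'  # this assertion is now reported by pytest
    await mq.close()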
I am trying to implement a simple WebSocket server in Python using this module. For learning purposes, the server should reply with a reversed version of what it received. For example, if the client sends "Hello Server", the server should respond with "revreS olleH". My code is based on the documentation here.
Since an example of a consumer() and producer() function/coroutine wasn't provided in the documentation, I took a stab at creating them, but I think I am misunderstanding something that isn't obvious to me. The code currently returns the string 'nothing' instead of the reversed version of what the client sent.
FYI, since the machine I am using has Python 3.4.3, the code had to be adjusted to accommodate that version. That's why you'll see newer code commented out, for now. Lots of documentation is included too, as I learn this stuff.
Now, the codez...
index.py:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#########################
# Dependencies
#########################
# asyncio
# websockets
#########################
# Modules
#########################
import asyncio
import websockets
#########################
# Functions
#########################
# async indicates an asynchronous function.
# Calling them doesn't actually run them,
# but instead a coroutine object is returned,
# which can then be passed to the event loop to be executed later on.
# Python ≥ 3.5: async def producer(reply):
@asyncio.coroutine
def producer(reply=None):
"""Sends the reply to producer_handler."""
if reply is None:
return 'nothing'
else:
return reply
# Python ≥ 3.5: async def consumer(message):
@asyncio.coroutine
def consumer(message):
"""Reverses message then sends it to the producer."""
reply = message[::-1]
#await producer(reply)
yield from producer(reply)
# async def consumer_handler(websocket):
@asyncio.coroutine
def consumer_handler(websocket):
"""Handles incoming websocket messages."""
while True:
# await calls an asynchronous function.
#message = await websocket.recv()
message = yield from websocket.recv()
# Python ≥ 3.5: await consumer(message)
yield from consumer(message)
#async def producer_handler(websocket):
@asyncio.coroutine
def producer_handler(websocket):
"""Handles outgoing websocket messages."""
while True:
#message = await producer()
message = yield from producer()
#await websocket.send(message)
yield from websocket.send(message)
#async def handler(websocket, path):
@asyncio.coroutine
def handler(websocket, path):
"""Enables reading and writing messages on the same websocket connection."""
# A Future is an object that is supposed to have a result in the future.
# ensure_future:
# schedules the execution of a coroutine object,
# wraps it in a future, then returns a Task object.
# If the argument is a Future, it is returned directly.
# Python ≥ 3.5
#consumer_task = asyncio.ensure_future(consumer_handler(websocket))
#producer_task = asyncio.ensure_future(producer_handler(websocket))
consumer_task = asyncio.async(consumer_handler(websocket))
producer_task = asyncio.async(producer_handler(websocket))
# .wait:
# wait for the Futures and coroutine objects given
# by the sequence futures to complete. Coroutines will be
# wrapped in Tasks. Returns two sets of Future: (done, pending).
#done, pending = await asyncio.wait(
done, pending = yield from asyncio.wait(
# The futures.
[consumer_task, producer_task],
# FIRST_COMPLETED: the function will return when
# any future finishes or is cancelled.
return_when=asyncio.FIRST_COMPLETED,
)
for task in pending:
task.cancel()
#########################
# Start script
#########################
def main():
# Creates a WebSocket server.
start_server = websockets.serve(handler, '127.0.0.1', 8000)
# Get the event loop for the current context.
# Run until the Future is done.
asyncio.get_event_loop().run_until_complete(start_server)
# Run until stop() is called.
asyncio.get_event_loop().run_forever()
#########################
# Script entry point.
#########################
if __name__ == '__main__':
main()
index.html:
<!DOCTYPE html>
<html>
<head>
<title>WebSocket demo</title>
</head>
<body>
<script>
// Create the websocket.
var ws = new WebSocket("ws://127.0.0.1:8000/"),
messages = document.createElement('ul');
// Called when the websocket is opened.
ws.onopen = function(event) {
ws.send('Hello Server!');
};
// Called when a message is received from server.
ws.onmessage = function(event) {
var messages = document.getElementsByTagName('ul')[0],
message = document.createElement('li'),
content = document.createTextNode(event.data);
message.appendChild(content);
messages.appendChild(message);
};
document.body.appendChild(messages);
</script>
</body>
</html>
Not completely sure on this, but I think you misinterpreted the docs. The consumer shouldn't be calling the producer.
The "Hello Server!" the HTML file sends goes through consumer_handler to consumer to producer, but the yield from statements means that the reversed string ends up back in the consumer_handler, as the result of yield from consumer(message).
On the other hand, producer_handler calls producer many times without an argument (from message = yield from producer()), which is what creates the nothing that gets sent to the HTML file. It doesn't receive the consumer's string.
Instead, there should be a queue or something where the consumer pushes to and the producer takes from, like in this example.
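A minimal sketch of that queue-based hand-off, written in the Python ≥ 3.5 syntax for brevity (async/await rather than the 3.4 yield from style used in the question):

import asyncio
import websockets

async def handler(websocket, path):
    replies = asyncio.Queue()  # per-connection hand-off from consumer to producer

    async def consumer_handler():
        while True:
            message = await websocket.recv()
            await replies.put(message[::-1])  # reverse and queue for sending

    async def producer_handler():
        while True:
            await websocket.send(await replies.get())  # waits for queued replies

    consumer_task = asyncio.ensure_future(consumer_handler())
    producer_task = asyncio.ensure_future(producer_handler())
    done, pending = await asyncio.wait(
        [consumer_task, producer_task], return_when=asyncio.FIRST_COMPLETED
    )
    for task in pending:
        task.cancel()

def main():
    start_server = websockets.serve(handler, '127.0.0.1', 8000)
    asyncio.get_event_loop().run_until_complete(start_server)
    asyncio.get_event_loop().run_forever()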
Thanks.