I'm using the Autobahn asyncio system (to talk the Websocket WAMP protocol), which works fine and I can handle incoming RPC calls and pubsub.
My problem is I now have to connect TCP sockets and send information over these sockets as soon as an RPC call comes in through the Autobahn part.
The autobahn part works fine like this :
from autobahn.asyncio.component import Component, run
from asyncio import sleep
from autobahn.asyncio.wamp import ApplicationSession, ApplicationRunner
# NOTE(review): Stack Overflow formatting turned the decorator's "@" into "#";
# this is meant to be Autobahn's on_join decorator on the component.
@comp.on_join
async def joined(session, details):
    """Session joined: wire up the pubsub subscription and the RPC endpoint."""
    print("Connected to websocket")

    def on_message(msg):
        # Incoming pubsub payloads are JSON strings.
        msg = json.loads(msg)
        print(msg)

    def some_rpc(with_data):
        print("Doing something with the data")
        return json.dumps({'status': 'OK'})

    try:
        session.subscribe(on_message, u'some_pubsub_topic')
        session.register(some_rpc, u'some_rpc_call')
        print("RPC and Pubsub initialized")
    except Exception as e:
        print("could not subscribe to topic: {0}".format(e))


if __name__ == "__main__":
    run([comp])
However now I need to be able to connect to multiple regular TCP sockets :
class SocketClient(asyncio.Protocol):
    """Plain TCP client protocol: remembers its transport so callers can
    push data with send(); stops the given event loop when the peer closes."""

    def __init__(self, loop):
        self.data = b''      # receive buffer (unused here, kept as posted)
        self.loop = loop     # loop to stop when the connection drops

    def connection_made(self, transport):
        # Keep the transport so send() can write to the socket later.
        self.transport = transport
        print('connected')

    def data_received(self, data):
        print('Data received: {!r}'.format(data.decode()))

    def send(self, data):
        """Write raw bytes to the connected socket."""
        self.transport.write(data)

    def connection_lost(self, exc):
        print('The server closed the connection')
        print('Stop the event loop')
        self.loop.stop()
# Open a plain TCP connection; create_connection is a coroutine that
# resolves to (transport, protocol) — `c` here is the coroutine object.
loop = asyncio.get_event_loop()
c=loop.create_connection(lambda: SocketClient(loop),
'192.168.0.219', 6773)
# Drive the connect coroutine to completion, then serve events until
# SocketClient.connection_lost() calls loop.stop().
loop.run_until_complete(c)
loop.run_forever()
loop.close()
The problem is that, when I combine both and do this :
def some_rpc(with_data):
    # NOTE(review): this is the FAILING example from the question — `c` is the
    # coroutine object returned by create_connection, not the protocol, so
    # c.send('test') resumes the coroutine instead of writing to the socket.
    c.send('test')
    return json.dumps({'status': 'OK'})
It barks at me and tells me :
StopIteration
During handling of the above exception, another exception occurred:
Traceback (most recent call last): File
"/usr/lib/python3.5/site-packages/autobahn/wamp/websocket.py", line
95, in onMessage
self._session.onMessage(msg) File "/usr/lib/python3.5/site-packages/autobahn/wamp/protocol.py", line
894, in onMessage
on_reply = txaio.as_future(endpoint.fn, *invoke_args, **invoke_kwargs) File "/usr/lib/python3.5/site-packages/txaio/aio.py", line 400, in
as_future
return create_future_error(create_failure()) File "/usr/lib/python3.5/site-packages/txaio/aio.py", line 393, in
create_future_error
reject(f, error) File "/usr/lib/python3.5/site-packages/txaio/aio.py", line 462, in reject
future.set_exception(error.value) File "/usr/lib64/python3.5/asyncio/futures.py", line 365, in set_exception
raise TypeError("StopIteration interacts badly with generators " TypeError: StopIteration interacts badly with generators and cannot be
raised into a Future
Does anyone have any idea on how to call the send function from within the RPC call function ?
In this code:
c=loop.create_connection(lambda: SocketClient(loop),
'192.168.0.219', 6773)
# [...]
def some_rpc(with_data):
c.send('test')
return json.dumps({'status': 'OK'})
create_connection is a coroutine function, so c contains a coroutine object. Such an object does have a send method, but one that is entirely unrelated to sending things over the network. After calling create_connection, you probably want to get the resulting transport with something like:
transport, ignore = loop.run_until_complete(c)
and then use transport.write(), not c.send().
Related
So here's the basic code (sorry it's long)
import argparse
import asyncio
from contextvars import ContextVar
import sys
# This thing is the offender
# ContextVar set in ClientProtocol.send() but read in data_received(), which
# the event loop runs in a different context — hence the LookupError.
message_var = ContextVar("message")
class ServerProtocol(asyncio.Protocol):
    """Echo server: writes each received chunk straight back, then closes
    the client socket."""

    def connection_made(self, transport):
        peername = transport.get_extra_info("peername")
        print("Server: Connection from {}".format(peername))
        self.transport = transport

    def data_received(self, data):
        message = data.decode()
        print("Server: Data received: {!r}".format(message))
        print("Server: Send: {!r}".format(message))
        # Echo the raw bytes back, then drop the connection.
        self.transport.write(data)
        print("Server: Close the client socket")
        self.transport.close()
class ClientProtocol(asyncio.Protocol):
    """Client side of the echo demo.

    NOTE(review): send() sets `message_var` in the context it runs in, but
    data_received() is invoked by the event loop in a *different* context, so
    message_var.get() there raises LookupError — that is the question's bug,
    deliberately kept as posted.
    """

    def __init__(self, on_conn_lost):
        self.on_conn_lost = on_conn_lost   # future resolved when the link dies
        self.transport = None
        self.is_connected: bool = False

    def connection_made(self, transport):
        self.transport = transport
        self.is_connected = True

    def data_received(self, data):
        # reading back supposed contextvar
        message = message_var.get()
        print(f"{message} : {data.decode()}")

    def connection_lost(self, exc):
        print("The server closed the connection")
        self.is_connected = False
        self.on_conn_lost.set_result(True)

    def send(self, message: str):
        # Setting context var
        message_var.set(message)
        if self.transport:
            self.transport.write(message.encode())

    def close(self):
        """Close the transport and resolve on_conn_lost (idempotently)."""
        self.transport.close()
        self.is_connected = False
        if not self.on_conn_lost.done():
            self.on_conn_lost.set_result(True)
async def get_input(client: "ClientProtocol"):
    """Read lines from stdin and forward each one through `client`.

    input() blocks, so it is run in the default thread-pool executor to keep
    the event loop responsive; the literal "q" closes the client and returns.
    (Annotation is a string so this function is importable without the class.)
    """
    loop = asyncio.get_running_loop()
    while client.is_connected:
        message = await loop.run_in_executor(None, input, ">>>")
        if message == "q":
            client.close()
            return
        client.send(message)
async def main(args):
    """Run either the echo server (--server) or the interactive client."""
    host = "127.0.0.1"
    port = 5001
    loop = asyncio.get_running_loop()
    if args.server:
        server = await loop.create_server(lambda: ServerProtocol(), host, port)
        async with server:
            await server.serve_forever()
        return
    # Client path: connect, then read stdin until 'q' or the server drops us.
    on_conn_lost = loop.create_future()
    client = ClientProtocol(on_conn_lost)
    await loop.create_connection(lambda: client, host, port)
    await get_input(client)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--server", "-s", default=False, action="store_true", help="Start server"
    )
    arguments = parser.parse_args(sys.argv[1:])
    asyncio.run(main(args=arguments))
This crashes with the following exception:
Exception in callback _ProactorReadPipeTransport._loop_reading(<_OverlappedF...shed result=4>)
handle: <Handle _ProactorReadPipeTransport._loop_reading(<_OverlappedF...shed result=4>)>
Traceback (most recent call last):
File "C:\Users\brent\AppData\Local\Programs\Python\Python310\lib\asyncio\events.py", line 80, in _run
self._context.run(self._callback, *self._args)
File "C:\Users\brent\AppData\Local\Programs\Python\Python310\lib\asyncio\proactor_events.py", line 320, in _loop_reading
self._data_received(data, length)
File "C:\Users\brent\AppData\Local\Programs\Python\Python310\lib\asyncio\proactor_events.py", line 270, in _data_received
self._protocol.data_received(data)
File "E:\Development\Python\ibcs2023\_prep\experimental\asyncio_context.py", line 40, in data_received
message = message_var.get()
LookupError: <ContextVar name='message' at 0x0000023F30A54FE0>
The server closed the connection
Why does calling message = message_var.get() cause a crash? Why can't Python find the context var? Why is data_received not in the same context as send? How can I keep them in the same context?
I'm working on a larger project with the main branch of Textual and it uses a contextvar that loses context every time a message is received using a modified version of the code above.
Keeping a separated "context" for each task is exactly what contextvars are about. You could only assert that the send and data_received methods were called within the same context if you had control over the "uperlying" (as opposed to 'underlying') driver of your Protocol class - that is not the case, and both are called in different contexts. I mean, the answer to "How can I keep them in the same context?" is: you can't unless you write your own implementation of the code which makes this work inside asyncio.
There is no way you can keep track of metadata from a message, and retrieve this metadata on getting the reply, unless there is a marker on the message itself that will survive the round-trip. That is: your networking/communication protocol itself has to specify a way to identify messages. It might be as simple as a sequential integer number prefixing every string, for example - or, in this case where you simply echo the message back, it could be the message itself. Once you have that, a simple dictionary having these message IDs as keys will work for what you seem to intend in this example.
I have a Python Tornado Websocket server that stores clients in a shared set() so that I know how many clients are connected.
The challenge is that calling on_close after WebSocketClosedError raises a KeyError and the client-instance is not removed from the set of connected clients. This error has caused my server to accumulate over 1000 clients even when the active clients are only around 5.
My Code:
import tornado.iostream
import tornado.websocket
import asyncio
class SocketHandler(tornado.websocket.WebSocketHandler):
    """Question code as posted: tracks clients in a class-level set and starts
    one send_updates() while-loop per received message.

    NOTE(review): on_close() is invoked both manually (from send_updates) and
    by Tornado when the connection actually closes, so waiters.remove() raises
    KeyError on the second call — the bug this question is about.
    """
    socket_active_message = {"status": "Socket Connection Active"}
    waiters = set()

    def initialize(self):
        self.client_name = "newly_connected"

    def open(self):
        print('connection opened')
        # https://kite.com/python/docs/tornado.websocket.WebSocketHandler.set_nodelay
        self.set_nodelay(True)
        SocketHandler.waiters.add(self)

    def on_close(self):
        print("CLOSED!", self.client_name)
        SocketHandler.waiters.remove(self)

    def check_origin(self, origin):
        # Override the origin check if needed
        return True

    async def send_updates(self, message):
        print('starting socket service loop')
        loop_counter = 0
        while True:
            try:
                await self.write_message({'status': 82317581})
            except tornado.websocket.WebSocketClosedError:
                self.on_close()
            except tornado.iostream.StreamClosedError:
                self.on_close()
            except Exception as e:
                self.on_close()
                print('Exception e:', self.client_name)
            await asyncio.sleep(0.05)

    async def on_message(self, message):
        print("RECEIVED :", message)
        self.client_name = message
        await self.send_updates(message)
def run_server():
    # Create tornado application and supply URL routes
    webApp = tornado.web.Application(
        [
            (
                r"/",
                SocketHandler,
                {},
            ),
        ]
    )
    # NOTE(review): this HTTPServer instance is created but never used —
    # webApp.listen() below already creates and starts its own server.
    application = tornado.httpserver.HTTPServer(webApp)
    webApp.listen(3433)
    # Start IO/Event loop
    tornado.ioloop.IOLoop.instance().start()


run_server()
The Stack-trace:
Traceback (most recent call last):
File "/mnt/c/Users/EE/projects/new/venv/lib/python3.8/site-packages/tornado/web.py", line 1699, in _execute
result = await result
File "/mnt/c/Users/EE/projects/new/venv/lib/python3.8/site-packages/tornado/websocket.py", line 278, in get
await self.ws_connection.accept_connection(self)
File "/mnt/c/Users/EE/projects/new/venv/lib/python3.8/site-packages/tornado/websocket.py", line 881, in accept_connection
await self._accept_connection(handler)
File "/mnt/c/Users/EE/projects/new/venv/lib/python3.8/site-packages/tornado/websocket.py", line 964, in _accept_connection
await self._receive_frame_loop()
File "/mnt/c/Users/EE/projects/new/venv/lib/python3.8/site-packages/tornado/websocket.py", line 1118, in _receive_frame_loop
await self._receive_frame()
File "/mnt/c/Users/EE/projects/new/venv/lib/python3.8/site-packages/tornado/websocket.py", line 1209, in _receive_frame
await handled_future
File "/mnt/c/Users/EE/projects/new/venv/lib/python3.8/site-packages/tornado/ioloop.py", line 743, in _run_callback
ret = callback()
File "/mnt/c/Users/EE/projects/new/venv/lib/python3.8/site-packages/tornado/websocket.py", line 658, in <lambda>
self.stream.io_loop.add_future(result, lambda f: f.result())
File "ask_So.py", line 50, in on_message
await self.send_updates(message)
File "ask_So.py", line 39, in send_updates
self.on_close()
File "ask_So.py", line 26, in on_close
SocketHandler.waiters.remove(self)
KeyError: <__main__.SocketHandler object at 0x7ffef9f25520>
I have tried moving the waiters set outside the class but it still produces the same behaviour.
To simulate WebSocketClosedError: open many browser tabs as clients and close one browser tab at a time.
It seems like self.on_close() is being called twice. Once you're calling it manually from inside send_updates() and then later, when a connection is actually closed, Tornado is also calling self.on_close(). Since the self object was already removed from the set the first time, it raises a KeyError the second time.
If you want to close the connection, just call self.close(). The self.on_close() method will be called by Tornado automatically.
Also, you can handle the exception in a try...except block inside on_close.
Update
The previous part of this answer should fix the KeyError related problem. This update is regarding why the clients are not being removed from waiters set.
So, I tested your code and found a major problem with it here:
async def on_message(self, message):
print("RECEIVED :", message)
self.client_name = message
await self.send_updates(message) # <- This is problematic
Whenever a client sends a message, it will run self.send_updates method. So even if there's only one client that sends a message, let's say, 10 times, send_updates will also be called 10 times and, as a result, you will have 10 while loops running simultaneously!
As the number of loops increase, it ultimately blocks the server. That means Tornado has no time to run other code as it's busy juggling so many while loops. Hence, the clients from the waiters are never removed.
Solution
Instead of calling send_updates everytime a message arrives, you can call it just one time. Just have a single while loop to send updates to all clients.
I'd update the code like this:
class SocketHandler(...):
# Make it a classmethod so that it can be
# called without an instance
#classmethod
async def send_updates(cls):
print('starting socket service loop')
loop_counter = 0
while True:
for waiter in cls.waiters:
# use `waiter` instead of `self`
try:
await waiter.write_message({'status': 82317581})
...
await asyncio.sleep(0.05)
Instead of calling send_updates from on_message, you'll have to tell IOLoop to call it once:
def run_server():
    ...
    # schedule SocketHandler.send_updates to be run
    tornado.ioloop.IOLoop.current().add_callback(SocketHandler.send_updates)
    tornado.ioloop.IOLoop.current().start()
Given the following program:
import asyncio


async def run():
    """Start the demo server; the bare except only covers exceptions raised
    inside THIS coroutine (startup / serve_forever), not the handler tasks."""
    try:
        server = await asyncio.start_server(on_connected, '127.0.0.1', 15500)

        async with server:
            await server.serve_forever()
    except:
        print("exception!")


async def on_connected(reader, writer):
    while True:
        data = await reader.readline()
        print(1 / 0)  # deliberate crash to demonstrate the unhandled exception


# Guarded so importing this module does not start (and hang on) the server.
if __name__ == "__main__":
    asyncio.run(run())
When I run it, and use nc to connect to it & send data, it will raise an exception in line 14. However I am unable to handle it. Instead, I will get an exception printed and the program will hang.
nc test:
$ nc localhost 15500
test
program output:
$ python3 serv
Task exception was never retrieved
future: <Task finished coro=<on_connected() done, defined at serv:14> exception=ZeroDivisionError('division by zero')>
Traceback (most recent call last):
File "serv", line 14, in on_connected
print(1 / 0)
ZeroDivisionError: division by zero
While I of course could just add a try/except around line 14, I want to implement a general exception handling which shall handle all errors which can ever occur inside of on_connected.
How can I do this?
Asyncio provides a method to set a general exception handler
Let's apply it to this example:
Let's just define our loop exception handler: handle_exception
And inside this method let's manage exceptions. Next code just customize logging messages.
import asyncio
import logging
# Module-level logger; handle_exception() below logs through it.
log = logging.getLogger(__name__)
async def run():
    """Install the loop-level exception handler, then serve forever."""
    loop = asyncio.get_running_loop()
    loop.set_exception_handler(handle_exception)
    server = await asyncio.start_server(on_connected, '127.0.0.1', 5062)
    async with server:
        await server.serve_forever()
async def on_connected(reader, writer):
    """Per-connection handler: read a line, then crash deliberately (demo)."""
    while True:
        data = await reader.readline()
        # FIX: the original line was `raise print(1 / 0)` — a paste artifact.
        # 1/0 raises ZeroDivisionError before `print` (or `raise`) ever runs,
        # so the stray `raise` was dead weight; behavior is unchanged.
        print(1 / 0)
def handle_exception(loop, context):
    """Loop-level exception handler.

    context["message"] will always be there; but context["exception"] may not.
    """
    msg = context.get("exception", context["message"])
    # BUG FIX: the original `if name := future.get_coro().__name__ == "on_connected":`
    # bound `name` to the *comparison result* (a bool), because `:=` has lower
    # precedence than `==` — so the else-branch logged "from False". Extract
    # the coroutine name first (None-safe: context may lack "future").
    future = context.get("future")
    name = future.get_coro().__name__ if future is not None else "<unknown>"
    if name == "on_connected":
        # isinstance instead of `type(msg) ==`: msg may be an exception
        # instance or a plain message string.
        if isinstance(msg, ZeroDivisionError):
            log.error(f"Caught ZeroDivisionError from on_connected: {msg}")
            return
        log.info(f"Caught another minor exception from on_connected: {msg}")
    else:
        log.error(f"Caught exception from {name}: {msg}")
Output on this case is the following:
Caught ZeroDivisionError from on_connected: division by zero
Reference that I have used to look for further information:
https://www.roguelynn.com/words/asyncio-exception-handling/
https://github.com/econchick/mayhem/blob/master/part-3/mayhem_2.py
Please, let me know if this answer your question.
When developing server code, a bug causing an exception often occurs, and then the server process just hangs and must be killed, which is super annoying.
I wanted it to exit cleanly as soon as one exception is raised in the protocol code. To do so, I created a handler class that closes the server and registered it as the exception handler. The call to await server.serve_forever() will throw CancelledError, so you may want to catch it to exit cleanly.
import asyncio
class ExcHandler:
    """Loop exception handler that delegates to the default handler for
    logging, then closes the server so serve_forever() gets cancelled and
    the process can exit cleanly."""

    def __init__(self, server):
        self._server = server

    def __call__(self, loop, context):
        print("exception occured, closing server")
        loop.default_exception_handler(context)
        self._server.close()
class EchoServerProtocol(asyncio.Protocol):
    """Deliberately faulty protocol: raises as soon as a client connects, to
    demonstrate the loop-level exception handler closing the server."""

    def connection_made(self, transport):
        raise RuntimeError("oups")
async def main():
    """Serve until the exception handler closes the server, then exit."""
    loop = asyncio.get_running_loop()
    server = await loop.create_server(
        lambda: EchoServerProtocol(),
        '127.0.0.1', 8888)
    loop.set_exception_handler(ExcHandler(server))
    async with server:
        try:
            await server.serve_forever()
        except asyncio.exceptions.CancelledError:
            # server.close() cancels serve_forever(); catch it for a clean exit.
            print("server cancelled")


# Guarded so importing this module does not start (and block on) the server.
if __name__ == "__main__":
    asyncio.run(main())
Apparently, the server catching exceptions is by design (See Python issue #42526). The rationale behind that is the server keeps processing other requests, even when one of them crash. I guess this is suitable for production where the show must go on even in case of glitches.
I'm getting an Exception in a Tornado WebSocket server but It gives no information in the trace to know which line of code or which step in my program it is originating from. I would like to find out so that I try-catch the origin of the exception.
Error Trace: (No mention of any part of my files)
[E 200527 21:07:19 base_events:1608] Task exception was never retrieved
future: <Task finished coro=<WebSocketProtocol13.write_message.<locals>.wrapper() done, defined at /usr/local/lib/python3.7/site-packages/tornado/websocket.py:1102> exception=WebSocketClosedError()>
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/tornado/websocket.py", line 1104, in wrapper
await fut
tornado.iostream.StreamClosedError: Stream is closed
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/tornado/websocket.py", line 1106, in wrapper
raise WebSocketClosedError()
tornado.websocket.WebSocketClosedError
[E 200527 21:07:19 base_events:1608] Task exception was never retrieved
That Same Group of Traceback repeats over 16 times.
Here is my code:
import tornado.iostream
import tornado.web
import tornado.gen
import tornado.websocket
import asyncio
class SocketHandler(tornado.websocket.WebSocketHandler):
    """Broadcasts a status dict to every connected waiter from one shared
    class-level loop (the fixed design from the earlier answer)."""
    waiters = set()

    def initialize(self):
        self.client_name = "newly_connected"

    def get_compression_options(self):
        # Non-None enables compression with default options.
        return {}

    def open(self):
        print('connection opened')
        # SocketHandler.waiters.add(self)

    def on_close(self):
        print("CLOSED!", self.client_name)
        try:
            SocketHandler.waiters.remove(self)
        except KeyError:
            print('tried removing new client')

    def check_origin(self, origin):
        # Override the origin check if needed
        return True

    # NOTE(review): posted as "#classmethod" — Stack Overflow mangled the "@".
    @classmethod
    async def send_updates(cls, message):
        # Guard so only the first caller starts the broadcast loop.
        if len(cls.waiters) < 2:
            while True:
                chat = {}
                # Prevent RuntimeError: Set changed size during iteration
                waiters_copy = cls.waiters.copy()
                for waiter in waiters_copy:
                    try:
                        await waiter.write_message(chat)
                    except tornado.websocket.WebSocketClosedError:
                        pass
                    except tornado.iostream.StreamClosedError:
                        pass
                    except Exception as e:
                        print('Exception e:', waiter.client_name)
                        pass
                # sleep a bit
                await asyncio.sleep(0.05)
        else:
            print('broadcast loop already running')

    async def on_message(self, message):
        print("RECEIVED :", message)
        self.client_name = message
        self.first_serve_cache_on_connnect()
        SocketHandler.waiters.add(self)
        await SocketHandler.send_updates(message)

    def first_serve_cache_on_connnect(self):
        print('serving cache on connect')
        # NOTE(review): self.namespace is not assigned anywhere in this
        # snippet — presumably set elsewhere; verify before reuse.
        temp_calc_results = self.namespace.results
        try:
            # NOTE(review): write_message is called without await here — this
            # un-awaited coroutine is what produces "Task exception was never
            # retrieved", as the answer below explains.
            self.write_message(temp_calc_results)
        except Exception as e:
            pass
I have tried catching the exceptions that may cause any error while sending messages to the websocket clients but this error still happens when clients connect to the server.
The message "task exception was never retrieved" is not about a missing try/except block, it's about a missing await. In first_serve_cache_on_connect, you call write_message without await, so first_serve_cache_on_connect is no longer running by the time the exception is raised and it has nowhere to go but the logs.
This is basically harmless, but if you want to clean up your logs, you need to make first_serve_cache_on_connect an async def coroutine, call it with await in on_message, and call write_message with await.
I'm trying to close the transport right after sending the UDP packet and I'm getting an Exception in callback _SelectorDatagramTransport._read_ready()
import asyncio
class MyProtocol:
    """Fire-and-forget UDP protocol: sends one message on connection and
    immediately closes the transport; stops the loop once closed."""

    def __init__(self, message, loop):
        self.message = message
        self.loop = loop
        self.transport = None

    def connection_made(self, transport):
        self.transport = transport
        print("Send:", self.message)
        self.transport.sendto(self.message.encode())
        self.transport.close()  # <----------

    def error_received(self, exc):
        print('Error received', exc)

    def connection_lost(self, exc):
        print("Socket closed, stop the event loop")
        self.loop.stop()
# Build the UDP endpoint; the protocol sends and immediately closes in
# connection_made (the behavior the question is about).
loop = asyncio.get_event_loop()
message = "hello"
connect = loop.create_datagram_endpoint(lambda: MyProtocol(message, loop), remote_addr=('127.0.0.1', 2222))
transport, protocol = loop.run_until_complete(connect)
# MyProtocol.connection_lost() stops the loop once the close completes.
loop.run_forever()
The full stack trace that I get is while running the snippet above in CPython 3.5.1 is:
Socket closed, stop the event loop
Exception in callback _SelectorDatagramTransport._read_ready()
handle: <Handle _SelectorDatagramTransport._read_ready()>
Traceback (most recent call last):
File "/home/ecerulm/.pyenv/versions/3.5.1/lib/python3.5/asyncio/selector_events.py", line 1002, in _read_ready
data, addr = self._sock.recvfrom(self.max_size)
AttributeError: 'NoneType' object has no attribute 'recvfrom'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/ecerulm/.pyenv/versions/3.5.1/lib/python3.5/asyncio/events.py", line 125, in _run
self._callback(*self._args)
File "/home/ecerulm/.pyenv/versions/3.5.1/lib/python3.5/asyncio/selector_events.py", line 1008, in _read_ready
self._fatal_error(exc, 'Fatal read error on datagram transport')
File "/home/ecerulm/.pyenv/versions/3.5.1/lib/python3.5/asyncio/selector_events.py", line 587, in _fatal_error
self._loop.call_exception_handler({
AttributeError: 'NoneType' object has no attribute 'call_exception_handler'
I believe the exception is only generated if the UDP packet is actively refused, with an ICMP Destination Unreachable (which I'm not interested in).
So the question is what is the right way of doing this. I'm not interested in this connection anymore after sending so I want to get rid of the transport as soon as possible. The documentation for DatagramTransport.sendto() just says that the methods doesn't block. But how do I know when the sending is completed? (And by complete I mean when is handed over to the OS, not delivered to the remote).
Is there any other asyncio coroutine to send an UDP packet asynchronously and simple await (maybe even skipping the whole create_datagram_endpoint) ?
Is there any other asyncio coroutine to send an UDP packet asynchronously and simple await?
I would, base on DatagramTransport source, wrap it in Future to be yieldable/awaitable. It will raise exception on error and return True on success. The example PoC code:
import asyncio
import socket
class UDPClient():
    """Awaitable UDP sender: sendto() returns a Future that resolves to True
    once the datagram has been handed to the OS, or raises on error."""

    def __init__(self, host, port, loop=None):
        self._loop = asyncio.get_event_loop() if loop is None else loop
        self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self._sock.setblocking(False)
        self._addr = (host, port)
        self._future = None
        self._data = None

    def sendto(self, data):
        """Queue `data` for sending; returns a Future resolving to True."""
        self._future = asyncio.Future(loop=self._loop)
        self._data = data if isinstance(data, bytes) else str(data).encode('utf-8')
        # BUG FIX: the original called the *global* `loop.add_writer(...)`,
        # which only worked by accident when a module-level `loop` existed;
        # use this instance's loop. (Also unified self.data -> self._data,
        # matching the attribute initialized in __init__.)
        self._loop.add_writer(self._sock.fileno(), self._sendto)
        return self._future

    def _sendto(self):
        try:
            self._sock.sendto(self._data, self._addr)
        except (BlockingIOError, InterruptedError):
            # Socket not writable yet; the writer callback will fire again.
            return
        except OSError as exc:
            self.abort(exc)
        except Exception as exc:
            self.abort(exc)
        else:
            self.close()
            self._future.set_result(True)

    def abort(self, exc):
        """Tear down and propagate `exc` through the pending Future."""
        self.close()
        self._future.set_exception(exc)

    def close(self):
        self._loop.remove_writer(self._sock.fileno())
        self._sock.close()
Then a simple example would look like:
# The original answer used "@asyncio.coroutine" with "yield from" (pasted as
# "#asyncio.coroutine"); that decorator was removed in Python 3.11, so the
# equivalent modern 3.5+ syntax — which the answer itself showed in its
# comments — is used here.
async def test():
    await UDPClient('127.0.0.1', 1234).sendto('ok')

loop = asyncio.get_event_loop()
loop.run_until_complete(test())