I am trying to add two coroutines to the asyncio event loop and am getting an error:
RuntimeError: This event loop is already running
My objective is to communicate with a server (that I have no control over). This server expects an initial connection from the client, then provides a port to the client over that connection. The client has to use this port to create a second connection, which the server uses to send unsolicited messages to the client. The first connection remains up throughout for other two-way communication.
To recreate this scenario, I have some code that reproduces the error:
import asyncio
import logging

class Connection():
    def __init__(self, ip, port, ioloop):
        self.ip = ip
        self.port = port
        self.ioloop = ioloop
        self.reader, self.writer = None, None
        self.protocol = None
        self.fileno = None

    async def __aenter__(self):
        # Applicable when doing 'async with Connection(...'
        log.info("Entering and Creating Connection")
        self.reader, self.writer = (
            await asyncio.open_connection(self.ip, self.port, loop=self.ioloop)
        )
        self.protocol = self.writer.transport.get_protocol()
        self.fileno = self.writer.transport.get_extra_info('socket').fileno()
        log.info(f"Created connection {self}")
        return self

    async def __aexit__(self, *args):
        # Applicable when doing 'async with Connection(...'
        log.info(f"Exiting and Destroying Connection {self}")
        if self.writer:
            self.writer.close()

    def __await__(self):
        # Applicable when doing 'await Connection(...'
        return self.__aenter__().__await__()

    def __repr__(self):
        return f"[Connection {self.ip}:{self.port}, {self.protocol}, fd={self.fileno}]"

    async def send_recv_message(self, message):
        log.debug(f"send: '{message}'")
        self.writer.write(message.encode())
        await self.writer.drain()
        log.debug("awaiting data...")
        data = await self.reader.read(9999)
        data = data.decode()
        log.debug(f"recv: '{data}'")
        return data

class ServerConnection(Connection):
    async def setup_connection(self):
        event_port = 8889  # Assume this came from the server
        print("In setup connection")
        event_connection = await EventConnection('127.0.0.1', event_port, self.ioloop)
        self.ioloop.run_until_complete(event_connection.recv_message())

class EventConnection(Connection):
    async def recv_message(self):
        log.debug("awaiting recv-only data...")
        data = await self.reader.read(9999)
        data = data.decode()
        log.debug(f"recv only: '{data}'")
        return data

async def main(loop):
    client1 = await ServerConnection('127.0.0.1', 8888, loop)
    await client1.setup_connection()
    await client1.send_recv_message("Hello1")
    await client1.send_recv_message("Hello2")
    await asyncio.sleep(5)

if __name__ == '__main__':
    #logging.basicConfig(level=logging.INFO)
    logging.basicConfig(level=logging.DEBUG)
    log = logging.getLogger()
    ioloop = asyncio.get_event_loop()
    print('starting loop')
    ioloop.run_until_complete(main(ioloop))
    print('completed loop')
    ioloop.close()
The error occurs in the ServerConnection.setup_connection() method, where run_until_complete is called.
I am probably doing something wrong due to a lack of understanding of asyncio. Basically, how do I set up a secondary connection that will receive event notifications (unsolicited) while setting up the first connection?
Thanks.
Followup
Since the code is very similar (just a few changes to add more functionality), I hope it's not bad etiquette to follow up on the original post, as the resulting error is still the same.
The new issue is that when the unsolicited message arrives (received by EventConnection), recv_message calls the process_data method. I would like process_data to run as a future so that recv_message completes (the ioloop should stop). ensure_future would then pick it up and keep running, using ServerConnection to do a request/response to the server. Before it does that, though, control has to pass through some user code (represented by external_command()), from which I would prefer to hide the async machinery; this makes things synchronous again. Once the user code has done what it needs to, it calls execute_command on ServerConnection, which kicks off the loop again.
The problem is that my expectation for ensure_future didn't pan out, as the loop apparently never stops running. Hence, when execution reaches execute_command, which calls run_until_complete, an exception with the error "This event loop is already running" occurs.
I have two questions:
1. How can I make the ioloop stop after process_data is placed into ensure_future, and subsequently run it again in execute_command?
2. Once recv_message has received something, how can it receive more unsolicited data? Is it enough/safe to just use ensure_future to have it call itself again?
Here's the example code that simulates this issue.
client1 = None

class ServerConnection(Connection):
    connection_type = 'Server Connection'

    async def setup_connection(self):
        event_port = 8889  # Assume this came from the server
        print("In setup connection")
        event_connection = await EventConnection('127.0.0.1', event_port, self.ioloop)
        asyncio.ensure_future(event_connection.recv_message())

    async def _execute_command(self, data):
        return await self.send_recv_message(data)

    def execute_command(self, data):
        response_str = self.ioloop.run_until_complete(self._execute_command(data))
        print(f"exec cmd response_str: {response_str}")

    def external_command(self, data):
        self.execute_command(data)

class EventConnection(Connection):
    connection_type = 'Event Connection'

    async def recv_message(self):
        global client1
        log.debug("awaiting recv-only data...")
        data = await self.reader.read(9999)
        data = data.decode()
        log.debug(f"recv-only: '{data}'")
        asyncio.ensure_future(self.process_data(data))
        asyncio.ensure_future(self.recv_message())

    async def process_data(self, data):
        global client1
        await client1.external_command(data)

async def main(ioloop):
    global client1
    client1 = await ServerConnection('127.0.0.1', 8888, ioloop)
    await client1.setup_connection()
    print(f"after connection setup loop running is {ioloop.is_running()}")
    await client1.send_recv_message("Hello1")
    print(f"after Hello1 loop running is {ioloop.is_running()}")
    await client1.send_recv_message("Hello2")
    print(f"after Hello2 loop running is {ioloop.is_running()}")
    while True:
        print(f"inside while loop running is {ioloop.is_running()}")
        t = 10
        print(f"asyncio sleep {t} sec")
        await asyncio.sleep(t)

if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    log = logging.getLogger()
    ioloop = asyncio.get_event_loop()
    print('starting loop')
    ioloop.run_until_complete(main(ioloop))
    print('completed loop')
    ioloop.close()
Try replacing:
self.ioloop.run_until_complete(...)
with:
await ...
Inside a coroutine the event loop is already running, so you cannot call run_until_complete on it; you just await the coroutine instead.
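Applied to the followup code, that means execute_command (and anything that calls it, such as external_command and process_data) becomes a coroutine, and recv_message can simply loop instead of re-scheduling itself. A rough sketch based on the question's code, not a tested drop-in:

class ServerConnection(Connection):
    async def execute_command(self, data):
        # Inside a coroutine the loop is already running, so just await;
        # run_until_complete is only for code running outside the loop.
        response_str = await self.send_recv_message(data)
        print(f"exec cmd response_str: {response_str}")

class EventConnection(Connection):
    async def recv_message(self):
        # Looping here also answers the second question: there is no need
        # to re-schedule recv_message with ensure_future after every read.
        while True:
            data = await self.reader.read(9999)
            if not data:  # connection closed
                break
            log.debug(f"recv-only: '{data.decode()}'")
            asyncio.ensure_future(self.process_data(data.decode()))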
Related
I'm trying to understand how to use asyncio streams for multiple connections that keep sending messages until a predefined condition or a socket timeout occurs. Looking at the Python docs, they provide the following example of a TCP server based on asyncio streams:
import asyncio

async def handle_echo(reader, writer):
    data = await reader.read(100)
    message = data.decode()
    addr = writer.get_extra_info('peername')

    print(f"Received {message!r} from {addr!r}")

    print(f"Send: {message!r}")
    writer.write(data)
    await writer.drain()

    print("Close the connection")
    writer.close()

async def main():
    server = await asyncio.start_server(
        handle_echo, '127.0.0.1', 8888)

    addrs = ', '.join(str(sock.getsockname()) for sock in server.sockets)
    print(f'Serving on {addrs}')

    async with server:
        await server.serve_forever()

asyncio.run(main())
What I'm trying to do is more complex, and it looks more like the following (much of it is pseudocode, written in capital letters or with the implementation omitted):
import asyncio

async def io_control(queue):
    while True:
        ...
        # do I/O control in this function ...

async def data_processing(queue):
    while True:
        ...
        # perform data handling

async def handle_data(reader, writer):
    data = await reader.read()
    message = data.decode()
    addr = writer.get_extra_info('peername')
    print(f"Received {message!r} from {addr!r}")
    # do stuff with a queue - pass messages to other two async functions as needed
    # keep open until something happens
    if ERROR or SOCKET_TIMEOUT:
        writer.close()

async def server(queue):
    server = await asyncio.start_server(
        handle_data, '127.0.0.1', 8888)

    addrs = ', '.join(str(sock.getsockname()) for sock in server.sockets)
    print(f'Serving on {addrs}')

    async with server:
        await server.serve_forever()

async def main():
    queue_io = asyncio.Queue()
    queue_data = asyncio.Queue()
    asyncio.run(server(queue_data))
    asyncio.run(data_processing(queue_data))
    asyncio.run(io_control(queue_io))

asyncio.run(main())
Does this look feasible? I'm not used to working with coroutines (I come from more of a multi-threading paradigm), so I'm not sure whether what I'm doing is right or whether I have to explicitly include yields or do any extra stuff.
If I understand correctly, you just need the TCP server to be able to handle multiple concurrent connections. The start_server function should already give you everything you need.
The first parameter client_connected_cb is a coroutine function called whenever a client establishes a connection. If you introduce a loop into that function (in your example code handle_data), you can keep the connection open until some criterion is met. What conditions exactly should lead to closing the connection is up to you, and the implementation details will obviously depend on that. The simplest approach I can imagine is something like this:
import asyncio
import logging

log = logging.getLogger(__name__)

async def handle_data(reader, writer):
    while True:
        data = (await reader.readline()).decode().strip()
        if not data:
            log.debug("client disconnected")
            break
        response = await your_data_processing_function(data)
        writer.write(response.encode())
        await writer.drain()

...

async def main():
    server = await asyncio.start_server(handle_data, '127.0.0.1', 8888)
    async with server:
        await server.serve_forever()

if __name__ == '__main__':
    asyncio.run(main())
There is theoretically no limit to the number of concurrent connections.
If your client_connected_cb is a coroutine function, each new connection will schedule a new task for the event loop. That is where the concurrency comes from. The magic then happens at the point of awaiting new data from the client; that is where the event loop can switch execution to another coroutine. All this happens behind the scenes, so to speak.
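To see that concurrency in action, a throwaway client sketch like the following can open several connections at once (the host, port, and line-based protocol are assumptions matching the server above, and it assumes the server answers each line with a line):

import asyncio

async def client(name):
    reader, writer = await asyncio.open_connection('127.0.0.1', 8888)
    writer.write(f"hello from {name}\n".encode())
    await writer.drain()
    # Assumes the server responds with one line per request
    print(name, (await reader.readline()).decode().strip())
    writer.close()
    await writer.wait_closed()

async def main():
    # Three concurrent connections; the server schedules a task for each
    await asyncio.gather(*(client(f"client-{i}") for i in range(3)))

asyncio.run(main())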
If you want to introduce a timeout, you could, for example, wrap the readline coroutine in asyncio.wait_for and catch the resulting TimeoutError to exit the loop.
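A minimal sketch of that variant (the 30-second timeout is an arbitrary choice):

async def handle_data(reader, writer):
    while True:
        try:
            raw = await asyncio.wait_for(reader.readline(), timeout=30)
        except asyncio.TimeoutError:
            log.debug("client timed out")
            break
        data = raw.decode().strip()
        if not data:
            log.debug("client disconnected")
            break
        response = await your_data_processing_function(data)
        writer.write(response.encode())
        await writer.drain()
    writer.close()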
Hope this helps.
I have a Python socket server using asyncio and websockets. When the websocket is active, 100+ devices will connect and hold their connections, waiting for commands/messages.
There are two threads. The first thread accepts connections, adds their details to a global variable, and then waits for messages from the device:
async def thread1(websocket, path):
    client_address = await websocket.recv()
    CONNECTIONS[client_address] = websocket
    async for message in websocket:
        ...  # do something with message

start_server = websockets.serve(thread1, host, port)
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.ensure_future(thread2())
asyncio.get_event_loop().run_forever()
The second thread processes some user data and once it needs to send a command it accesses a global variable to get the websocket info:
async def thread2():
    ...  # some data processing
    soc = CONNECTIONS[ipaddress]
    await soc.send("some message")
My question: What's the best way to allow another thread to send messages?
I can keep the global variable safe using thread locking and a function made only to process that data; however, global variables aren't ideal. I also cannot send information between threads, since thread1 is stuck waiting to receive messages.
First, a note on terminology: what the question calls threads are not threads. You are using asyncio, and the relevant concept here is the coroutine (which asyncio wraps in a Task). How coroutines differ from threads can be found, for example, here.
The websockets server spawns a new task for each incoming connection (there are exactly as many spawned tasks as connections). I don't see anything wrong with a global object, at least in a small script; still, in the example below I keep this state in a separate class.
Also, no special synchronization between the coroutines is required in this case, since they run under cooperative multitasking: everything executes in a single thread, and control is handed over only at well-defined points (awaits).
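As a tiny illustration of that point (separate from the server example below): two tasks can update shared state without a lock, because control can only switch at an await.

import asyncio

counters = {"a": 0, "b": 0}

async def bump(key):
    for _ in range(3):
        counters[key] += 1      # no await between read and write, so no race
        await asyncio.sleep(0)  # explicit cooperative switch point

async def main():
    await asyncio.gather(bump("a"), bump("b"))
    print(counters)  # {'a': 3, 'b': 3}

asyncio.run(main())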
Here is a simple example in which the server stores a dictionary of incoming connections and starts a task that notifies all clients every 2 seconds, sending them the current time. The server also prints confirmations from the clients to the console.
# ws_server.py
import asyncio
import websockets
import datetime

class Server:
    def __init__(self, host, port):
        self.host = host
        self.port = port
        self.connections = {}
        self.is_active = False
        self.server = None

    async def start(self):
        self.is_active = True
        self.server = await websockets.serve(self.handler, self.host, self.port)
        asyncio.create_task(self.periodic_notifier())

    async def stop(self):
        self.is_active = False
        self.server.close()
        await self.wait_closed()

    async def wait_closed(self):
        await self.server.wait_closed()

    async def handler(self, websocket, path):
        self.connections[websocket.remote_address] = websocket
        try:
            async for message in websocket:
                print(message)
        except websockets.exceptions.ConnectionClosedError:
            pass
        del self.connections[websocket.remote_address]
        print(f"Connection {websocket.remote_address} is closed")

    async def periodic_notifier(self):
        while self.is_active:
            await asyncio.gather(
                *[ws.send(f"Hello time {datetime.datetime.now()}") for ws in self.connections.values()],
                return_exceptions=True)
            await asyncio.sleep(2)

async def main():
    server = Server("localhost", 8080)
    await server.start()
    await server.wait_closed()

asyncio.run(main())

# ws_client.py
import asyncio
import websockets

async def client():
    uri = "ws://localhost:8080"
    async with websockets.connect(uri) as websocket:
        async for message in websocket:
            print(message)
            await websocket.send(f"ACK {message}")

asyncio.run(client())
I have a publisher/subscriber architecture running on my websocket server: the publisher runs in one thread and the websocket server in another. I connect to the server from the publisher over localhost, and the server distributes the published messages to any other clients connected on the /sub path. However, since the publisher thread does not always have new data to publish, it tends to disconnect after a timeout of 50 seconds. To fix this, I implemented a heartbeat ping function:
async def ping(websocket):
    while True:
        await asyncio.sleep(30)
        print("[%s] Pinging server..." % datetime.now())
        await websocket.send('ping')
This keeps the publisher from disconnecting. However, when I try to run this concurrently with the coroutine that sends the actual data, I cannot get both ping() and send_data() to run in parallel. I've tried awaiting both functions as well as asyncio.gather() (which, according to the documentation, is supposed to run tasks concurrently), and flipping the order, but it seems that in all cases only the first function call is run.
My thread class for reference:
class Publisher(threading.Thread):
    """
    Thread acting as the websocket publisher.
    Pulls data from the data merger queue and publishes onto the websocket server.
    """
    def __init__(self, loop, q, addr, port):
        threading.Thread.__init__(self)
        self.loop = loop
        self.queue = q
        self.id = threading.get_ident()
        self.addr = addr
        self.port = port
        self.name = 'publisher'
        print("Publisher thread started (ID:%s)" % self.id)

    def run(self):
        self.loop.run_until_complete(self.publish())
        asyncio.get_event_loop().run_forever()

    async def ping(self, websocket):
        while True:
            await asyncio.sleep(30)
            print("[%s] Pinging server..." % datetime.now())
            await websocket.send('ping')

    async def send_data(self, websocket):
        while True:
            try:
                msg = json.dumps(self.queue.get())  # Get the data from the queue
                print(msg)
                await asyncio.sleep(0.1)
                if not msg:
                    print("No message")
                    break
                await websocket.send(msg)
            except websockets.exceptions.ConnectionClosedError:
                print("Connection closed")
                break

    async def publish(self):
        uri = 'ws://' + str(self.addr) + ':' + str(self.port) + '/pub'
        async with websockets.connect(uri) as websocket:
            await asyncio.gather(
                self.ping(websocket),
                self.send_data(websocket)
            )
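One likely culprit (an assumption, since the type of self.queue isn't shown): if it is a standard queue.Queue, then self.queue.get() blocks the whole event loop until an item arrives, so ping() never gets a chance to run. A sketch of one common fix is to hand the blocking call to a thread-pool executor, as a replacement send_data method:

    async def send_data(self, websocket):
        loop = asyncio.get_event_loop()
        while True:
            try:
                # Run the blocking queue.get() in a worker thread so the
                # event loop stays free to run ping() concurrently
                msg = json.dumps(await loop.run_in_executor(None, self.queue.get))
                if not msg:
                    print("No message")
                    break
                await websocket.send(msg)
            except websockets.exceptions.ConnectionClosedError:
                print("Connection closed")
                break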
I'm currently struggling with something "simple".
I'd like to have a Python WebSocket server that can be shut down by outside events (e.g. a Ctrl+C from the command line).
Here is my code so far:
PORT = 8765

class Server(object):
    def __init__(self):
        self.online_players = dict()
        self.online_players_lock = asyncio.Lock()
        self.websocket_server = None

    async def add_online_player(self, id, player):
        async with self.online_players_lock:
            self.online_players[id] = player

    async def remove_online_player(self, id):
        async with self.online_players_lock:
            if id in self.online_players.keys():
                del self.online_players[id]

    def start(self):
        end = False
        loop = asyncio.new_event_loop()
        thread = threading.Thread(target=listen, args=(loop, self))
        thread.start()
        while not end:
            try:
                time.sleep(500)
            except KeyboardInterrupt:
                end = True
                loop.call_soon_threadsafe(stop_listening, loop, server)

async def on_connect(websocket, path, server):
    print("New user...")
    id = await websocket.recv()
    player = WebSocketPlayer(id, websocket, server)
    await server.add_online_player(id, player)
    # from this point on WebSocketPlayer class handles communication
    await player.listen()

def listen(loop, server: Server):
    asyncio.set_event_loop(loop)
    bound_handler = functools.partial(on_connect, server=server)
    start_server_task = websockets.serve(bound_handler, "localhost", PORT, ping_timeout=None, loop=loop)
    start_server = loop.run_until_complete(start_server_task)
    server.websocket_server = start_server
    print("Server running ...")
    loop.run_forever()

async def stop_listening(loop, server: Server):
    await server.websocket_server.wait_closed()
    loop.stop()
    loop.close()

if __name__ == "__main__":
    server = Server()
    server.start()
Signal handlers from asyncio like loop.add_signal_handler(signum, callback, *args) are not an option for me, because they only work on Unix.
The error that I currently get is that the stop_listening coroutine was never awaited, which kind of makes sense to me. So I am not that interested in fixing this exact code example, but rather, in general: how is it possible to achieve my goal, or how is this usually solved?
Thank you very much in advance
Nevermind, this question is related to this question: Why does the asyncio's event loop suppress the KeyboardInterrupt on Windows? That is actually a bug in asyncio on Windows.
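For reference, a commonly cited workaround for that bug is to keep the loop waking up periodically so the KeyboardInterrupt can be delivered (a sketch, not specific to the code above):

import asyncio

async def wakeup():
    # A sleeping Windows event loop never sees Ctrl+C; waking it up
    # periodically gives KeyboardInterrupt a chance to be raised.
    while True:
        await asyncio.sleep(1)

loop = asyncio.get_event_loop()
loop.create_task(wakeup())
try:
    loop.run_forever()
except KeyboardInterrupt:
    print("Shutting down")
finally:
    loop.close()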
I'm currently playing with aiohttp to see how it performs as a server application for a mobile app with a websocket connection.
Here is simple "Hello world" example (as gist here):
import asyncio
import aiohttp
from aiohttp import web

class WebsocketEchoHandler:

    @asyncio.coroutine
    def __call__(self, request):
        ws = web.WebSocketResponse()
        ws.start(request)

        print('Connection opened')
        try:
            while True:
                msg = yield from ws.receive()
                ws.send_str(msg.data + '/answer')
        except:
            pass
        finally:
            print('Connection closed')
        return ws

if __name__ == "__main__":
    app = aiohttp.web.Application()
    app.router.add_route('GET', '/ws', WebsocketEchoHandler())

    loop = asyncio.get_event_loop()
    handler = app.make_handler()
    f = loop.create_server(
        handler,
        '127.0.0.1',
        8080,
    )
    srv = loop.run_until_complete(f)
    print("Server started at {sock[0]}:{sock[1]}".format(
        sock=srv.sockets[0].getsockname()))
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    finally:
        loop.run_until_complete(handler.finish_connections(1.0))
        srv.close()
        loop.run_until_complete(srv.wait_closed())
        loop.run_until_complete(app.finish())
    loop.close()
The problem
Now I would like to use the structure described below (node server = python aiohttp). More specifically, I want to use the Redis Pub/Sub mechanism with asyncio-redis to read and write both to the websocket connection and to Redis in my WebsocketEchoHandler.
WebsocketEchoHandler is a dead-simple loop, so I'm not sure how this should be done. Using Tornado and brükva I would just use callbacks.
Extra (offtopic perhaps) question
Since I'm already using Redis, which of these two approaches should I take:
Like in a "classic" web app, have a controller/view for everything, and use Redis just for messaging etc.
Make the web app just a layer between the client and Redis, used also as a task queue (simplest: Python RQ). Every request should be delegated to workers.
EDIT
Image from http://goldfirestudios.com/blog/136/Horizontally-Scaling-Node.js-and-WebSockets-with-Redis
EDIT 2
It seems that I need to clarify.
The websocket-only handler is shown above.
A Redis Pub/Sub handler might look like this:
class WebsocketEchoHandler:

    @asyncio.coroutine
    def __call__(self, request):
        ws = web.WebSocketResponse()
        ws.start(request)

        connection = yield from asyncio_redis.Connection.create(host='127.0.0.1', port=6379)
        subscriber = yield from connection.start_subscribe()
        yield from subscriber.subscribe(['ch1', 'ch2'])

        print('Connection opened')
        try:
            while True:
                msg = yield from subscriber.next_published()
                ws.send_str(msg.value + '/answer')
        except:
            pass
        finally:
            print('Connection closed')
        return ws
This handler just subscribes to the Redis channels ch1 and ch2 and sends every message received on those channels to the websocket.
I want to have this handler:
class WebsocketEchoHandler:

    @asyncio.coroutine
    def __call__(self, request):
        ws = web.WebSocketResponse()
        ws.start(request)

        connection = yield from asyncio_redis.Connection.create(host='127.0.0.1', port=6379)
        subscriber = yield from connection.start_subscribe()
        yield from subscriber.subscribe(['ch1', 'ch2'])

        print('Connection opened')
        try:
            while True:
                # If a message is received from Redis OR from the websocket
                msg_ws = yield from ws.receive()
                msg_redis = yield from subscriber.next_published()
                if msg_ws:
                    # push to redis / do something else
                    self.on_msg_from_ws(msg_ws)
                if msg_redis:
                    self.on_msg_from_redis(msg_redis)
        except:
            pass
        finally:
            print('Connection closed')
        return ws
But the following code is always executed sequentially, so reading from the websocket blocks reading from Redis:
msg_ws = yield from ws.receive()
msg_redis = yield from subscriber.next_published()
I want the reads to be event-driven, where the event is a message arriving from either of the two sources.
You should use two while loops - one that handles messages from the websocket, and one that handles messages from redis. Your main handler can just kick off two coroutines, one handling each loop, and then wait on both of them:
class WebsocketEchoHandler:

    @asyncio.coroutine
    def __call__(self, request):
        ws = web.WebSocketResponse()
        ws.start(request)

        connection = yield from asyncio_redis.Connection.create(host='127.0.0.1', port=6379)
        subscriber = yield from connection.start_subscribe()
        yield from subscriber.subscribe(['ch1', 'ch2'])

        print('Connection opened')
        try:
            # Kick off both coroutines in parallel, and then block
            # until both are completed.
            yield from asyncio.gather(self.handle_ws(ws), self.handle_redis(subscriber))
        except Exception as e:  # Don't do except: pass
            import traceback
            traceback.print_exc()
        finally:
            print('Connection closed')
        return ws

    @asyncio.coroutine
    def handle_ws(self, ws):
        while True:
            msg_ws = yield from ws.receive()
            if msg_ws:
                self.on_msg_from_ws(msg_ws)

    @asyncio.coroutine
    def handle_redis(self, subscriber):
        while True:
            msg_redis = yield from subscriber.next_published()
            if msg_redis:
                self.on_msg_from_redis(msg_redis)
This way you can read from any of the two potential sources without having to care about the other.
More recently, we can use async/await, available in Python 3.5 and above:
async def task1(ws):
    async for msg in ws:
        if msg.type == WSMsgType.TEXT:
            data = msg.data
            print(data)
            if data:
                await ws.send_str('pong')

## ch is a redis channel
async def task2(ch):
    async for msg in ch.iter(encoding="utf-8", decoder=json.loads):
        print("receiving", msg)
        user_token = msg['token']
        if user_token in r_cons.keys():
            _ws = r_cons[user_token]
            await _ws.send_json(msg)

coroutines = list()
coroutines.append(task1(ws))
coroutines.append(task2(ch1))
await asyncio.gather(*coroutines)
This is what I do when the websocket needs to wait for messages from multiple sources. The main point is to use asyncio.gather to run the two coroutines together, as @dano mentioned above.