I'm currently struggling with something "simple".
I'd like to have a Python WebSocket server that can be shut down by outside events (e.g. a Ctrl+C from the command line).
Here is my code so far:
import asyncio
import functools
import threading
import time

import websockets

PORT = 8765
class Server(object):
def __init__(self):
self.online_players = dict()
self.online_players_lock = asyncio.Lock()
self.websocket_server = None
async def add_online_player(self, id, player):
async with self.online_players_lock:
self.online_players[id] = player
async def remove_online_player(self, id):
async with self.online_players_lock:
if id in self.online_players.keys():
del self.online_players[id]
def start(self):
end = False
loop = asyncio.new_event_loop()
thread = threading.Thread(target=listen, args=(loop, self))
thread.start()
while not end:
try:
time.sleep(500)
except KeyboardInterrupt:
end = True
loop.call_soon_threadsafe(stop_listening, loop, server)
async def on_connect(websocket, path, server):
print("New user...")
id = await websocket.recv()
player = WebSocketPlayer(id, websocket, server)
await server.add_online_player(id, player)
# from this point on WebSocketPlayer class handles communication
await player.listen()
def listen(loop, server:Server):
asyncio.set_event_loop(loop)
bound_handler = functools.partial(on_connect, server=server)
start_server_task = websockets.serve(bound_handler, "localhost", PORT, ping_timeout=None, loop=loop)
start_server = loop.run_until_complete(start_server_task)
server.websocket_server = start_server
print("Server running ...")
loop.run_forever()
async def stop_listening(loop, server:Server):
    await server.websocket_server.wait_closed()
loop.stop()
loop.close()
if __name__ == "__main__":
server = Server()
server.start()
Signal handlers from asyncio like loop.add_signal_handler(signum, callback, *args) are not an option for me, because they only work on Unix.
The error that I currently get is that the stop_listening coroutine was never awaited, which kind of makes sense to me. So I am not that interested in fixing my code example specifically, but more in how this can be achieved in general, or how it is usually solved.
Thank you very much in advance
Never mind, this is related to the question Why does asyncio's event loop suppress the KeyboardInterrupt on Windows?, which is actually a bug in asyncio on Windows.
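For reference, the pattern that usually gets suggested for this looks roughly like the sketch below: run the event loop in a background thread, keep the main thread in an interruptible sleep, and on Ctrl+C schedule loop.stop (a plain callback, not a coroutine) via call_soon_threadsafe, so the actual shutdown happens on the loop's own thread after run_forever() returns. This is only a sketch: the echo handler and the function names are placeholders, and it assumes the classic websockets.serve() API used in the question.
import asyncio
import threading
import time

import websockets

async def handler(websocket, path):
    # Placeholder handler: echo whatever the client sends.
    async for message in websocket:
        await websocket.send(message)

def run_server(loop):
    asyncio.set_event_loop(loop)
    server = loop.run_until_complete(
        websockets.serve(handler, "localhost", 8765))
    loop.run_forever()                           # returns once loop.stop() runs
    # Clean up on the loop's own thread.
    server.close()
    loop.run_until_complete(server.wait_closed())
    loop.close()

if __name__ == "__main__":
    loop = asyncio.new_event_loop()
    thread = threading.Thread(target=run_server, args=(loop,))
    thread.start()
    try:
        while thread.is_alive():
            time.sleep(0.5)                      # interruptible by Ctrl+C, also on Windows
    except KeyboardInterrupt:
        # loop.stop is a plain callback, so call_soon_threadsafe can schedule it;
        # scheduling a coroutine function this way is what triggers the
        # "was never awaited" warning.
        loop.call_soon_threadsafe(loop.stop)
    thread.join()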
My design is probably wrong here, but I'll ask anyway. Currently, I am using websockets to create a websocket server. I am performing a lot of other work in my program and would like the websocket server to run in its own thread. I create a class that subclasses threading.Thread and overrides the run method. This class's stop_ws_server method is where I close the websocket server and stop and close the event loop that I created.
import asyncio
import json
import threading
from time import sleep

import websockets

class MyWsServer(threading.Thread):
def __init__(self, address, port):
threading.Thread.__init__(self)
self.port = port
self.address = address
self.server = None
self.running = False
self.loop = None
def start_ws_server(self):
self.start()
def run(self):
if not self.loop:
self.loop = asyncio.new_event_loop()
ws_server = websockets.serve(self.ws_handler, self.address, self.port,
ping_timeout=None, ping_interval=None, loop=self.loop)
self.running = True
self.server = ws_server
self.loop.run_until_complete(self.server)
self.loop.run_forever()
def stop_ws_server(self):
self.running = False
self.server.ws_server.close()
self.loop.stop()
self.loop.close()
async def ws_handler(self, websocket, path):
while self.running:
print(self.running)
# simulate work
print("doing some work")
sleep(5)
print("Sending data")
data = json.dumps({"test": "test test"})
try:
await websocket.send(data)
result = await websocket.recv()
print(result)
result = json.loads(result)
print(f"json: {result}")
except websockets.ConnectionClosed:
print(f"Terminated")
break
print("out of ws_handler")
This class instance is accessible elsewhere in the program, and when calling the stop_ws_server() method, I get the following error regarding self.loop.close(). Nothing further up in the stack trace is worth noting.
File "C:\Users\User\AppData\Local\Programs\Python\Python39\lib\asyncio\proactor_events.py", line 674, in close
raise RuntimeError("Cannot close a running event loop")
RuntimeError: Cannot close a running event loop
So my questions are:
Why doesn't self.loop.stop() stop the event loop?
Is there a better solution for stopping the event loop and, the end goal, stopping the thread?
Also, just to note, I don't think setting self.running to False in stop_ws_server() does anything, since this thread should be stuck on result = await websocket.recv() the majority of the time. I don't seem to have a clean way to exit the websocket handler. The last three lines in stop_ws_server() seem rather bruteforce-ish to me.
I've also seen some posts on Stack Overflow about asyncio objects not being thread-safe, as well as plenty of posts recommending some use of asyncio's loop.call_soon_threadsafe(). I'm not sure how I can use that with my current setup, though.
Well, I greatly appreciate any help. Thanks. Please let me know if things could need more clarification.
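For what it's worth, the usual way call_soon_threadsafe fits into a setup like this is sketched below: loop.stop() has to run on the loop's own thread, so the stop method only schedules it, and the server and the loop are then shut down inside run() after run_forever() returns. This is a sketch under the same websockets.serve() API as above; the class name and the handler argument are placeholders, not the original code.
import asyncio
import threading

import websockets

class WsServerThread(threading.Thread):
    """Runs a websockets server on its own event loop; stop() is thread-safe."""

    def __init__(self, address, port, handler):
        super().__init__()
        self.address = address
        self.port = port
        self.handler = handler
        self.loop = asyncio.new_event_loop()

    def run(self):
        asyncio.set_event_loop(self.loop)
        server = self.loop.run_until_complete(
            websockets.serve(self.handler, self.address, self.port))
        self.loop.run_forever()                  # blocks until stop() schedules loop.stop
        # Shut down on the loop's own thread, where awaiting is allowed.
        server.close()
        self.loop.run_until_complete(server.wait_closed())
        self.loop.close()

    def stop(self):
        # loop.stop() must run on the loop's thread, so only schedule it here.
        self.loop.call_soon_threadsafe(self.loop.stop)
Any other thread can then call stop() and join() on the instance; join() returns once the loop has shut down.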
Well, it seems quite hackish to me, but I took guidance from another solution here on Stack Overflow; here is the answer I used: https://stackoverflow.com/a/67767248/5879710. The new code is as follows. I also removed some lines from the previous code, such as now-unused variables and print statements.
import asyncio
import json
from multiprocessing import Process
from time import sleep

import websockets

class MyWsServer(Process):
def __init__(self, address, port):
super().__init__()
self.port = port
self.address = address
def run(self):
loop = asyncio.new_event_loop()
ws_server = websockets.serve(self.ws_handler, self.address, self.port,
ping_timeout=None, ping_interval=None, loop=loop)
loop.run_until_complete(ws_server)
loop.run_forever()
async def ws_handler(self, websocket, path):
while True:
# simulate work
print("doing some work")
sleep(5)
print("Sending data")
data = json.dumps({"test": "test test"})
try:
await websocket.send(data)
result = await websocket.recv()
print(result)
result = json.loads(result)
print(f"json: {result}")
except websockets.ConnectionClosed:
print(f"Terminated")
break
To start the process, I call start() on the MyWsServer class instance that I create elsewhere in the code, and I call terminate() to kill the process. I would have liked a more graceful way to shut down the event loop and the other resources created in the process, but I haven't found a better way yet.
Edit: I forgot to mention that I am using multiprocessing now.
I have a Python socket server using asyncio and websockets. When the websocket server is active, 100+ devices will connect and hold their connections, waiting for commands/messages.
There are two threads: the first thread accepts connections, adds their details to a global variable, and then waits for messages from the device:
async def thread1(websocket, path):
client_address = await websocket.recv()
CONNECTIONS[client_address] = websocket
async for message in websocket:
... do something with message
start_server = websockets.serve(thread1, host, port)
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.ensure_future(thread2())
asyncio.get_event_loop().run_forever()
The second thread processes some user data, and once it needs to send a command, it accesses the global variable to get the websocket info:
async def thread2():
...some data processing
soc = CONNECTIONS[ipaddress]
await soc.send("some message")
My question: What's the best way to allow another thread to send messages?
I can keep the global variable safe using thread locking and a function made only to process that data; however, global variables aren't ideal. I cannot send information between threads, since thread1 is stuck waiting to receive messages.
The first thing I would like to point out is the incorrect use of the term thread. You are using asyncio, where the relevant concept is the coroutine (a coroutine is wrapped into an asyncio task). How coroutines differ from threads can be found, for example, here.
The websockets server spawns a new task for each incoming connection (there are as many spawned tasks as connections). I don't see anything wrong with the global object, at least in a small script. However, below I give an example where I place this in a separate class.
Also, in this case, no special synchronization between coroutines is required, since they run via cooperative multitasking (in fact, all of them execute in one thread, handing over control at certain points).
Here is a simple example in which the server stores a dictionary of incoming connections and starts a task that, every 2 seconds, notifies all clients by sending them the current time. The server also prints confirmations from clients to the console.
# ws_server.py
import asyncio
import websockets
import datetime
class Server:
def __init__(self, host, port):
self.host = host
self.port = port
self.connections = {}
self.is_active = False
self.server = None
async def start(self):
self.is_active = True
self.server = await websockets.serve(self.handler, self.host, self.port)
asyncio.create_task(self.periodic_notifier())
async def stop(self):
self.is_active = False
self.server.close()
await self.wait_closed()
async def wait_closed(self):
await self.server.wait_closed()
async def handler(self, websocket, path):
self.connections[websocket.remote_address] = websocket
try:
async for message in websocket:
print(message)
        except websockets.exceptions.ConnectionClosedError as e:
pass
del self.connections[websocket.remote_address]
print(f"Connection {websocket.remote_address} is closed")
async def periodic_notifier(self):
while self.is_active:
await asyncio.gather(
*[ws.send(f"Hello time {datetime.datetime.now()}") for ws in self.connections.values()],
return_exceptions=True)
await asyncio.sleep(2)
async def main():
server = Server("localhost", 8080)
await server.start()
await server.wait_closed()
asyncio.run(main())
# ws_client.py
import asyncio
import websockets
async def client():
uri = "ws://localhost:8080"
async with websockets.connect(uri) as websocket:
async for message in websocket:
print(message)
await websocket.send(f"ACK {message}")
asyncio.run(client())
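If you need to send a command to one specific client instead of broadcasting (which is what the original thread2 wanted to do), a small helper along these lines could be added to the Server class above. The name send_to is hypothetical, and it assumes the caller is a coroutine running on the same event loop:
    async def send_to(self, remote_address, message):
        # Look up one client by its remote address and send it a message.
        # No lock is needed, since everything runs on a single event loop.
        ws = self.connections.get(remote_address)
        if ws is not None:
            await ws.send(message)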
I have a bog-standard synchronous python program that needs to be able to read data from websockets and update the GUI with the data. However, asyncio creep is constantly tripping me up.
How do I make a module that:
accepts multiple subscriptions to multiple sources
sends an update to the requester whenever there's data
opens exactly one websocket connection per URL
resets the websocket if it closes
Here's what I have already, but it's failing at many points:
run_forever() means that the loop gets stuck before the subscription completes, and handle() is then stuck in the falsy while loop
it does not seem to restart sockets when they're down, because a websockets object does not have a connected property (websocket without an s does, but I'm not clear on the differences and can't find info online either)
I'm absolutely not sure if my approach is remotely correct.
Been fighting with this for weeks. Would appreciate some pointers.
class WSClient():
subscriptions = set()
connections = {}
started = False
def __init__(self):
self.loop = asyncio.get_event_loop()
def start(self):
self.started = True
self.loop.run_until_complete(self.handle())
        self.loop.run_forever() # problematic, because it does not allow new subscribe() events
async def handle(self):
while len(self.connections) > 0:
# listen to every websocket
futures = [self.listen(self.connections[url]) for url in self.connections]
done, pending = await asyncio.wait(futures)
# the following is apparently necessary to avoid warnings
# about non-retrieved exceptions etc
try:
data, ws = done.pop().result()
except Exception as e:
print("OTHER EXCEPTION", e)
for task in pending:
task.cancel()
async def listen(self, ws):
try:
async for data in ws:
data = json.loads(data)
# call the subscriber (listener) back when there's data
[s.listener._handle_result(data) for s in self.subscriptions if s.ws == ws]
except Exception as e:
print('ERROR LISTENING; RESTARTING SOCKET', e)
await asyncio.sleep(2)
self.restart_socket(ws)
def subscribe(self, subscription):
task = self.loop.create_task(self._subscribe(subscription))
asyncio.gather(task)
if not self.started:
self.start()
async def _subscribe(self, subscription):
try:
ws = self.connections.get(subscription.url, await websockets.connect(subscription.url))
await ws.send(json.dumps(subscription.sub_msg))
subscription.ws = ws
self.connections[subscription.url] = ws
self.subscriptions.add(subscription)
except Exception as e:
print("ERROR SUBSCRIBING; RETRYING", e)
await asyncio.sleep(2)
self.subscribe(subscription)
def restart_socket(self, ws):
for s in self.subscriptions:
if s.ws == ws and not s.ws.connected:
print(s)
del self.connections[s.url]
self.subscribe(s)
I have a bog-standard synchronous python program that needs to be able to read data from websockets and update the GUI with the data. However, asyncio creep is constantly tripping me up.
Since you mentioned a GUI, it is probably not a "bog-standard synchronous python program". A GUI program usually has a non-blocking, event-driven main thread, which allows concurrent user interactions and callbacks. That is very similar to asyncio, and a common way for asyncio to work together with GUIs is to use a GUI-specific event loop to replace asyncio's default event loop, so that your asyncio coroutines simply run in the GUI event loop and you avoid having run_forever() block everything.
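As a rough illustration of that first option (the answer does not name a specific package, so treat this as an assumption): with a Qt GUI, the third-party qasync package provides an asyncio event loop that is driven by Qt's event loop.
import sys
import asyncio

import qasync                        # assumption: Qt GUI with the qasync package
from PyQt5.QtWidgets import QApplication

app = QApplication(sys.argv)
loop = qasync.QEventLoop(app)        # asyncio loop backed by the Qt event loop
asyncio.set_event_loop(loop)

# ... build widgets here and schedule coroutines with asyncio.ensure_future(...) ...

try:
    loop.run_forever()               # Qt events and asyncio coroutines share one loop
finally:
    loop.close()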
An alternative way is to run asyncio event loop in a separate thread, so that your program could at the same time wait for websocket data and wait for user clicks. I've rewritten your code as follows:
import asyncio
import threading
import websockets
import json
class WSClient(threading.Thread):
def __init__(self):
super().__init__()
self._loop = None
self._tasks = {}
self._stop_event = None
def run(self):
self._loop = asyncio.new_event_loop()
self._stop_event = asyncio.Event(loop=self._loop)
try:
self._loop.run_until_complete(self._stop_event.wait())
self._loop.run_until_complete(self._clean())
finally:
self._loop.close()
def stop(self):
self._loop.call_soon_threadsafe(self._stop_event.set)
def subscribe(self, url, sub_msg, callback):
def _subscribe():
if url not in self._tasks:
task = self._loop.create_task(
self._listen(url, sub_msg, callback))
self._tasks[url] = task
self._loop.call_soon_threadsafe(_subscribe)
def unsubscribe(self, url):
def _unsubscribe():
task = self._tasks.pop(url, None)
if task is not None:
task.cancel()
self._loop.call_soon_threadsafe(_unsubscribe)
async def _listen(self, url, sub_msg, callback):
try:
while not self._stop_event.is_set():
try:
ws = await websockets.connect(url, loop=self._loop)
await ws.send(json.dumps(sub_msg))
async for data in ws:
data = json.loads(data)
# NOTE: please make sure that `callback` won't block,
# and it is allowed to update GUI from threads.
# If not, you'll need to find a way to call it from
# main/GUI thread (similar to `call_soon_threadsafe`)
callback(data)
except Exception as e:
print('ERROR; RESTARTING SOCKET IN 2 SECONDS', e)
await asyncio.sleep(2, loop=self._loop)
finally:
self._tasks.pop(url, None)
async def _clean(self):
for task in self._tasks.values():
task.cancel()
await asyncio.gather(*self._tasks.values(), loop=self._loop)
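A hypothetical usage from the main/GUI thread could look like this; the URL, the subscription message, and the callback are placeholders:
client = WSClient()
client.start()        # starts the thread, which runs its own asyncio loop
client.subscribe("wss://example.com/feed",
                 {"op": "subscribe", "channel": "ticker"},
                 lambda data: print("got", data))
# ... later, when shutting down the application ...
client.unsubscribe("wss://example.com/feed")
client.stop()
client.join()         # wait for the loop thread to finish cleanly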
You can also try Tornado or Autobahn with Twisted for websockets.
I am trying to add two coroutines to asyncio loop and getting an error:
RuntimeError: This event loop is already running
My objective is to communicate with a server (that I have no control over). The server expects an initial connection from the client and then provides a port to the client over this connection. The client has to use this port to create a second connection, which the server uses to send unsolicited messages to the client. The first connection remains up throughout for other two-way communication.
To recreate this scenario, I have some code that reproduces the error:
import asyncio
import logging

class Connection():
def __init__(self, ip, port, ioloop):
self.ip = ip
self.port = port
self.ioloop = ioloop
self.reader, self.writer = None, None
self.protocol = None
self.fileno = None
async def __aenter__(self):
# Applicable when doing 'with Connection(...'
log.info("Entering and Creating Connection")
self.reader, self.writer = (
await asyncio.open_connection(self.ip, self.port, loop=self.ioloop)
)
self.protocol = self.writer.transport.get_protocol()
self.fileno = self.writer.transport.get_extra_info('socket').fileno()
log.info(f"Created connection {self}")
return self
async def __aexit__(self, *args):
# Applicable when doing 'with Connection(...'
log.info(f"Exiting and Destroying Connection {self}")
if self.writer:
self.writer.close()
def __await__(self):
# Applicable when doing 'await Connection(...'
return self.__aenter__().__await__()
def __repr__(self):
return f"[Connection {self.ip}:{self.port}, {self.protocol}, fd={self.fileno}]"
async def send_recv_message(self, message):
log.debug(f"send: '{message}'")
self.writer.write(message.encode())
await self.writer.drain()
log.debug("awaiting data...")
data = await self.reader.read(9999)
data = data.decode()
log.debug(f"recv: '{data}'")
return data
class ServerConnection(Connection):
async def setup_connection(self):
event_port = 8889 # Assume this came from the server
print("In setup connection")
event_connection = await EventConnection('127.0.0.1', event_port, self.ioloop)
self.ioloop.run_until_complete(event_connection.recv_message())
class EventConnection(Connection):
async def recv_message(self):
log.debug("awaiting recv-only data...")
data = await self.reader.read(9999)
data = data.decode()
log.debug(f"recv only: '{data}'")
return data
async def main(loop):
client1 = await ServerConnection('127.0.0.1', 8888, loop)
await client1.setup_connection()
await client1.send_recv_message("Hello1")
await client1.send_recv_message("Hello2")
await asyncio.sleep(5)
if __name__ == '__main__':
#logging.basicConfig(level=logging.INFO)
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger()
ioloop = asyncio.get_event_loop()
print('starting loop')
ioloop.run_until_complete(main(ioloop))
print('completed loop')
ioloop.close()
The error occurs in the ServerConnection.setup_connection() method, where run_until_complete is being called.
I am probably doing something wrong due to a lack of understanding of asyncio. Basically, how do I set up a secondary connection that will get (unsolicited) event notifications while setting up the first connection?
Thanks.
Followup
Since the code is very similar (a few changes add more functionality to it), I hope it's not bad etiquette to follow up on the original post, as the resulting error is still the same.
The new issue is that when the unsolicited message arrives (it is received by EventConnection), recv_message calls the process_data method. I would like process_data to be a future, so that recv_message completes (and the ioloop should stop). The ensure_future would then pick it up and continue running again, using ServerConnection to do a request/response with the server. Before it does that, though, it has to go through some user code (represented by external_command()), from which I would prefer to hide the async stuff. This makes things synchronous again. Hence, once that code has done what it needs to, it should call execute_command on ServerConnection, which then kicks off the loop again.
The problem is that my expectation for ensure_future didn't pan out, as the loop apparently never stops running. Hence, when execution reaches execute_command, which calls run_until_complete, an exception with the error "This event loop is already running" occurs.
I have two questions:
How can I make it so that the ioloop can stop after process_data is placed into ensure_future, and subsequently be able to run it again in execute_command?
Once recv_message has received something, how can we make it so that it can receive more unsolicited data? Is it enough/safe to just use ensure_future to call itself again?
Here's the example code that simulates this issue.
client1 = None
class ServerConnection(Connection):
connection_type = 'Server Connection'
async def setup_connection(self):
event_port = 8889 # Assume this came from the server
print("In setup connection")
event_connection = await EventConnection('127.0.0.1', event_port, self.ioloop)
asyncio.ensure_future(event_connection.recv_message())
async def _execute_command(self, data):
return await self.send_recv_message(data)
def execute_command(self, data):
response_str = self.ioloop.run_until_complete(self._execute_command(data))
print(f"exec cmd response_str: {response_str}")
def external_command(self, data):
self.execute_command(data)
class EventConnection(Connection):
connection_type = 'Event Connection'
async def recv_message(self):
global client1
log.debug("awaiting recv-only data...")
data = await self.reader.read(9999)
data = data.decode()
log.debug(f"recv-only: '{data}'")
asyncio.ensure_future(self.process_data(data))
asyncio.ensure_future(self.recv_message())
async def process_data(self, data):
global client1
await client1.external_command(data)
async def main(ioloop):
global client1
client1 = await ServerConnection('127.0.0.1', 8888, ioloop)
await client1.setup_connection()
print(f"after connection setup loop running is {ioloop.is_running()}")
await client1.send_recv_message("Hello1")
print(f"after Hello1 loop running is {ioloop.is_running()}")
await client1.send_recv_message("Hello2")
print(f"after Hello2 loop running is {ioloop.is_running()}")
while True:
print(f"inside while loop running is {ioloop.is_running()}")
t = 10
print(f"asyncio sleep {t} sec")
await asyncio.sleep(t)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger()
ioloop = asyncio.get_event_loop()
print('starting loop')
ioloop.run_until_complete(main(ioloop))
print('completed loop')
ioloop.close()
Try replacing:
self.ioloop.run_until_complete
with
await
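In other words, inside a coroutine you await other coroutines instead of re-entering the loop with run_until_complete. Applied to the follow-up code above, the change would look roughly like this (a sketch, not the poster's exact code):
class ServerConnection(Connection):
    async def execute_command(self, data):
        # was: response_str = self.ioloop.run_until_complete(self._execute_command(data))
        response_str = await self._execute_command(data)
        print(f"exec cmd response_str: {response_str}")
The callers then need to be coroutines as well, e.g. external_command becomes async and process_data awaits it, so the whole chain stays on the already-running loop.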
I have a script running where the main thread takes input from stdin and then passes it to a child thread using a queue. In the child thread I'm using asyncio coroutines to spin up a listener on a socket and wait for connections. Once a connection is made, I can send data through the listener from the main thread.
It all seems to work well enough, but since asyncio.BaseEventLoop is not thread-safe, am I going to run into problems?
This is my attempt to solve the problem of using a blocking library like Python's cmd module with asyncio.
My code is below.
import sys
import asyncio
from time import sleep
from threading import Thread
from queue import Queue
stdin_q = Queue()
clients = {} # task -> (reader, writer)
def client_connected_handler(client_reader, client_writer):
# Start a new asyncio.Task to handle this specific client connection
task = asyncio.Task(handle_client(client_reader, client_writer))
clients[task] = (client_reader, client_writer)
def client_done(task):
# When the tasks that handles the specific client connection is done
del clients[task]
# Add the client_done callback to be run when the future becomes done
task.add_done_callback(client_done)
@asyncio.coroutine
def handle_client(client_reader, client_writer):
# Handle the requests for a specific client with a line oriented protocol
while True:
cmd = yield from get_input()
client_writer.write(cmd.encode())
data = yield from client_reader.read(1024)
print(data.decode(),end="",flush=True)
@asyncio.coroutine
def get_input():
while True:
try:
return stdin_q.get()
except:
pass
class Control:
def start(self):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
self.loop = asyncio.get_event_loop()
server = self.loop.run_until_complete(asyncio.start_server(client_connected_handler, '0.0.0.0', 2222))
self.loop.run_forever()
self.stop()
def stop(self):
self.loop.stop()
self.loop.close()
def fire_control():
con = Control()
con.start()
if __name__ == "__main__":
stdin_q.put("\n")
t = Thread(target=fire_control)
t.start()
sleep(2)
_cmd = ""
while _cmd.lower() != "exit":
_cmd = input("")
if _cmd == "":
_cmd = "\r\n"
stdin_q.put(_cmd)
This isn't going to work quite right, because the call to stdin_q.get() is going to block your event loop. This means that if your server has multiple clients, all of them will be completely blocked by whichever one happens to get to stdin_q.get() first, until you send data into the queue. The simplest way to get around this is to use BaseEventLoop.run_in_executor to run stdin_q.get in a background ThreadPoolExecutor, which allows you to wait for it without blocking the event loop:
@asyncio.coroutine
def get_input():
loop = asyncio.get_event_loop()
return (yield from loop.run_in_executor(None, stdin_q.get)) # None == use default executor.
Edit (1/27/16):
There is a library called janus, which provides an asyncio-friendly, thread-safe queue implementation.
Using that library, your code would look like this (I left out unchanged parts):
...
import janus
loop = asyncio.new_event_loop()
stdin_q = janus.Queue(loop=loop)
...
@asyncio.coroutine
def get_input():
loop = asyncio.get_event_loop()
return (yield from stdin_q.async_q.get())
class Control:
def start(self):
asyncio.set_event_loop(loop)
self.loop = asyncio.get_event_loop()
server = self.loop.run_until_complete(asyncio.start_server(client_connected_handler, '0.0.0.0', 2222))
self.loop.run_forever()
self.stop()
def stop(self):
self.loop.stop()
self.loop.close()
...
if __name__ == "__main__":
stdin_q.sync_q.put("\n")
    t = Thread(target=fire_control)
t.start()
sleep(2)
_cmd = ""
while _cmd.lower() != "exit":
_cmd = input("")
if _cmd == "":
_cmd = "\r\n"
stdin_q.sync_q.put(_cmd)