I'm looking to have many agents connect to different sockets of my local server so they can independently read and send to the server. So far I have this:
import json
import asyncio
class Agent():
def __init__(self, user, pw):
self.user = user
self.pw = pw
# Store host and port
self.host, self.port = "localhost", 12300
self.connect()
async def connect(self):
# Create asynchronous socket reader and writer
self.reader, self.writer = await asyncio.open_connection(self.host,
self.port)
request = <some_json>
# Create and send authentication request
self.writer.write(request)
await self.writer.drain()
response = await self.reader.read(1000)
response = json.loads(response.decode().split("\0")[0])
if response["content"]["result"] == "ok":
print("Connection Succesful")
else:
print("Connection Failed")
await self.listen()
async def receive_msg(self):
"""
Waits for a message from the server and returns it.
"""
msg = await self.reader.readuntil(b"\0")
msg = msg.decode().split("\0")[0]
# In case a message is received, parse it into a dictionary.
if len(msg) > 1:
return json.loads(msg)
else:
print("Retry receiving message...")
            return await self.receive_msg()
async def listen(self):
"""
Listens for output from server and writes if anything is received
"""
while True:
msg = await self.receive_msg()
print("Message received", self.user)
a_list = []
loop = asyncio.get_event_loop()
for i in range(15):
a_list.append(Agent(f"agentA{i}", "1").connect())
loop.run_until_complete(asyncio.gather(*a_list))
Because of the way asyncio works, I think this is the only way to run this asynchronously. But I would like to be able to make __init__ run asynchronously somehow, instead of having to throw the connect coroutine into the loop, if that is possible. What I essentially would like to do is this:
a_list = []
loop = asyncio.get_event_loop()
for i in range(15):
a_list.append(Agent(f"agentA{i}", "1"))
loop.run_until_complete(asyncio.gather(*a_list))
I think that makes more sense, but I can't figure out how to do it. Am I thinking about this the wrong way, or is there a better way to do it?
You can't make __init__ async, but you can make Agent instances awaitable. To do so, define the __await__ magic method:
class Agent:
def __init__(self, user, pw):
self.user = user
self.pw = pw
self.host, self.port = "localhost", 12300
def __await__(self):
yield from self.connect().__await__()
This has the best of both worlds: your __init__ function remains sync, and yet Agent instances are valid arguments to functions like asyncio.gather().
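For example, here is a minimal sketch of how the awaitable instances can then be driven (this uses asyncio.run(), but the loop.run_until_complete() version from the question works just the same, and connect() is assumed to stay as defined in the question):
async def main():
    # Each Agent is awaitable, so gather() connects all of them concurrently.
    await asyncio.gather(*(Agent(f"agentA{i}", "1") for i in range(15)))

asyncio.run(main())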
So here's the basic code (sorry it's long)
import argparse
import asyncio
from contextvars import ContextVar
import sys
# This thing is the offender
message_var = ContextVar("message")
class ServerProtocol(asyncio.Protocol):
def connection_made(self, transport):
peername = transport.get_extra_info("peername")
print("Server: Connection from {}".format(peername))
self.transport = transport
def data_received(self, data):
message = data.decode()
print("Server: Data received: {!r}".format(message))
print("Server: Send: {!r}".format(message))
self.transport.write(data)
print("Server: Close the client socket")
self.transport.close()
class ClientProtocol(asyncio.Protocol):
def __init__(self, on_conn_lost):
self.on_conn_lost = on_conn_lost
self.transport = None
self.is_connected: bool = False
def connection_made(self, transport):
self.transport = transport
self.is_connected = True
def data_received(self, data):
# reading back supposed contextvar
message = message_var.get()
print(f"{message} : {data.decode()}")
def connection_lost(self, exc):
print("The server closed the connection")
self.is_connected = False
self.on_conn_lost.set_result(True)
def send(self, message: str):
# Setting context var
message_var.set(message)
if self.transport:
self.transport.write(message.encode())
def close(self):
self.transport.close()
self.is_connected = False
if not self.on_conn_lost.done():
self.on_conn_lost.set_result(True)
async def get_input(client: ClientProtocol):
loop = asyncio.get_running_loop()
while client.is_connected:
message = await loop.run_in_executor(None, input, ">>>")
if message == "q":
client.close()
return
client.send(message)
async def main(args):
host = "127.0.0.1"
port = 5001
loop = asyncio.get_running_loop()
if args.server:
server = await loop.create_server(lambda: ServerProtocol(), host, port)
async with server:
await server.serve_forever()
return
on_conn_lost = loop.create_future()
client = ClientProtocol(on_conn_lost)
await loop.create_connection(lambda: client, host, port)
await get_input(client)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--server", "-s", default=False, action="store_true", help="Start server"
)
arguments = parser.parse_args(sys.argv[1:])
asyncio.run(main(args=arguments))
This crashes with the following exception:
Exception in callback _ProactorReadPipeTransport._loop_reading(<_OverlappedF...shed result=4>)
handle: <Handle _ProactorReadPipeTransport._loop_reading(<_OverlappedF...shed result=4>)>
Traceback (most recent call last):
File "C:\Users\brent\AppData\Local\Programs\Python\Python310\lib\asyncio\events.py", line 80, in _run
self._context.run(self._callback, *self._args)
File "C:\Users\brent\AppData\Local\Programs\Python\Python310\lib\asyncio\proactor_events.py", line 320, in _loop_reading
self._data_received(data, length)
File "C:\Users\brent\AppData\Local\Programs\Python\Python310\lib\asyncio\proactor_events.py", line 270, in _data_received
self._protocol.data_received(data)
File "E:\Development\Python\ibcs2023\_prep\experimental\asyncio_context.py", line 40, in data_received
message = message_var.get()
LookupError: <ContextVar name='message' at 0x0000023F30A54FE0>
The server closed the connection
Why does calling message = message_var.get() cause a crash? Why can't Python find the context var? Why is data_received not in the same context as send? How can I keep them in the same context?
I'm working on a larger project against the main branch of Textual; it uses a contextvar that loses its value every time a message is received, in a modified version of the code above.
Keeping a separated "context" for each task is exactly what contextvars are about. You could only assert that the send and data_received methods were called within the same context if you had control over the "uperlying" (as opposed to 'underlying') driver of your Protocol class - that is not the case, and both are called in different contexts. I mean, the answer to "How can I keep them in the same context?" is: you can't unless you write your own implementation of the code which makes this work inside asyncio.
There is no way you can keep track of metadata from a message, and retrieve this metadata on getting the reply, unless there is a marker on the message itself, that will survive the round-trip. That is: your networking/communication protocol itself have to spec a way to identify messages It might be as simple as a sequential integer number prefixing every string, for example - or, in this case where you simply echo the message back, it could be the message itself. Once you have that, a simple dictionary having these message IDs as keys, will work for what you seem to intend in this example.
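A minimal sketch of that idea, reworking the ClientProtocol from the question so every outgoing message is prefixed with a sequential integer ID and a colon (the ID is what survives the echo round trip; partial or coalesced TCP reads are ignored here, just as in the original code):
import asyncio
import itertools

class ClientProtocol(asyncio.Protocol):
    def __init__(self, on_conn_lost):
        self.on_conn_lost = on_conn_lost
        self.transport = None
        self._ids = itertools.count()
        self._pending = {}  # message id -> metadata to recover on reply

    def connection_made(self, transport):
        self.transport = transport

    def send(self, message: str):
        msg_id = next(self._ids)
        # Store the metadata under an ID that travels with the message itself.
        self._pending[msg_id] = message
        self.transport.write(f"{msg_id}:{message}".encode())

    def data_received(self, data):
        # The echoed reply still carries the ID, so the metadata can be found
        # again without any shared context between send() and data_received().
        msg_id, _, payload = data.decode().partition(":")
        metadata = self._pending.pop(int(msg_id), None)
        print(f"{metadata} : {payload}")

    def connection_lost(self, exc):
        print("The server closed the connection")
        self.on_conn_lost.set_result(True)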
I am trying to make a socket server with asyncio that can have multiple clients connected and can easily switch which client it communicates with, while keeping all the clients connected. I thought there would be some kind of file descriptor for each client, like there is with plain sockets, but I looked through the docs and did not find anything - or I missed it.
Here is my server code:
import socket
import asyncio
host = "localhost"
port = 9998
list_of_auths = ['desktop-llpeu0p\\tomiss', 'desktop-llpeu0p\\tomisss',
'desktop-llpeu0p\\tomissss', 'desktop-llpeu0p\\tomisssss']
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print('socket initiated.')
confirmed = 'CONFIRMED'
deny = 'denied'
#(so i dont forget) to get recv in async do: var = (await reader.read(4096)).decode('utf-8') if -1 then it will read all
#(so i dont forget) to write sendall do: writer.write(var.encode('utf-8')) should be used with await writer.drain()
async def handle_client(reader, writer):
idrecv = (await reader.read(255)).decode('utf-8')
if idrecv in list_of_auths:
writer.write(confirmed.encode('utf-8'))
else:
writer.write(deny.encode('utf-8'))
writer.close()
request = None
while request != 'quit':
print("second checkpoint")
writer.close()
async def run_server():
print("first checkpoint")
server = await asyncio.start_server(handle_client, host, port)
async with server:
await server.serve_forever()
asyncio.run(run_server())
This code allows multiple clients to connect at once; however, it only lets me communicate with the last one that connected.
I would suggest implementing it like so:
class SocketHandler(asyncio.Protocol):
def __init__(self):
asyncio.Protocol.__init__(self)
self.transport = None
self.peername = None
# your other code
def connection_made(self, transport):
""" incoming connection """
global ALL_CONNECTIONS
self.transport = transport
self.peername = transport.get_extra_info('peername')
ALL_CONNECTIONS.append(self)
# your other code
def connection_lost(self, exception):
self.close()
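        # NOTE: you will probably also want to remove this instance from
        # ALL_CONNECTIONS here, so closed connections are not kept around.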
# your other code
def data_received(self, data):
# your code handling incoming data
def close(self):
try:
self.transport.close()
except AttributeError:
pass
# global list to store all connections
ALL_CONNECTIONS = []
def send_to_all(message):
""" sending a message to all connected clients """
global ALL_CONNECTIONS
for sh in ALL_CONNECTIONS:
# here you can also check sh.peername to know which client it is
if sh.transport is not None:
sh.transport.write(message)
port = 5060
loop = asyncio.get_event_loop()
coro = loop.create_server(SocketHandler, '', port)
server = loop.run_until_complete(coro)
loop.run_forever()
This way, each connection to the server is represented by an instance of SocketHandler. Whenever you process some data inside this instance, you know which client connection it is.
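To "switch" between clients, you can then pick a single connection by its peername instead of broadcasting - a small sketch of a helper (not part of the code above):
def send_to_peer(peername, message):
    """ send a message to one connected client, selected by its peername """
    for sh in ALL_CONNECTIONS:
        if sh.peername == peername and sh.transport is not None:
            sh.transport.write(message)
            return True
    return False  # no such client is currently connected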
I'm trying to experiment a bit with Python asyncio to improve in that area. For self-teaching purposes I'm trying to connect to Redis, send some commands and read the response; this falls under the generic case of "read a stream of data from some source". The problem I cannot solve is how to read the data in chunks, since the connection is not closed between server and client and the termination sequence \r\n can appear more than once. If I await when there is no more data, the call will of course block until something else is received.
class Client:
def __init__(self, loop, host='127.0.0.1', port=6379):
self.host = host
self.port = port
self.reader = None
self.writer = None
self.loop = loop
    @asyncio.coroutine
def _connect(self):
self.reader, self.writer = yield from asyncio.open_connection(
self.host, self.port, loop=self.loop)
async def read(self, b=4096):
resp = b''
while True:
chunk = await self.reader.read(b)
if chunk:
resp += chunk
else:
break
return resp
Let's pretend I want to read the response in chunks of 2 bytes (yes, it's stupid, but it's just for this learning purpose), so:
loop = asyncio.get_event_loop()
client = Client(loop)
..... sends some commands here ....
resp = await client.read(2)
I cannot figure out how, without knowing the length of the server's response, the code can still be safe when the response is longer than the number of bytes read from the socket.
I encountered a similar problem recently. My solution was to continue reading until a given character (or set of characters) is read. This is the same philosophy behind people saying "over" on walkie talkies when they are done talking. It is easier to just wait for the response to say that it is done talking.
While I haven't worked with the asyncio module before, I believe that the following code should solve your problem, assuming that the source of the input ends the response with whatever character (or string of characters) is indicated in the variable end_signal.
class Client:
def __init__(self, loop, host='127.0.0.1', port=6379):
self.host = host
self.port = port
self.reader = None
self.writer = None
self.loop = loop
    @asyncio.coroutine
def _connect(self):
self.reader, self.writer = yield from asyncio.open_connection(
self.host, self.port, loop=self.loop)
    # end_signal must be bytes so it can be compared against the bytes response
    async def read(self, b=4096, end_signal=b"10101101110111110"):
        resp = b''
        while True:
            chunk = await self.reader.read(b)
            if not chunk:
                # connection closed before the end signal was seen
                break
            resp += chunk
            if resp[-len(end_signal):] == end_signal:
                break
        return resp
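As an aside, asyncio's StreamReader already has a helper for this pattern: readuntil() reads until a given separator is found. A minimal sketch of a method that could be added to the Client class above, assuming line-style Redis replies terminated by \r\n (this does not cover the full RESP protocol):
    async def read_line(self):
        # readuntil() buffers internally and returns everything up to and
        # including the separator; it raises asyncio.IncompleteReadError
        # if the connection closes before the separator is seen.
        raw = await self.reader.readuntil(b"\r\n")
        return raw[:-2]  # strip the trailing \r\n
readuntil() also raises LimitOverrunError if the separator does not appear within the stream's buffer limit, which is a useful guard against unbounded responses.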
import asyncio
import json
import websockets
from mongodb import *
class WebSocketRequest:
def __init__(self, websocket):
self.websocket = websocket
async def login(self):
data = await self.websocket.recv()
j = json.loads(data)
for i in j:
if i == 'email':
email = j[i]
if i == "pass":
password = j[i]
user = users.find_one({"email":email})
if user == None:
msg = 400
else:
msg = 200
        await self.websocket.send(str(msg))
async def register(self):
data = await self.websocket.recv()
j = json.loads(data)
print(j)
async def run(self):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
asyncio.ensure_future(self.login())
asyncio.ensure_future(self.register())
loop.run_forever()
class WebsocketServer:
def __init__(self, localhost,port):
self.localhost = localhost
self.port = port
async def hello(self, websocket, path):
req = WebSocketRequest(websocket)
await req.run()
def run(self):
print("opening")
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
start_server = websockets.serve(self.hello, self.localhost, self.port)
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
if __name__=='__main__':
localhost, port = '127.0.0.1', 5678
web = WebsocketServer(localhost, port)
web.run()
I'm trying to build a chat-app server using WebSockets. I have 3 methods -
login, register and chat. I check whether a user is logged in or not and redirect them to register on the front end. I am trying to run the 3 methods in one script using classes.
I got an error message because there are 2 loops running.
What is the problem with my code?
It might not be possible. Guido van Rossum has stated before that he didn't want recursive event loops, and it seems like you're creating nested event loops here.
Before your code, just add these lines:
!pip install nest_asyncio
import nest_asyncio
nest_asyncio.apply()
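Alternatively, the nesting can be avoided altogether by never creating extra loops and just awaiting the handlers. Here is a sketch using the class names and imports from the question, assuming login and register should run one after the other for each connection and keeping those two methods as you wrote them:
class WebSocketRequest:
    def __init__(self, websocket):
        self.websocket = websocket

    async def run(self):
        # Await the coroutines directly instead of starting a new loop.
        await self.login()
        await self.register()

class WebsocketServer:
    def __init__(self, localhost, port):
        self.localhost = localhost
        self.port = port

    async def hello(self, websocket, path):
        req = WebSocketRequest(websocket)
        await req.run()

    def run(self):
        # One event loop for the whole program.
        loop = asyncio.get_event_loop()
        start_server = websockets.serve(self.hello, self.localhost, self.port)
        loop.run_until_complete(start_server)
        loop.run_forever()
This way only one event loop exists, so the error caused by running two loops goes away.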
I am trying to add two coroutines to the asyncio loop and I'm getting an error:
RuntimeError: This event loop is already running
My objective is to communicate with a server (that I have no control over). This server expects an initial connection from the client. The server then provides a port to the client on this connection. The client has to use this port to create a second connection. This second connection is used by the server to send unsolicited messages to the client. The first connection remains up throughout for other two-way communications.
To recreate this scenario, I have some code that reproduces the error:
class Connection():
def __init__(self, ip, port, ioloop):
self.ip = ip
self.port = port
self.ioloop = ioloop
self.reader, self.writer = None, None
self.protocol = None
self.fileno = None
async def __aenter__(self):
# Applicable when doing 'with Connection(...'
log.info("Entering and Creating Connection")
self.reader, self.writer = (
await asyncio.open_connection(self.ip, self.port, loop=self.ioloop)
)
self.protocol = self.writer.transport.get_protocol()
self.fileno = self.writer.transport.get_extra_info('socket').fileno()
log.info(f"Created connection {self}")
return self
async def __aexit__(self, *args):
# Applicable when doing 'with Connection(...'
log.info(f"Exiting and Destroying Connection {self}")
if self.writer:
self.writer.close()
def __await__(self):
# Applicable when doing 'await Connection(...'
return self.__aenter__().__await__()
def __repr__(self):
return f"[Connection {self.ip}:{self.port}, {self.protocol}, fd={self.fileno}]"
async def send_recv_message(self, message):
log.debug(f"send: '{message}'")
self.writer.write(message.encode())
await self.writer.drain()
log.debug("awaiting data...")
data = await self.reader.read(9999)
data = data.decode()
log.debug(f"recv: '{data}'")
return data
class ServerConnection(Connection):
async def setup_connection(self):
event_port = 8889 # Assume this came from the server
print("In setup connection")
event_connection = await EventConnection('127.0.0.1', event_port, self.ioloop)
self.ioloop.run_until_complete(event_connection.recv_message())
class EventConnection(Connection):
async def recv_message(self):
log.debug("awaiting recv-only data...")
data = await self.reader.read(9999)
data = data.decode()
log.debug(f"recv only: '{data}'")
return data
async def main(loop):
client1 = await ServerConnection('127.0.0.1', 8888, loop)
await client1.setup_connection()
await client1.send_recv_message("Hello1")
await client1.send_recv_message("Hello2")
await asyncio.sleep(5)
if __name__ == '__main__':
#logging.basicConfig(level=logging.INFO)
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger()
ioloop = asyncio.get_event_loop()
print('starting loop')
ioloop.run_until_complete(main(ioloop))
print('completed loop')
ioloop.close()
The error occurs in ServerConnection.setup_connection() method where run_until_complete is being called.
I am probably doing something wrong due to my lack of understanding of asyncio. Basically, how do I set up a secondary connection that will get event notifications (unsolicited) while setting up the first connection?
Thanks.
Followup
Since the code is very similar (with a few changes to add more functionality), I hope it's not bad etiquette to follow up on the original post, as the resulting error is still the same.
The new issue is that when the unsolicited message is received (by EventConnection), recv_message calls the process_data method. I would like process_data to be a future, so that recv_message completes (and the ioloop should stop). ensure_future would then pick it up and continue running, using ServerConnection to do a request/response with the server. Before it does that, though, it has to go through some user code (represented by external_command(), from which I would prefer to hide the async stuff). That makes it synchronous again. Hence, once the user code has done what it needs to, it should call execute_command on ServerConnection, which then kicks off the loop again.
The problem is, my expectation for using ensure_future didn't pan out, as it seems the loop never stopped running. Hence, when execution reaches execute_command, which calls run_until_complete, an exception with the error "This event loop is already running" occurs.
I have two questions:
1. How can I make it so that the ioloop can stop after process_data is placed into ensure_future, and subsequently be able to run it again in execute_command?
2. Once recv_message has received something, how can we make it so that it can receive more unsolicited data? Is it enough/safe to just use ensure_future to call itself again?
Here's the example code that simulates this issue.
client1 = None
class ServerConnection(Connection):
connection_type = 'Server Connection'
async def setup_connection(self):
event_port = 8889 # Assume this came from the server
print("In setup connection")
event_connection = await EventConnection('127.0.0.1', event_port, self.ioloop)
asyncio.ensure_future(event_connection.recv_message())
async def _execute_command(self, data):
return await self.send_recv_message(data)
def execute_command(self, data):
response_str = self.ioloop.run_until_complete(self._execute_command(data))
print(f"exec cmd response_str: {response_str}")
def external_command(self, data):
self.execute_command(data)
class EventConnection(Connection):
connection_type = 'Event Connection'
async def recv_message(self):
global client1
log.debug("awaiting recv-only data...")
data = await self.reader.read(9999)
data = data.decode()
log.debug(f"recv-only: '{data}'")
asyncio.ensure_future(self.process_data(data))
asyncio.ensure_future(self.recv_message())
async def process_data(self, data):
global client1
await client1.external_command(data)
async def main(ioloop):
global client1
client1 = await ServerConnection('127.0.0.1', 8888, ioloop)
await client1.setup_connection()
print(f"after connection setup loop running is {ioloop.is_running()}")
await client1.send_recv_message("Hello1")
print(f"after Hello1 loop running is {ioloop.is_running()}")
await client1.send_recv_message("Hello2")
print(f"after Hello2 loop running is {ioloop.is_running()}")
while True:
print(f"inside while loop running is {ioloop.is_running()}")
t = 10
print(f"asyncio sleep {t} sec")
await asyncio.sleep(t)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger()
ioloop = asyncio.get_event_loop()
print('starting loop')
ioloop.run_until_complete(main(ioloop))
print('completed loop')
ioloop.close()
Try replacing:
self.ioloop.run_until_complete(...)
with:
await ...
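In other words, inside a coroutine you await other coroutines and schedule background work with ensure_future, instead of re-entering the already-running loop. A sketch of the two affected methods with that change applied (the rest of the classes stay as in the question):
class ServerConnection(Connection):
    async def setup_connection(self):
        event_port = 8889  # Assume this came from the server
        event_connection = await EventConnection('127.0.0.1', event_port, self.ioloop)
        # Schedule the listener as a background task; never call
        # run_until_complete() on a loop that is already running.
        asyncio.ensure_future(event_connection.recv_message())

    async def execute_command(self, data):
        # Await directly instead of self.ioloop.run_until_complete(...).
        return await self.send_recv_message(data)
Note that this makes execute_command a coroutine, so its callers (external_command and, in turn, process_data) also have to become coroutines and await it - the async-ness propagates up instead of being hidden behind run_until_complete.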