Based on this answer I want to build an async websocket client in a class which would be imported from another file:
#!/usr/bin/env python3
import sys, json
import asyncio
from websockets import connect

class EchoWebsocket:
    def __await__(self):
        # see: https://stackoverflow.com/a/33420721/1113207
        return self._async_init().__await__()

    async def _async_init(self):
        self._conn = connect('wss://ws.binaryws.com/websockets/v3')
        self.websocket = await self._conn.__aenter__()
        return self

    async def close(self):
        await self._conn.__aexit__(*sys.exc_info())

    async def send(self, message):
        await self.websocket.send(message)

    async def receive(self):
        return await self.websocket.recv()

class mtest:
    async def start(self):
        try:
            self.wws = await EchoWebsocket()
        finally:
            await self.wws.close()

    async def get_ticks(self):
        await self.wws.send(json.dumps({'ticks_history': 'R_50', 'end': 'latest', 'count': 1}))
        return await self.wws.receive()

if __name__ == '__main__':
    a = mtest()
    loop = asyncio.get_event_loop()
    loop.run_until_complete(a.start())
And I import it in main.py, where I have the following:
from testws import *
a = mtest()
print (a.get_ticks())
print ("this will be printed after the ticks")
But it gives me the following error:
root@ubupc1:/home/dinocob# python3 test.py
<coroutine object hello.get_ticks at 0x7f13190a9200>
test.py:42: RuntimeWarning: coroutine 'mtest.get_ticks' was never awaited
print (a.get_ticks())
this will be printed after the ticks
What is going on here? Why am I not able to call mtest.get_ticks normally if it has the async keyword at the beginning of the def?
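Calling an async def function never runs its body; it just returns a coroutine object, which is exactly what print(a.get_ticks()) shows, and the RuntimeWarning means that coroutine was garbage-collected without ever being awaited or scheduled on an event loop. A minimal standalone sketch of the same effect (not from the original code):

import asyncio

async def get_ticks():
    return 42

print(get_ticks())  # <coroutine object get_ticks at 0x...> plus a "never awaited" warning
print(asyncio.get_event_loop().run_until_complete(get_ticks()))  # 42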
I finally found the right way to do it (special thanks to @dirn):
#!/usr/bin/env python3
import sys, json
import asyncio
from websockets import connect

class EchoWebsocket:
    async def __aenter__(self):
        self._conn = connect('wss://ws.binaryws.com/websockets/v3')
        self.websocket = await self._conn.__aenter__()
        return self

    async def __aexit__(self, *args, **kwargs):
        await self._conn.__aexit__(*args, **kwargs)

    async def send(self, message):
        await self.websocket.send(message)

    async def receive(self):
        return await self.websocket.recv()

class mtest:
    def __init__(self):
        self.wws = EchoWebsocket()
        self.loop = asyncio.get_event_loop()

    def get_ticks(self):
        return self.loop.run_until_complete(self.__async__get_ticks())

    async def __async__get_ticks(self):
        async with self.wws as echo:
            await echo.send(json.dumps({'ticks_history': 'R_50', 'end': 'latest', 'count': 1}))
            return await echo.receive()
And this in main.py:
from testws import *
a = mtest()
foo = a.get_ticks()
print (foo)
print ("async works like a charm!")
foo = a.get_ticks()
print (foo)
This is the output:
root@ubupc1:/home/dinocob# python3 test.py
{"count": 1, "end": "latest", "ticks_history": "R_50"}
async works like a charm!
{"count": 1, "end": "latest", "ticks_history": "R_50"}
Any tip to improve it is welcomed! ;)
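One small tip, offered as a sketch rather than part of the original answer: because each call opens and closes the connection anyway, on Python 3.7+ the stored event loop can be dropped in favour of asyncio.run, and the async with connect(...) context manager can be used directly:

import asyncio, json
from websockets import connect

async def get_ticks_once():
    # Connect, request the tick history, and read a single reply.
    async with connect('wss://ws.binaryws.com/websockets/v3') as websocket:
        await websocket.send(json.dumps({'ticks_history': 'R_50', 'end': 'latest', 'count': 1}))
        return await websocket.recv()

print(asyncio.run(get_ticks_once()))  # Python 3.7+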
Your question and answer are great!
They helped me a lot!
Based on your code I was able to create the following class, which better matches my needs:
import asyncio
from websockets import connect

class TestClient:
    def __init__(self, URL):
        self.URL = URL
        self.conn = None
        self.loop = asyncio.get_event_loop()

    async def send(self, message):
        if self.conn is None:
            self.conn = await connect(self.URL)
        await self.conn.send(message)

    async def receive(self):
        return await self.conn.recv()

    def ping(self):
        return self.loop.run_until_complete(self._ping())

    async def _ping(self):
        await self.send("Hello World")
        return await self.receive()

test = TestClient("wss://echo.websocket.org")
print(test.ping())
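A small follow-up, purely hypothetical and not part of the original class: ping() leaves the websocket open, so an explicit close helper could be added to TestClient for cleanup:

    def close(self):
        # Hypothetical helper: close the websocket if one was opened.
        if self.conn is not None:
            self.loop.run_until_complete(self.conn.close())
            self.conn = None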
I need to listen for tasks on 2 queues, so I wrote the code below, but it has a problem. Currently it behaves like this: if the code starts when both queues are full, it works great. But if the queues were empty (or one of them was), the code reads messages but does not process them (does not send an ack, does not run the logic). The messages stay unacked until I stop the code. I do not see any reason for them to be unacked and unprocessed.
I can't understand what is wrong with the code. Maybe there is another way to aggregate 2 or more queues like this?
# task_processor.py
from aio_pika import IncomingMessage

class TaskProcessor:
    MAX_TASKS_PER_INSTANCE = 1

    def __init__(self):
        self._tasks = []

    def can_accept_new_task(self) -> bool:
        return len(self._tasks) < self.MAX_TASKS_PER_INSTANCE

    async def process(self, message: IncomingMessage):
        self._tasks.append(message)
        print(message.body)
        await message.ack()
        self._tasks.pop()

# main.py
import asyncio
from asyncio import QueueEmpty
from typing import Callable

import aio_pika
from aio_pika import RobustQueue
from dotenv import load_dotenv

load_dotenv()

from core.logger.logger import logger
from core.services.rabbitmq.task_processor.task_processor import TaskProcessor

async def get_single_task(queue: RobustQueue):
    while True:
        try:
            msg = await queue.get(timeout=3600)
            return msg
        except QueueEmpty:
            await asyncio.sleep(3)
        except asyncio.exceptions.TimeoutError:
            logger.warning('queue timeout error')
            pass
        except Exception as ex:
            logger.error(f"{queue} errored", exc_info=ex)

async def task_aggregator(queue1: RobustQueue, queue2: RobustQueue, should_take_new_task_cb: Callable):
    while True:
        if should_take_new_task_cb():
            queue2, queue1 = queue1, queue2
            gen1 = get_single_task(queue1)
            gen2 = get_single_task(queue2)
            done, _ = await asyncio.wait([gen1, gen2], return_when=asyncio.FIRST_COMPLETED)
            for item in done:
                result = item.result()
                yield result
        else:
            await asyncio.sleep(1)

async def tasks(queue1: RobustQueue, queue2: RobustQueue, should_take_new_task_cb: Callable):
    async for task in task_aggregator(queue1, queue2, should_take_new_task_cb):
        yield task

async def main():
    connection = await aio_pika.connect_robust(
        f"amqp://user:password@host:port/vhost?heartbeat={180}"
    )
    channel1 = connection.channel()
    channel2 = connection.channel()
    await channel1.initialize()
    await channel2.initialize()
    queue1 = await channel1.get_queue('queue1')
    queue2 = await channel2.get_queue('queue2')
    task_processor = TaskProcessor()
    task_generator = tasks(queue1, queue2, task_processor.can_accept_new_task)
    while True:
        if task_processor.can_accept_new_task():
            task = await anext(task_generator)
            await task_processor.process(task)
        else:
            await asyncio.sleep(1)

if __name__ == '__main__':
    asyncio.run(main())
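A guess at the cause, not part of the original post: asyncio.wait wraps gen1 and gen2 in tasks, and when only one of them finishes, the other pending task is abandoned on the next loop iteration while its queue.get() keeps running in the background; whatever message it eventually fetches is then never processed or acked. A minimal sketch of an alternative task_aggregator that keeps one long-lived task per queue and re-arms it only after its result has been consumed (reusing get_single_task from above):

import asyncio

async def task_aggregator(queue1, queue2, should_take_new_task_cb):
    # Sketch: one in-flight get() task per queue; no task is ever dropped.
    task_to_queue = {
        asyncio.create_task(get_single_task(queue1)): queue1,
        asyncio.create_task(get_single_task(queue2)): queue2,
    }
    while True:
        if should_take_new_task_cb():
            done, _ = await asyncio.wait(set(task_to_queue), return_when=asyncio.FIRST_COMPLETED)
            for finished in done:
                queue = task_to_queue.pop(finished)
                yield finished.result()
                # Start watching this queue again for the next message.
                task_to_queue[asyncio.create_task(get_single_task(queue))] = queue
        else:
            await asyncio.sleep(1)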
I want to send the acceleration data I receive from the BMI160 acceleration sensor to two servers at the same time, in JSON format. I found some sample code, but how do I emit the data from the main function (which is the while loop) to the two servers?
import socketio
import asyncio

class MyCustomNamespace(socketio.AsyncClientNamespace):
    async def on_connect(self):
        print("I'm connected!")

    async def on_disconnect(self):
        print("I'm disconnected!")

    async def on_my_event(self, data):
        await self.emit('my_response', data)

    async def on_message(self, data):
        print("[echo]:", data)

class mysio:
    def __init__(self) -> None:
        global sio
        self.sio = socketio.AsyncClient(logger=False, engineio_logger=False)
        self.sio.register_namespace(MyCustomNamespace('/'))  # bind

async def main():
    def bmi160_read():
        acc_dict = {
            "x": x,
            "y": y,
            "z": z
        }
        return acc_dict  # I WANT TO SEND THIS DATA TO SERVERS SAME TIME.

    async def fun1():
        sio1 = mysio().sio
        await sio1.connect('http://192.168.3.85:11451')
        await sio1.emit('message', b'11111110001')
        await sio1.wait()

    async def fun2():
        sio2 = mysio().sio
        await sio2.connect('http://localhost:8080')
        await sio2.emit('message', 'from sio2')
        await sio2.wait()

    tasks = [asyncio.create_task(fun1()), asyncio.create_task(fun2())]
    await asyncio.wait(tasks)

asyncio.run(main())
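One possible way to do this, offered as a sketch rather than a definitive answer (the sensor read is stubbed out): connect both clients first, then run a single loop that reads the sensor and emits the same JSON payload to both connections with asyncio.gather:

import asyncio
import json
import socketio

async def main():
    sio1 = socketio.AsyncClient()
    sio2 = socketio.AsyncClient()
    await sio1.connect('http://192.168.3.85:11451')
    await sio2.connect('http://localhost:8080')

    def bmi160_read():
        # Placeholder: real code would read x, y, z from the BMI160 here.
        return {"x": 0.0, "y": 0.0, "z": 0.0}

    while True:
        payload = json.dumps(bmi160_read())
        # Send the same sample to both servers concurrently.
        await asyncio.gather(
            sio1.emit('message', payload),
            sio2.emit('message', payload),
        )
        await asyncio.sleep(0.1)  # sample interval, arbitrary

asyncio.run(main())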
When trying to change the presence in the bot class of discord.py, I get the error that the change_presence attribute is not in discord.ext.commands.
The presence and description change is located in on_ready; I try to change it, but the error above keeps me from doing so. Here is the code:
from pathlib import Path

import discord
from discord.ext import commands

class MusicBot(commands.Bot):
    def __init__(self):
        self._cogs = [p.stem for p in Path(".").glob("./bot/cogs/*.py")]
        super().__init__(command_prefix=self.prefix, case_insensitive=True)

    def setup(self):
        print("Yuki abrio sus ojitos...")
        for cog in self._cogs:
            self.load_extension(f"bot.cogs.{cog}")
            print(f" Loaded `{cog}` cog.")
        print("Yuki se esta visitendo...")

    def run(self):
        self.setup()
        with open("data/token.0", "r", encoding="utf-8") as f:
            TOKEN = f.read()
        print("Yuki se esta levantando :3...")
        super().run(TOKEN, reconnect=True)

    async def shutdown(self):
        print("Closing connection to Discord...")
        await super().close()

    async def close(self):
        print("Closing on keyboard interrupt...")
        await self.shutdown()

    async def on_connect(self):
        print(f" Connected to Discord (latency: {self.latency*1000:,.0f} ms).")

    async def on_resumed(self):
        print("Bot resumed.")

    async def on_disconnect(self):
        print("Bot disconnected.")

    async def on_error(self, err, *args, **kwargs):
        raise

    async def on_command_error(self, ctx, exc):
        raise getattr(exc, "original", exc)

    async def on_ready(self):
        self.client_id = (await self.application_info()).id
        print("Yuki ya desperto uwu")

    async def on_ready(self):
        activity = discord.Game(name="", type=3)
        await commands.change_presence(status=discord.Status.idle, activity=activity)

    async def prefix(self, bot, msg):
        return commands.when_mentioned_or("<")(bot, msg)

    async def process_commands(self, msg):
        ctx = await self.get_context(msg, cls=commands.Context)
        if ctx.command is not None:
            await self.invoke(ctx)

    async def on_message(self, msg):
        if not msg.author.bot:
            await self.process_commands(msg)
await commands.change_presence() raises an error because commands (the module) has to be replaced with a commands.Bot instance.
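Inside the class above, the bot itself is that instance, so a sketch of the corrected handler looks like this (the two on_ready definitions are merged, since the second overrides the first; the game name is a placeholder):

    async def on_ready(self):
        self.client_id = (await self.application_info()).id
        print("Yuki ya desperto uwu")
        activity = discord.Game(name="some game")
        # self is the commands.Bot instance, so change_presence is called on it.
        await self.change_presence(status=discord.Status.idle, activity=activity)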
Simple example
import asyncio
import logging

from aiogram import Bot, Dispatcher, types

logging.basicConfig(level=logging.INFO)

token = 'token'
bot = Bot(token=token)
dp = Dispatcher(bot=bot)

@dp.callback_query_handler(text='stoploop')
async def stop_loop(query: types.CallbackQuery):
    # TODO how to stop test loop?
    await query.message.edit_text('stop')

@dp.callback_query_handler(text='test')
async def start_loop(query: types.CallbackQuery):
    a = 100
    while True:
        a -= 1
        markup = types.InlineKeyboardMarkup()
        markup.add(types.InlineKeyboardButton('<<<Stop And Back To Home', callback_data='stoploop'))
        await query.message.edit_text(str(a), reply_markup=markup)
        await asyncio.sleep(1)

@dp.message_handler(commands='start')
async def start_cmd_handler(message: types.Message):
    markup = types.InlineKeyboardMarkup()
    markup.add(
        types.InlineKeyboardButton('start loop', callback_data='test')
    )
    await message.reply('test', reply_markup=markup)

async def main():
    try:
        await dp.start_polling()
    finally:
        await bot.close()

if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
When I click start_loop, the Telegram message box starts to display a countdown. When I click stop, how can I stop the previous countdown?
I used id(query) to confirm that the query instances sent by the two clicks are not the same. After I execute the stop_loop function, start_loop still keeps running and changing the content of the message.
Can someone tell me how to stop it?
I used Redis to solve it, but I don't know if this is the most appropriate way. If there is a more suitable way, please let me know.
To manage your loop, you should move it outside the handlers and just get it from some storage (a dict is used here as an example).
A basic example of the loop:
loops = {}

class Loop:
    def __init__(self, user_id):
        self.user_id = user_id
        self._active = False
        self._stopped = True
        loops[self.user_id] = self

    @classmethod
    def get_loop(cls, user_id):
        # Reuse the existing loop for this user; only create one if none exists.
        # (loops.get(user_id, cls(user_id)) would eagerly build a new Loop and
        # overwrite the stored one, losing the reference to the running loop.)
        if user_id not in loops:
            loops[user_id] = cls(user_id)
        return loops[user_id]

    @property
    def is_running(self):
        return not self._stopped

    async def start(self):
        self._active = True
        self._stopped = False  # mark as running so is_running and stop() work
        asyncio.create_task(self._run_loop())

    async def _run_loop(self):
        while self._active:
            await bot.send_message(self.user_id, 'loop is running')
            await asyncio.sleep(5)
        self._stopped = True

    async def stop(self):
        self._active = False
        while not self._stopped:
            await asyncio.sleep(1)
So then:
@dp.callback_query_handler(text='start')
async def start_loop(query: CallbackQuery):
    user = query.from_user
    loop = Loop.get_loop(user.id)
    if loop.is_running:
        return await query.answer('Loop is already running')
    await loop.start()  # start() is a coroutine, so it has to be awaited
    await query.answer('Started!')

@dp.callback_query_handler(text='stop')
async def stop_loop(query: CallbackQuery):
    user = query.from_user
    loop = Loop.get_loop(user.id)
    await query.answer('Stopping...')
    await loop.stop()
    await bot.send_message(user.id, 'Loop successfully stopped.')
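An alternative sketch, not part of the original answer: instead of polling a flag, the running asyncio.Task itself can be stored per user and cancelled, which stops the countdown immediately (this assumes the bot object from the question code):

import asyncio

running_tasks = {}  # user_id -> asyncio.Task

async def countdown(user_id):
    a = 100
    while a > 0:
        a -= 1
        await bot.send_message(user_id, str(a))  # `bot` comes from the question code
        await asyncio.sleep(1)

def start_countdown(user_id):
    # Remember the task so the stop handler can cancel it later.
    running_tasks[user_id] = asyncio.create_task(countdown(user_id))

def stop_countdown(user_id):
    task = running_tasks.pop(user_id, None)
    if task is not None:
        task.cancel()  # raises CancelledError inside countdown() and stops it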
One of the async functions returns an async generator object. I added loop.run_until_complete(func()), but it still throws the error "TypeError: A Future, a coroutine or an awaitable is required". Below is the code. I'm trying to fetch the records from Neo4j asynchronously. I got the async Neo4j class from GitHub. I'm new to this async concept.
from concurrent import futures
import neo4j
from neo4j import GraphDatabase, basic_auth
import time
import traceback
import asyncio

RETRY_WAITS = [0, 1, 4]  # How long to wait after each successive failure.

class Neo4j:
    """Neo4j database API."""

    def __init__(self, config, loop):
        self.config = config
        self.loop = loop
        self.executor = futures.ThreadPoolExecutor(max_workers=30)
        for retry_wait in RETRY_WAITS:
            try:
                self.init_driver()
                break
            except:
                if retry_wait == RETRY_WAITS[-1]:
                    raise
                else:
                    print('WARNING: retrying to Init DB; err:')
                    traceback.print_exc()
                    time.sleep(retry_wait)  # wait for 0, 1, 3... seconds.

    def init_driver(self):
        auth = basic_auth(self.config['user'], self.config['pass'])
        self.driver = GraphDatabase.driver(self.config['url'], auth=auth)

    async def afetch_start(self, query):
        session = self.driver.session(access_mode=neo4j.READ_ACCESS)

        def run():
            return session.run(query).records()

        return session, await self.loop.run_in_executor(self.executor, run)

    async def afetch_iterate(self, session, iter):
        def iterate():
            try:
                return next(iter)
            except StopIteration:
                return None

        while True:
            res = await self.loop.run_in_executor(self.executor, iterate)
            if res is None:
                return
            else:
                yield dict(res)

    async def afetch(self, query):
        for retry_wait in RETRY_WAITS:
            try:
                session, iter = await self.afetch_start(query)
                break
            except (BrokenPipeError, neo4j.exceptions.ServiceUnavailable) as e:
                if retry_wait == RETRY_WAITS[-1]:
                    raise
                else:
                    await asyncio.sleep(retry_wait)
                    await self.loop.run_in_executor(self.executor, self.init_driver)
        async for x in self.afetch_iterate(session, iter):
            yield x
        await self.loop.run_in_executor(self.executor, session.close)

    async def afetch_one(self, query):
        async for i in self.afetch(query):
            return i
        return None

    async def aexec(self, query):
        async for i in self.afetch(query):
            pass
        return

config = {'url': "bolt://localhost", 'user': 'neo4j', 'pass': 'pwd'}
loop = asyncio.get_event_loop()
n = Neo4j(config, loop)
loop.run_until_complete(n.afetch("MATCH(p:Person)-[:Acted_in]->(mv:Movies) RETURN p.name as actors"))
loop.close()
--EDIT
I have modified the code to work properly. The query returns 218K rows, and it takes 5 minutes to extract the complete list, while the same async operation in C# completes in just 2 seconds. It looks like the above code still doesn't actually run asynchronously.
It's very hard to tell what exactly happens without a reproducible example, but I'll take a guess. You probably pass an async generator object to the loop; you shouldn't do that. The way to work with async generators is to use async for. Here's an example:
import asyncio

async def func():  # async generator
    yield 1
    yield 2
    yield 3

async def main():
    async for i in func():  # get values from async generator
        print(i)

asyncio.run(main())  # can be used instead of loop.run_until_complete(main())
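Applied to the code in the question, a sketch (assuming the Neo4j class and the n instance defined above): run_until_complete needs a coroutine, so wrap the async for consumption of afetch in one:

async def collect(n, query):
    # Consume the async generator returned by afetch() and gather its rows.
    return [record async for record in n.afetch(query)]

loop = asyncio.get_event_loop()
rows = loop.run_until_complete(
    collect(n, "MATCH(p:Person)-[:Acted_in]->(mv:Movies) RETURN p.name as actors")
)
loop.close()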