Running multiple loop functions in parallel in Python - python

I have 3 loops that I want to run in parallel until the program exits.
I'm currently using this solution, but I don't think it's optimal:
import asyncio
import uuid

import serial
import tkinter as tk
import websockets

class Window(tk.Tk):
    def __init__(self, loop, app):
        self.loop = loop
        self.app = app
        self.root = tk.Tk()

    async def show(self):
        while True:
            self.root.update()
            await asyncio.sleep(0.1)

class App:
    async def exec(self):
        self.window = Window(asyncio.get_event_loop(), self)
        await asyncio.gather(self.window.show(), self.connectWS(), self.readSerial())

    async def readSerial(self):
        self.serial = serial.serial_for_url('/dev/cu.usbserial-1430', baudrate=9600, timeout=5)
        self.serial.isOpen()
        self.serial.flushInput()   # flush input buffer, discarding all its contents
        self.serial.flushOutput()
        while True:
            response = self.serial.read(1)
            await asyncio.sleep(0.1)

    async def connectWS(self):
        try:
            async with websockets.connect("ws://mysocket.com") as ws:
                self.ws = WebSocketHandler(str(uuid.getnode()), ws, self)
                await asyncio.gather(self.ws.start(), self.ws.send_boot_payload())
        except websockets.exceptions.ConnectionClosedError:
            print("DISCONNECTED")
            call_later(10, self.connectWS)  # reconnect

asyncio.run(App().exec())
Can someone suggest the best practice for running 3 (or more) loops (in my case a WebSocket, a serial reader, and a Tkinter loop that updates the UI based on the serial and WebSocket data)?
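For reference, the usual pattern is what the code above already attempts: run each loop as its own coroutine and gather them on one event loop. Below is a minimal standalone sketch of that idea, with placeholder loop bodies rather than the actual WebSocket/serial/Tkinter code:

import asyncio

async def ui_loop():
    while True:
        # e.g. root.update() for Tkinter
        await asyncio.sleep(0.1)

async def serial_loop():
    while True:
        # e.g. read a byte from the serial port
        await asyncio.sleep(0.1)

async def websocket_loop():
    while True:
        # e.g. receive/send on the websocket
        await asyncio.sleep(0.1)

async def main():
    # run all three loops concurrently on one event loop
    await asyncio.gather(ui_loop(), serial_loop(), websocket_loop())

asyncio.run(main())

On Python 3.11+ an asyncio.TaskGroup can be used instead of gather; the overall structure stays the same.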

Related

Stop an asyncio task that was started inside a function in a class

I'm trying to cancel a specific asyncio task that was started in a function inside a class,
but it doesn't work: the task starts up again....
Thanks for any inspiration! :)
def button_stop_command():
    t1.cancel()
    # check which tasks are running
    tasks = asyncio.all_tasks()
    for task in tasks:
        print(f'> {task.get_name()}, {task.get_coro()}')

class ZMQHandler():
    def __init__(self):
        self.loop = asyncio.get_event_loop()
        url = 'tcp://192.168.0.91:28332'
        channel = 'sequence'
        self.ctx = zmq.asyncio.Context.instance()
        self.sock = self.ctx.socket(zmq.SUB)
        self.sock.connect(url)
        self.sock.setsockopt(zmq.SUBSCRIBE, channel.encode())
        print("Open ZMQ socket on", ZMQ_URL)

    async def handle(self):
        [..code...]
        asyncio.ensure_future(self.handle())

    def start(self):
        global t1
        self.loop.add_signal_handler(signal.SIGINT, self.stop)
        t1 = self.loop.create_task(self.handle())
        self.loop.run_forever()

async def tk_main(root):
    while True:
        root.update()
        await asyncio.sleep(0.05)

tkmain = asyncio.ensure_future(tk_main(root))
daemon = ZMQHandler()
daemon.start()
I want to cancel a specific task
Every time I post something, I get a new idea, and then the problem gets solved. My idea was:
def button_stop_command():
    t1.cancel()
    # check which tasks are running
    tasks = asyncio.all_tasks()
    for task in tasks:
        print(f'> {task.get_name()}, {task.get_coro()}')

class ZMQHandler():
    def __init__(self):
        self.loop = asyncio.get_event_loop()
        url = 'tcp://192.168.0.91:28332'
        channel = 'sequence'
        self.ctx = zmq.asyncio.Context.instance()
        self.sock = self.ctx.socket(zmq.SUB)
        self.sock.connect(url)
        self.sock.setsockopt(zmq.SUBSCRIBE, channel.encode())
        print("Open ZMQ socket on", ZMQ_URL)

    async def handle(self):
        global t1
        [..code...]
        t1 = asyncio.ensure_future(self.handle())

    def start(self):
        self.loop.add_signal_handler(signal.SIGINT, self.stop)
        self.loop.create_task(self.handle())
        self.loop.run_forever()

async def tk_main(root):
    while True:
        root.update()
        await asyncio.sleep(0.05)

tkmain = asyncio.ensure_future(tk_main(root))
daemon = ZMQHandler()
daemon.start()
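The key detail in this self-answer is that cancel() has to be called on the task object that is actually running, which is why t1 is re-assigned inside handle() each time it re-schedules itself. Below is a minimal standalone sketch of cancelling a task cleanly (the names are illustrative, not taken from the code above):

import asyncio

async def worker():
    try:
        while True:
            print('working')
            await asyncio.sleep(1)
    except asyncio.CancelledError:
        print('worker cancelled, cleaning up')
        raise  # let the cancellation propagate

async def main():
    task = asyncio.create_task(worker(), name='worker')
    await asyncio.sleep(3)
    task.cancel()
    try:
        await task
    except asyncio.CancelledError:
        print('task cancelled:', task.cancelled())

asyncio.run(main())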

How to correctly lock async generators?

I am trying to use an async generator as a wrapper for a shared connection
async def mygen():
    await init()
    connection = await open_connection()
    while True:
        data = yield
        await connection.send(data)

shared_gen = None

async def send_data(data):
    global shared_gen
    if not shared_gen:
        shared_gen = mygen()
        await shared_gen.asend(None)
    await shared_gen.asend(data)
Is the above code safe from race conditions? Is it possible for two asends to execute concurrently, or will the second one block implicitly until the generator is ready at the yield step? Assume connection.send is not concurrency safe.
Update:
I wrote a wrapper to help use it safely.
class Locked:
    def __init__(self, resource):
        self._resource = resource
        self._lock = asyncio.Lock()

    @contextlib.asynccontextmanager
    async def lock(self):
        async with self._lock:
            yield self._resource

async def send_data(locked_gen, data):
    async with locked_gen.lock() as gen:
        await gen.asend(data)

async def main():
    gen = mygen()
    await gen.asend(None)
    locked_gen = Locked(gen)
    ...
Is it possible for two asends to execute concurrently, or will the second one block implicitly until the generator is ready at the yield step?
It is not possible for asend to be called concurrently, but trying to do so doesn't result in blocking. Instead, the second one will raise a RuntimeError, as demonstrated by the following example:
import asyncio

async def gen():
    while True:
        yield
        await asyncio.sleep(1)

async def main():
    ait = gen()
    await ait.asend(None)  # start the generator

    async def send():
        print('sending')
        await ait.asend(42)

    await asyncio.gather(send(), send())

asyncio.run(main())
To make the send block until the previous one finishes, you need an explicit lock around the await of asend:
async def main():
    ait = gen()
    await ait.asend(None)
    lock = asyncio.Lock()

    async def send():
        async with lock:
            print('sending')
            await ait.asend(42)

    await asyncio.gather(send(), send())
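The same serialization can be achieved with the Locked wrapper from the question's update; here is a small usage sketch, assuming mygen(), Locked and send_data as defined above (the run_locked_sends name is illustrative):

async def run_locked_sends():
    gen = mygen()
    await gen.asend(None)      # prime the generator up to its first yield
    locked_gen = Locked(gen)
    # the lock inside Locked serializes the two sends
    await asyncio.gather(
        send_data(locked_gen, 'a'),
        send_data(locked_gen, 'b'),
    )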

Python aiogram: how to stop an asynchronous loop

Simple example
import asyncio
import logging

from aiogram import Bot, Dispatcher, types

logging.basicConfig(level=logging.INFO)

token = 'token'
bot = Bot(token=token)
dp = Dispatcher(bot=bot)

@dp.callback_query_handler(text='stoploop')
async def stop_loop(query: types.CallbackQuery):
    # TODO how to stop test loop?
    await query.message.edit_text('stop')

@dp.callback_query_handler(text='test')
async def start_loop(query: types.CallbackQuery):
    a = 100
    while True:
        a -= 1
        markup = types.InlineKeyboardMarkup()
        markup.add(types.InlineKeyboardButton('<<<Stop And Back To Home', callback_data='stoploop'))
        await query.message.edit_text(str(a), reply_markup=markup)
        await asyncio.sleep(1)

@dp.message_handler(commands='start')
async def start_cmd_handler(message: types.Message):
    markup = types.InlineKeyboardMarkup()
    markup.add(
        types.InlineKeyboardButton('start loop', callback_data='test')
    )
    await message.reply('test', reply_markup=markup)

async def main():
    try:
        await dp.start_polling()
    finally:
        await bot.close()

if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
When I click start_loop, the Telegram message in my chat starts to display a countdown. When I click stop, how can I stop the previous countdown?
I used id(query) to confirm that the two queries are not the same instance. After the stop_loop handler runs, start_loop still keeps executing and changing the content of the message.
Can someone tell me how to stop it?
I used Redis to solve it, but I don't know if this is the most appropriate way. If there is a more suitable way, please let me know.
To manage your loop you should move it outside the handlers and fetch it from some storage (a dict is used as an example).
A basic example of the loop:
loops = {}

class Loop:
    def __init__(self, user_id):
        self.user_id = user_id
        self._active = False
        self._stopped = True
        loops[self.user_id] = self

    @classmethod
    def get_loop(cls, user_id):
        # reuse the existing loop for this user, create one only if needed
        if user_id not in loops:
            cls(user_id)
        return loops[user_id]

    @property
    def is_running(self):
        return not self._stopped

    async def start(self):
        self._active = True
        self._stopped = False  # mark the loop as running
        asyncio.create_task(self._run_loop())

    async def _run_loop(self):
        while self._active:
            await bot.send_message(self.user_id, 'loop is running')
            await asyncio.sleep(5)
        self._stopped = True

    async def stop(self):
        self._active = False
        while not self._stopped:
            await asyncio.sleep(1)
So then:
@dp.callback_query_handler(text='start')
async def start_loop(query: CallbackQuery):
    user = query.from_user
    loop = Loop.get_loop(user.id)
    if loop.is_running:
        return await query.answer('Loop is already running')
    await loop.start()
    await query.answer('Started!')

@dp.callback_query_handler(text='stop')
async def stop_loop(query: CallbackQuery):
    user = query.from_user
    loop = Loop.get_loop(user.id)
    await query.answer('Stopping...')
    await loop.stop()
    await bot.send_message(user.id, 'Loop successfully stopped.')
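For comparison only (this is not part of the original answer), a variant that stops by cancelling its task instead of polling a flag; the CancellableLoop name is mine, and it reuses the bot and loops names from above:

class CancellableLoop:
    def __init__(self, user_id):
        self.user_id = user_id
        self._task = None
        loops[user_id] = self

    @property
    def is_running(self):
        return self._task is not None and not self._task.done()

    async def start(self):
        self._task = asyncio.create_task(self._run_loop())

    async def _run_loop(self):
        while True:
            await bot.send_message(self.user_id, 'loop is running')
            await asyncio.sleep(5)

    async def stop(self):
        if self._task is not None:
            self._task.cancel()
            try:
                await self._task
            except asyncio.CancelledError:
                pass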

Python3 asyncio - callback for add_done_callback does not update self variable in server class

I have two servers, created with asyncio.start_server:
asyncio.start_server(self.handle_connection, host = host, port = port)
They run in one loop:
loop.run_until_complete(asyncio.gather(server1, server2))
loop.run_forever()
I'm using an asyncio.Queue to communicate between the servers. Messages from Server2, added via queue.put(msg), are successfully received by queue.get() in Server1. I'm running queue.get() via asyncio.ensure_future and registering the following callback with the task's add_done_callback method in Server1:
def callback(self, future):
    msg = future.result()
    self.msg = msg
But this callback does not work as expected: self.msg never updates. What am I doing wrong?
UPDATED
with additional code to give a fuller example:
class Queue(object):
    def __init__(self, loop, maxsize: int):
        self.instance = asyncio.Queue(loop = loop, maxsize = maxsize)

    async def put(self, data):
        await self.instance.put(data)

    async def get(self):
        data = await self.instance.get()
        self.instance.task_done()
        return data

    @staticmethod
    def get_instance():
        return Queue(loop = asyncio.get_event_loop(), maxsize = 10)
Server class:
class BaseServer(object):
    def __init__(self, host, port):
        self.instance = asyncio.start_server(self.handle_connection, host = host, port = port)

    async def handle_connection(self, reader: StreamReader, writer: StreamWriter):
        pass

    def get_instance(self):
        return self.instance

    @staticmethod
    def create():
        return BaseServer(None, None)
Next I'm running the servers:
loop.run_until_complete(asyncio.gather(server1.get_instance(), server2.get_instance()))
loop.run_forever()
In the handle_connection of server2 I'm calling queue.put(msg); in the handle_connection of server1 I registered queue.get() as a task:
task_queue = asyncio.ensure_future(queue.get())
task_queue.add_done_callback(self.process_queue)
The process_queue method of server1:
def process_queue(self, future):
    msg = future.result()
    self.msg = msg
The handle_connection method of server1:
async def handle_connection(self, reader: StreamReader, writer: StreamWriter):
    task_queue = asyncio.ensure_future(queue.get())
    task_queue.add_done_callback(self.process_queue)
    while self.msg != SPECIAL_VALUE:
        # doing something
Although task_queue is done and self.process_queue is called, self.msg never updates.
Basically, since you are already using an asynchronous structure, I think you can directly await the result instead of going through a done-callback:
async def handle_connection(self, reader: StreamReader, writer: StreamWriter):
    msg = await queue.get()
    process_queue(msg)  # change it to accept the real value instead of a future
    # do something
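A self-contained sketch of that await-the-queue pattern outside the server classes (the names are illustrative, not taken from the original code):

import asyncio

SPECIAL_VALUE = 'quit'

async def producer(queue):
    for msg in ('hello', 'world', SPECIAL_VALUE):
        await queue.put(msg)
        await asyncio.sleep(0.1)

async def consumer(queue):
    msg = None
    while msg != SPECIAL_VALUE:
        # awaiting here yields control to the event loop,
        # so the producer gets a chance to run
        msg = await queue.get()
        print('got', msg)

async def main():
    queue = asyncio.Queue()
    await asyncio.gather(producer(queue), consumer(queue))

asyncio.run(main())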

Python asyncio: interruptable task

I am trying to make a barebones skeleton fighting game with python asyncio.
class Skeleton(Creature):
    pass

class SkeletonAI():
    def __init__(self, skeleton, loop=None):
        self.loop = loop or asyncio.new_event_loop()
        self.skeleton = skeleton
        self.action_task = None

    async def run(self):
        while True:
            #print(self.action_task, )
            if self.skeleton.alive and self.skeleton.target.alive:
                if self.skeleton.state == 'idle':
                    #ATTACK
                    self.skeleton.begin_attack()
                    self.action_task = self.loop.call_later(3, self.skeleton.action_complete)
            else:
                break

class Player(Creature):
    def attack_target(self, target):
        target.take_damage(self.damage)
        if target.state == 'attacking':
            target.state = 'idle'
            #interrupt attack

class Game():
    #Super simple game
    #The skeleton launches an attack, press anything to interrupt it

    async def handle_sending(self):
        loop = asyncio.get_event_loop()
        executor = concurrent.futures.ThreadPoolExecutor(
            max_workers=1,
        )
        while True:
            msg = await loop.run_in_executor(executor, input)
            print('got a message')
            if self.skeleton_ai.action_task:
                print('cancelling attack')
                self.skeleton_ai.action_task.cancel()
                self.skeleton_ai.skeleton.machine.set_state('idle')
                print('cancelled attack')
                self.skeleton_ai.action_task = None

    async def game_loop(self):
        player_task = asyncio.ensure_future(self.handle_sending())
        skeleton_task = asyncio.ensure_future(self.skeleton_ai.run())

    def __init__(self):
        self.task = None
        self.loop = asyncio.get_event_loop()
        self.player = Player(name='ply')
        self.skeleton_ai = SkeletonAI(skeleton=Skeleton(name='bobby'))
        self.skeleton_ai.skeleton.target = self.player
        self.loop.run_until_complete(self.game_loop())
        try:
            self.loop.run_forever()
        finally:
            pass
            loop.close()

Game()
Here's what I am trying to do:
Player input and game output are async, so input() doesn't block. This works.
The skeleton prepares an attack, if it's not interrupted in 3 seconds, the attack deals damage to the player.
The player can input any text to interrupt the skeleton attack.
How can I make the skeleton's attack work? I want a task I can interrupt at will and that calls a callback afterwards. Currently everything just gets stuck and the call_later callback never fires.
This is the pattern for an async function with a timeout and a callback. The key is to catch the asyncio.TimeoutError and run your timeout logic there. The cancelled function will not continue past its current await position.
import asyncio

async def slow_function(seconds):
    print('starting slow computations')
    await asyncio.sleep(seconds)
    print('slow computations done')

async def timeout_callback():
    print('timeout called')

async def timeout_with_cb(fut, timeout, timeout_fut):
    try:
        await asyncio.wait_for(fut, timeout)
    except asyncio.TimeoutError:
        await timeout_fut

loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.ensure_future(
    timeout_with_cb(slow_function(2), 1,
                    timeout_callback())))
This will print:
starting slow computations
timeout called
I guess this can help you adapt your example (the provided example does not compile).
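As a rough adaptation sketch only (the names and data structures below are assumptions, not the asker's actual classes), the attack can also be modelled as a cancellable task that waits out its wind-up before applying damage:

import asyncio

async def skeleton_attack(target, damage, windup=3):
    # if this task is cancelled during the wind-up, no damage is dealt
    try:
        await asyncio.sleep(windup)
    except asyncio.CancelledError:
        print('attack interrupted')
        raise
    target['hp'] -= damage
    print('attack landed, target hp:', target['hp'])

async def main():
    target = {'hp': 10}
    attack = asyncio.create_task(skeleton_attack(target, damage=3))
    await asyncio.sleep(1)   # the player reacts after one second
    attack.cancel()          # interrupt the attack
    try:
        await attack
    except asyncio.CancelledError:
        pass
    print('target hp is still', target['hp'])

asyncio.run(main())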
