I'm trying to write code that enables using asyncpg from mostly sync code (to avoid duplication).
The coroutine Database.test() executes and returns in my worker event loop/thread, and the future works correctly. But for some very strange reason, connecting to a database with asyncpg just hangs. Any clue as to why?
Also, maybe I should use asyncio.run() instead.
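For reference, the asyncio.run() variant I have in mind would look roughly like this (untested sketch: each call gets a fresh event loop, so anything loop-bound, such as a connection pool, would not survive between calls):

import asyncio
from concurrent.futures import ThreadPoolExecutor

class AsyncioRunWorker:
    # Hypothetical alternative: run each submitted coroutine to completion with
    # asyncio.run() on a single background thread instead of keeping one loop alive.
    def __init__(self):
        self._executor = ThreadPoolExecutor(max_workers=1)

    def submit(self, coro):
        # asyncio.run() creates a fresh loop, runs the coroutine, then closes the loop
        return self._executor.submit(asyncio.run, coro).result()

Here is my current version with the long-lived worker loop: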
from threading import Thread
import asyncio

import asyncpg

class AsyncioWorkerThread(Thread):
    def __init__(self, *args, daemon=True, loop=None, **kwargs):
        super().__init__(*args, daemon=daemon, **kwargs)
        self.loop = loop or asyncio.new_event_loop()
        self.running = False

    def run(self):
        self.running = True
        self.loop.run_forever()

    def submit(self, coro):
        fut = asyncio.run_coroutine_threadsafe(coro, loop=self.loop)
        return fut.result()

    def stop(self):
        self.loop.call_soon_threadsafe(self.loop.stop)
        self.join()
        self.running = False
class Database:
    async def test(self):
        print('In test')
        await asyncio.sleep(5)

    async def connect(self):
        # Put in your db credentials here
        # pg_user = ''
        # pg_password = ''
        # pg_host = ""
        # pg_port = 20
        # pg_db
        connection_uri = f'postgres://{pg_user}:{pg_password}@{pg_host}:{pg_port}/{pg_db}'
        self.connection_pool = await asyncpg.create_pool(
            connection_uri, min_size=5, max_size=10)
if __name__ == "__main__":
    db = Database()
    worker = AsyncioWorkerThread()
    worker.start()
    worker.submit(db.test())     # Works, the future returns correctly
    worker.submit(db.connect())  # Hangs, the thread never manages to acquire
I am trying to cancel a specific asyncio task that was started in a function inside a class, but it doesn't work: the task starts up again...
Thanks for any inspiration! :)
def button_stop_command():
    t1.cancel()
    # check which tasks are running
    tasks = asyncio.all_tasks()
    for task in tasks:
        print(f'> {task.get_name()}, {task.get_coro()}')

class ZMQHandler():
    def __init__(self):
        self.loop = asyncio.get_event_loop()
        url = 'tcp://192.168.0.91:28332'
        channel = 'sequence'
        self.ctx = zmq.asyncio.Context.instance()
        self.sock = self.ctx.socket(zmq.SUB)
        self.sock.connect(url)
        self.sock.setsockopt(zmq.SUBSCRIBE, channel.encode())
        print("Open ZMQ socket on", ZMQ_URL)

    async def handle(self):
        [..code...]
        asyncio.ensure_future(self.handle())

    def start(self):
        global t1
        self.loop.add_signal_handler(signal.SIGINT, self.stop)
        t1 = self.loop.create_task(self.handle())
        self.loop.run_forever()

async def tk_main(root):
    while True:
        root.update()
        await asyncio.sleep(0.05)

tkmain = asyncio.ensure_future(tk_main(root))
daemon = ZMQHandler()
daemon.start()
I want to cancel a specific task
Every time I post something, I get a new idea, and then the problem gets solved. My idea was:
def button_stop_command():
    t1.cancel()
    # check which tasks are running
    tasks = asyncio.all_tasks()
    for task in tasks:
        print(f'> {task.get_name()}, {task.get_coro()}')

class ZMQHandler():
    def __init__(self):
        self.loop = asyncio.get_event_loop()
        url = 'tcp://192.168.0.91:28332'
        channel = 'sequence'
        self.ctx = zmq.asyncio.Context.instance()
        self.sock = self.ctx.socket(zmq.SUB)
        self.sock.connect(url)
        self.sock.setsockopt(zmq.SUBSCRIBE, channel.encode())
        print("Open ZMQ socket on", ZMQ_URL)

    async def handle(self):
        global t1
        [..code...]
        t1 = asyncio.ensure_future(self.handle())

    def start(self):
        self.loop.add_signal_handler(signal.SIGINT, self.stop)
        self.loop.create_task(self.handle())
        self.loop.run_forever()

async def tk_main(root):
    while True:
        root.update()
        await asyncio.sleep(0.05)

tkmain = asyncio.ensure_future(tk_main(root))
daemon = ZMQHandler()
daemon.start()
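Stripped of the ZMQ parts, the idea boils down to this standalone sketch (plain asyncio only, with the names borrowed from my code, so it can actually be run):

import asyncio

t1 = None

async def handle():
    global t1
    await asyncio.sleep(1)
    print('handled one message')
    # the task reschedules itself, so it must also update the shared handle,
    # otherwise cancel() would only hit the first, already finished task
    t1 = asyncio.ensure_future(handle())

def button_stop_command():
    if t1 is not None:
        t1.cancel()

async def main():
    global t1
    t1 = asyncio.ensure_future(handle())
    await asyncio.sleep(3.5)
    button_stop_command()
    await asyncio.sleep(0.1)  # give the cancellation a moment to propagate

asyncio.run(main())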
I have 3 loops I want to run in parallel until the program exits.
I'm currently using this solution, but I don't think it's optimal:
class Window(tk.Tk):
    def __init__(self, loop, app):
        self.loop = loop
        self.app = app
        self.root = tk.Tk()

    async def show(self):
        while True:
            self.root.update()
            await asyncio.sleep(0.1)

class App:
    async def exec(self):
        self.window = Window(asyncio.get_event_loop(), self)
        await asyncio.gather(self.window.show(), self.connectWS(), self.readSerial())

    async def readSerial(self):
        self.serial = serial.serial_for_url('/dev/cu.usbserial-1430', baudrate=9600, timeout=5)
        self.serial.isOpen()
        self.serial.flushInput()   # flush input buffer, discarding all its contents
        self.serial.flushOutput()
        while True:
            response = self.serial.read(1)
            await asyncio.sleep(0.1)

    async def connectWS(self):
        try:
            async with websockets.connect("ws://mysocket.com") as ws:
                self.ws = WebSocketHandler(str(uuid.getnode()), ws, self)
                await asyncio.gather(self.ws.start(), self.ws.send_boot_payload())
        except websockets.exceptions.ConnectionClosedError:
            print("DISCONNECTED")
            call_later(10, self.connectWS)  # reconnect

asyncio.run(App().exec())
Can someone suggest the best practice for running 3 (or more) loops like this (in my case a WebSocket, a serial reader, and a Tkinter loop that updates the UI based on the serial and websocket data)?
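For the reconnect part in particular, I suspect it should be a plain coroutine loop instead of call_later, roughly like this (untested sketch; do_connect is just a placeholder for the real websockets logic):

import asyncio

async def do_connect():
    # placeholder for websockets.connect(...) plus the receive loop
    raise ConnectionError('server unreachable')

async def connectWS():
    # a reconnect loop written as a coroutine: gather() keeps it alive for the
    # whole program, and the sleep provides the 10-second back-off before retrying
    while True:
        try:
            await do_connect()
        except ConnectionError:
            print('DISCONNECTED, retrying in 10 s')
            await asyncio.sleep(10)

# e.g. await asyncio.gather(window.show(), connectWS(), readSerial())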
I have two servers, created with asyncio.start_server:
asyncio.start_server(self.handle_connection, host = host, port = port), both running in one loop:
loop.run_until_complete(asyncio.gather(server1, server2))
loop.run_forever()
I'm using asyncio.Queue to communicate between the servers. Messages from Server2, added via queue.put(msg), are successfully received by queue.get() in Server1. I'm running queue.get() with asyncio.ensure_future and registering a callback via the
add_done_callback method in Server1:
def callback(self, future):
    msg = future.result()
    self.msg = msg
But this callback does not work as expected - self.msg does not update. What am I doing wrong?
UPDATED with additional code to give a fuller example:
class Queue(object):
    def __init__(self, loop, maxsize: int):
        self.instance = asyncio.Queue(loop = loop, maxsize = maxsize)

    async def put(self, data):
        await self.instance.put(data)

    async def get(self):
        data = await self.instance.get()
        self.instance.task_done()
        return data

    @staticmethod
    def get_instance():
        return Queue(loop = asyncio.get_event_loop(), maxsize = 10)
Server class:
class BaseServer(object):
    def __init__(self, host, port):
        self.instance = asyncio.start_server(self.handle_connection, host = host, port = port)

    async def handle_connection(self, reader: StreamReader, writer: StreamWriter):
        pass

    def get_instance(self):
        return self.instance

    @staticmethod
    def create():
        return BaseServer(None, None)
Next I'm running the servers:
loop.run_until_complete(asyncio.gather(server1.get_instance(), server2.get_instance()))
loop.run_forever()
In the handle_connection of server2 I'm calling queue.put(msg); in the handle_connection of server1 I register queue.get() as a task:
task_queue = asyncio.ensure_future(queue.get())
task_queue.add_done_callback(self.process_queue)
The process_queue method of server1:
def process_queue(self, future):
    msg = future.result()
    self.msg = msg
The handle_connection method of server1:
async def handle_connection(self, reader: StreamReader, writer: StreamWriter):
    task_queue = asyncio.ensure_future(queue.get())
    task_queue.add_done_callback(self.process_queue)
    while self.msg != SPECIAL_VALUE:
        # doing something
Although task_queue is done and self.process_queue is called, self.msg never updates.
Basically, since you are using an asynchronous structure, I think you can simply await the result directly:
async def handle_connection(self, reader: StreamReader, writer: StreamWriter):
    msg = await queue.get()
    process_queue(msg)  # change it to accept the real value instead of a future
    # do something
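For example, handle_connection could consume the queue in a loop until it sees the sentinel (sketch only, reusing the queue, SPECIAL_VALUE and process_queue names from your code):

async def handle_connection(self, reader: StreamReader, writer: StreamWriter):
    while True:
        msg = await queue.get()    # suspends here until server2 puts something
        self.process_queue(msg)    # now receives the value itself, not a future
        if msg == SPECIAL_VALUE:
            break
        # doing something with msg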
I am trying to make a barebones skeleton fighting game with python asyncio.
class Skeleton(Creature):
    pass

class SkeletonAI():
    def __init__(self, skeleton, loop=None):
        self.loop = loop or asyncio.new_event_loop()
        self.skeleton = skeleton
        self.action_task = None

    async def run(self):
        while True:
            #print(self.action_task, )
            if self.skeleton.alive and self.skeleton.target.alive:
                if self.skeleton.state == 'idle':
                    # ATTACK
                    self.skeleton.begin_attack()
                    self.action_task = self.loop.call_later(3, self.skeleton.action_complete)
            else:
                break

class Player(Creature):
    def attack_target(self, target):
        target.take_damage(self.damage)
        if target.state == 'attacking':
            target.state = 'idle'
            # interrupt attack

class Game():
    # Super simple game
    # The skeleton launches an attack, press anything to interrupt it

    async def handle_sending(self):
        loop = asyncio.get_event_loop()
        executor = concurrent.futures.ThreadPoolExecutor(
            max_workers=1,
        )
        while True:
            msg = await loop.run_in_executor(executor, input)
            print('got a message')
            if self.skeleton_ai.action_task:
                print('cancelling attack')
                self.skeleton_ai.action_task.cancel()
                self.skeleton_ai.skeleton.machine.set_state('idle')
                print('cancelled attack')
                self.skeleton_ai.action_task = None

    async def game_loop(self):
        player_task = asyncio.ensure_future(self.handle_sending())
        skeleton_task = asyncio.ensure_future(self.skeleton_ai.run())

    def __init__(self):
        self.task = None
        self.loop = asyncio.get_event_loop()
        self.player = Player(name='ply')
        self.skeleton_ai = SkeletonAI(skeleton=Skeleton(name='bobby'))
        self.skeleton_ai.skeleton.target = self.player
        self.loop.run_until_complete(self.game_loop())
        try:
            self.loop.run_forever()
        finally:
            pass
            loop.close()

Game()
Here's what I am trying to do:
Player input and game output are async, so input() doesn't block. This works.
The skeleton prepares an attack; if it's not interrupted within 3 seconds, the attack deals damage to the player.
The player can input any text to interrupt the skeleton attack.
How can I implement the skeleton's attack? I want a task I can interrupt at will and that calls a callback after a delay. Currently everything just gets stuck: the call_later callback never fires.
This is the pattern for an async function with a timeout and a callback. The key is to catch the asyncio.TimeoutError and run your timeout logic there. The cancelled function will not continue past its current await position.
import asyncio

async def slow_function(seconds):
    print('starting slow computations')
    await asyncio.sleep(seconds)
    print('slow computations done')

async def timeout_callback():
    print('timeout called')

async def timeout_with_cb(fut, timeout, timeout_fut):
    try:
        await asyncio.wait_for(fut, timeout)
    except asyncio.TimeoutError:
        await timeout_fut

loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.ensure_future(
    timeout_with_cb(slow_function(2), 1,
                    timeout_callback())))
This will print:
starting slow computations
timeout called
I hope this helps you adapt your example (the code you provided does not run as posted).
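Adapted to your skeleton it could look roughly like this (sketch only: StubSkeleton stands in for your Skeleton class, and an asyncio.Event replaces the input handling as the interrupt signal):

import asyncio

class StubSkeleton:
    # minimal stand-in for the real Skeleton, just for this sketch
    state = 'attacking'

    def begin_attack(self):
        print('skeleton winds up')

    def action_complete(self):
        print('attack lands')

async def skeleton_attack(skeleton, interrupted):
    skeleton.begin_attack()
    try:
        # wait up to 3 seconds for an interrupt; if none arrives,
        # asyncio.TimeoutError fires and the attack completes
        await asyncio.wait_for(interrupted.wait(), timeout=3)
        skeleton.state = 'idle'
        print('attack interrupted')
    except asyncio.TimeoutError:
        skeleton.action_complete()

async def main():
    interrupted = asyncio.Event()
    attack = asyncio.ensure_future(skeleton_attack(StubSkeleton(), interrupted))
    await asyncio.sleep(1)
    interrupted.set()  # simulates the player typing something
    await attack

asyncio.run(main())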
I would like to build the equivalent of a ReconnectingClientFactory with asyncio, in particular to handle the case where the server is not available when the client is started: the ReconnectingClientFactory will keep trying, which is something asyncio.events.create_connection does not do.
Concretely:
The EchoClient example would be fine.
The crux is how the connection is made.
factory = EchoClientFactory('ws://127.0.0.1:5678')
connectWS(factory)
for the Twisted version with ReconnectingClientFactory, versus
factory = EchoClientFactory(u"ws://127.0.0.1:5678")
factory.protocol = SecureServerClientProtocol
loop = asyncio.get_event_loop()
# coro = loop.create_connection(factory, 'ws_server', 5678)
coro = loop.create_connection(factory, '127.0.0.1', 5678)
loop.run_until_complete(asyncio.wait([
    alive(), coro
]))
loop.run_forever()
loop.close()
or something similar for the asyncio version.
The problem is that in the asyncio version the connection is established by asyncio.events.create_connection, which simply fails if the server is not available.
How can I reconcile the two?
Many thanks
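The naive workaround I can think of is to just retry create_connection in a loop until it succeeds (untested sketch below), but I would prefer something closer to a real ReconnectingClientFactory, with back-off and the ability to reconnect after a dropped connection.

import asyncio

async def connect_with_retry(protocol_factory, host, port, delay=3):
    # keep calling create_connection until the server accepts;
    # OSError covers ConnectionRefusedError and friends
    loop = asyncio.get_event_loop()
    while True:
        try:
            return await loop.create_connection(protocol_factory, host, port)
        except OSError:
            print(f'server unavailable, retrying in {delay} s')
            await asyncio.sleep(delay)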
I think I get what you want. Here's the code, with an example based on the asyncio TCP echo client protocol example.
import asyncio
import random

class ReconnectingTCPClientProtocol(asyncio.Protocol):
    max_delay = 3600
    initial_delay = 1.0
    factor = 2.7182818284590451
    jitter = 0.119626565582
    max_retries = None

    def __init__(self, *args, loop=None, **kwargs):
        if loop is None:
            loop = asyncio.get_event_loop()
        self._loop = loop
        self._args = args
        self._kwargs = kwargs
        self._retries = 0
        self._delay = self.initial_delay
        self._continue_trying = True
        self._call_handle = None
        self._connector = None

    def connection_lost(self, exc):
        if self._continue_trying:
            self.retry()

    def connection_failed(self, exc):
        if self._continue_trying:
            self.retry()

    def retry(self):
        if not self._continue_trying:
            return
        self._retries += 1
        if self.max_retries is not None and (self._retries > self.max_retries):
            return
        self._delay = min(self._delay * self.factor, self.max_delay)
        if self.jitter:
            self._delay = random.normalvariate(self._delay,
                                               self._delay * self.jitter)
        self._call_handle = self._loop.call_later(self._delay, self.connect)

    def connect(self):
        if self._connector is None:
            self._connector = self._loop.create_task(self._connect())

    async def _connect(self):
        try:
            await self._loop.create_connection(lambda: self,
                                               *self._args, **self._kwargs)
        except Exception as exc:
            self._loop.call_soon(self.connection_failed, exc)
        finally:
            self._connector = None

    def stop_trying(self):
        if self._call_handle:
            self._call_handle.cancel()
            self._call_handle = None
        self._continue_trying = False
        if self._connector is not None:
            self._connector.cancel()
            self._connector = None

if __name__ == '__main__':
    class EchoClientProtocol(ReconnectingTCPClientProtocol):
        def __init__(self, message, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.message = message

        def connection_made(self, transport):
            transport.write(self.message.encode())
            print('Data sent: {!r}'.format(self.message))

        def data_received(self, data):
            print('Data received: {!r}'.format(data.decode()))

        def connection_lost(self, exc):
            print('The server closed the connection')
            print('Stop the event loop')
            self._loop.stop()

    loop = asyncio.get_event_loop()
    client = EchoClientProtocol('Hello, world!', '127.0.0.1', 8888, loop=loop)
    client.connect()
    loop.run_forever()
    loop.close()