Send asyncio tasks to a loop running in another thread - Python

How can I asynchronously submit tasks to an asyncio event loop running in another thread?
My motivation is to support interactive asynchronous workloads in the interpreter, so I can't block the main REPL thread.
Example
My current (flawed) understanding says that the following should work. Why doesn't it? What is a better way to accomplish the goal above?
import asyncio
from threading import Thread

loop = asyncio.new_event_loop()

def f(loop):
    asyncio.set_event_loop(loop)
    loop.run_forever()

t = Thread(target=f, args=(loop,))
t.start()

@asyncio.coroutine
def g():
    yield from asyncio.sleep(1)
    print('Hello, world!')

asyncio.async(g(), loop=loop)

You must use call_soon_threadsafe to schedule callbacks from different threads:
import asyncio
from threading import Thread

loop = asyncio.new_event_loop()

def f(loop):
    asyncio.set_event_loop(loop)
    loop.run_forever()

t = Thread(target=f, args=(loop,))
t.start()

@asyncio.coroutine
def g():
    yield from asyncio.sleep(1)
    print('Hello, world!')

loop.call_soon_threadsafe(asyncio.async, g())
See https://docs.python.org/3/library/asyncio-dev.html#asyncio-multithreading for more information.
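On Python 3.5.1 and later, asyncio.run_coroutine_threadsafe wraps this pattern up for you and hands back a concurrent.futures.Future for the coroutine's result. A minimal sketch of the same program using it (the daemon thread and the return value are just this example's choices, not from the answer above):

import asyncio
from threading import Thread

loop = asyncio.new_event_loop()
Thread(target=loop.run_forever, daemon=True).start()

async def g():
    await asyncio.sleep(1)
    print('Hello, world!')
    return 42

# Schedules g() on the loop in the other thread; thread-safe by design.
future = asyncio.run_coroutine_threadsafe(g(), loop)
print(future.result())  # blocks the calling thread until g() finishes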
EDIT: Example of an interpreter supporting asynchronous workloads
# vim: filetype=python3 tabstop=2 expandtab

import asyncio as aio
import random

@aio.coroutine
def async_eval(input_, sec):
  yield from aio.sleep(sec)
  print("")
  try:
    result = eval(input_)
  except Exception as e:
    print("< {!r} does not compute >".format(input_))
  else:
    print("< {!r} = {} >".format(input_, result))

@aio.coroutine
def main(loop):
  while True:
    input_ = yield from loop.run_in_executor(None, input, "> ")
    if input_ == "quit":
      break
    elif input_ == "":
      continue
    else:
      sec = random.uniform(5, 10)
      print("< {!r} scheduled for execution in {:.02} sec >".format(input_, sec))
      aio.async(async_eval(input_, sec))

loop = aio.get_event_loop()
loop.run_until_complete(main(loop))
loop.close()

The first example in Jashandeep Sohi's answer does not work for me in 3.7+ and prints warnings about the deprecated @asyncio.coroutine decorator. I reworked it into something that runs under 3.8, and tweaked it a little to meet my needs. I am new to multi-threading in Python (but not multithreading in general), so any advice, guidance, etc. is appreciated:
import asyncio
from threading import Thread

loop = asyncio.new_event_loop()
running = True

def evaluate(future):
    global running
    stop = future.result()
    if stop:
        print("press enter to exit...")
        running = False

def side_thread(loop):
    asyncio.set_event_loop(loop)
    loop.run_forever()

thread = Thread(target=side_thread, args=(loop,), daemon=True)
thread.start()

async def display(text):
    await asyncio.sleep(5)
    print("echo:", text)
    return text == "exit"

while running:
    text = input("enter text: ")
    future = asyncio.run_coroutine_threadsafe(display(text), loop)
    future.add_done_callback(evaluate)

print("exiting")
The echo and other output will conflict with the prompts but it should be good enough to demonstrate it is working.
One thing I am unsure about is setting the global running from one thread and reading it from another. I think the GIL keeps the two threads' views of it consistent, but I'd love confirmation (or not) of that.
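In CPython the GIL does make a plain assignment to a global like this safe to write in one thread and read in another, but if you'd rather not lean on that implementation detail, threading.Event gives the same handshake an explicit, documented thread-safe API. A minimal sketch of the same loop rebuilt around an Event (my rewrite, not the answerer's code):

import asyncio
import threading

loop = asyncio.new_event_loop()
threading.Thread(target=loop.run_forever, daemon=True).start()

stop_event = threading.Event()  # explicit cross-thread flag instead of a global

async def display(text):
    await asyncio.sleep(5)
    print("echo:", text)
    return text == "exit"

def evaluate(future):
    if future.result():
        print("press enter to exit...")
        stop_event.set()  # Event.set()/is_set() are thread-safe by contract

while not stop_event.is_set():
    text = input("enter text: ")
    future = asyncio.run_coroutine_threadsafe(display(text), loop)
    future.add_done_callback(evaluate)

print("exiting")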

Related

How to run the whole async function in given timeout?

Following up on the post above: the suggested duplicate cannot answer my question.
Right now I have a function f1() which contains a CPU-intensive part and an async IO-intensive part, so f1() itself is an async function. How can I run the whole of f1() with a given timeout? The method provided in that post cannot handle my situation. With the following code, it shows:
RuntimeWarning: coroutine 'f1' was never awaited
  handle = None  # Needed to break cycles when an exception occurs.
import asyncio
import time
import concurrent.futures

executor = concurrent.futures.ThreadPoolExecutor(1)

async def f1():
    print("start sleep")
    time.sleep(3)  # simulate CPU-intensive part
    print("end sleep")
    print("start asyncio.sleep")
    await asyncio.sleep(3)  # simulate IO-intensive part
    print("end asyncio.sleep")

async def process():
    print("enter process")
    loop = asyncio.get_running_loop()
    await loop.run_in_executor(executor, f1)

async def main():
    print("-----f1-----")
    t1 = time.time()
    try:
        await asyncio.wait_for(process(), timeout=2)
    except:
        pass
    t2 = time.time()
    print(f"f1 cost {(t2 - t1)} s")

if __name__ == '__main__':
    asyncio.run(main())
From the previous post, loop.run_in_executor can only work on a normal function, not an async function.
One way to do it is to make process not an async function, so it can run in another thread, and have it start an asyncio loop in that thread to run f1.
Note that starting another loop means you cannot share coroutines and futures between the two loops.
import asyncio
import time
import concurrent.futures

executor = concurrent.futures.ThreadPoolExecutor(1)

async def f1():
    print("start sleep")
    time.sleep(3)  # simulate CPU-intensive part
    print("end sleep")
    print("start asyncio.sleep")
    await asyncio.sleep(3)  # simulate IO-intensive part
    print("end asyncio.sleep")

def process():
    print("enter process")
    asyncio.run(asyncio.wait_for(f1(), 2))

async def main():
    print("-----f1-----")
    t1 = time.time()
    try:
        loop = asyncio.get_running_loop()
        await loop.run_in_executor(executor, process)
    except:
        pass
    t2 = time.time()
    print(f"f1 cost {(t2 - t1)} s")

if __name__ == '__main__':
    asyncio.run(main())
-----f1-----
enter process
start sleep
end sleep
start asyncio.sleep
f1 cost 3.0047199726104736 s
Keep in mind that cancellation can only take effect when f1 returns control to the event loop at an await point, so that the future can be cancelled. You cannot cancel the CPU-intensive part of the code unless it does something like await asyncio.sleep(0), which yields to the event loop momentarily; this is why time.sleep cannot be cancelled.
That is the cause of the issue: the time.sleep in f1 blocks the thread, so asyncio.wait_for cannot enforce the timeout across it. Remove or replace it.
Regarding the RuntimeWarning
RuntimeWarning: coroutine 'f1' was never awaited handle = None # Needed to break cycles when an exception occurs.
It occurs because loop.run_in_executor expects a plain (non-async) callable as its second argument: the executor calls f1(), which merely creates a coroutine object that is never awaited.
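To make the cancellation point concrete, here is a minimal sketch (mine, not from the original answer) of the chunked-CPU pattern: the busy loop stands in for real work split into small pieces, and the await asyncio.sleep(0) between chunks is what lets wait_for's cancellation land:

import asyncio
import time

async def f1():
    print("start CPU part")
    deadline = time.monotonic() + 3
    while time.monotonic() < deadline:  # stands in for chunked CPU work
        await asyncio.sleep(0)  # yield to the loop so cancellation can land
    print("end CPU part")
    await asyncio.sleep(3)  # IO part

async def main():
    t1 = time.time()
    try:
        await asyncio.wait_for(f1(), timeout=2)
    except asyncio.TimeoutError:
        print("timed out")
    print(f"f1 cost {time.time() - t1:.1f} s")

asyncio.run(main())

Run this and f1 is cancelled after roughly 2 seconds instead of running for the full 6.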

How to gracefully end asyncio program with CTRL-C when using loop run_in_executor

The following code requires 3 presses of CTRL-C to end; how can I make it end with only one? (So it works nicely in Docker.)
import asyncio
import time

def sleep_blocking():
    print("Sleep blocking")
    time.sleep(1000)

async def main():
    loop = asyncio.get_event_loop()
    await loop.run_in_executor(None, sleep_blocking)

try:
    asyncio.run(main())
except KeyboardInterrupt:
    print("Nicely shutting down ...")
I've read many asyncio-related questions and answers but can't figure this one out yet. The 1st CTRL-C does nothing, the 2nd prints "Nicely shutting down ..." and then hangs. The 3rd CTRL-C prints an ugly error.
I'm on Python 3.9.10 and Linux.
(edit: updated code per comment by @mkrieger1)
The way to exit immediately and unconditionally from a Python program is by calling os._exit(). If your background threads are in the middle of doing something important, this may not be wise. However, the following program does what you asked (Python 3.10, Windows 10):
import asyncio
import time
import os

def sleep_blocking():
    print("Sleep blocking")
    time.sleep(1000)

async def main():
    loop = asyncio.get_event_loop()
    # run_until_complete cannot be called on a loop that is already
    # running, so await the executor future instead
    await loop.run_in_executor(None, sleep_blocking)

try:
    asyncio.run(main())
except KeyboardInterrupt:
    print("Nicely shutting down ...")
    os._exit(42)
From here we know that it's effectively impossible to kill a task running in a thread executor. If I replace the default thread executor with a ProcessPoolExecutor, I get the behavior you're looking for. Here's the code:
import concurrent.futures
import asyncio
import time

def sleep_blocking():
    print("Sleep blocking")
    time.sleep(1000)

async def main():
    loop = asyncio.get_event_loop()
    x = concurrent.futures.ProcessPoolExecutor()
    await loop.run_in_executor(x, sleep_blocking)

try:
    asyncio.run(main())
except KeyboardInterrupt:
    print("Nicely shutting down ...")
And the result is:
$ python asynctest.py
Sleep blocking
^CNicely shutting down ...

Is there a better way to catch `KeyboardInterrupt` than an infinite loop with asyncio?

A program I am developing has a long-running process in another thread. I would like to interrupt that thread in the event something goes awry.
Other SO posts I've seen on this use syntax similar to the following:
while True:
    if condition_here:
        break
    else:
        await asyncio.sleep(1)
which does work in catching KeyboardInterrupt. However, I'm not a big fan of using while loops like this and would like to avoid it if at all possible.
For some example code, here is what I currently have (which does not catch the interrupts until after the thread is done):
import asyncio
import time
from threading import Thread

def some_long_process():
    time.sleep(60)

async def main():
    thread = Thread(target=some_long_process)
    thread.start()

    # Doesn't work
    loop = asyncio.get_event_loop()
    await loop.run_in_executor(None, thread.join)
    # await asyncio.wait([loop.run_in_executor(None, thread.join)])
    # await asyncio.wait_for(loop.run_in_executor(None, thread.join), None)
    # await asyncio.gather(asyncio.to_thread(thread.join))

    # Works
    # while thread.is_alive():
    #     await asyncio.sleep(1)

if __name__ == '__main__':
    asyncio.run(main())
I'm also open to suggestions to reconsider my entire approach to the way this is designed if this isn't possible. Thanks for your time.
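One direction worth considering (a sketch of my own, not from this thread, and assuming it is acceptable to abandon the worker as a daemon thread on exit): instead of joining the thread, have the worker signal an asyncio.Event via call_soon_threadsafe and await that Event. Awaiting an Event is cancellable, so a single CTRL-C ends the program immediately:

import asyncio
import time
from threading import Thread

def some_long_process(loop, done):
    time.sleep(60)
    loop.call_soon_threadsafe(done.set)  # signal completion into the loop

async def main():
    loop = asyncio.get_running_loop()
    done = asyncio.Event()
    # daemon=True means the process won't wait for this thread at exit
    Thread(target=some_long_process, args=(loop, done), daemon=True).start()
    await done.wait()  # cancellable, unlike a blocking thread.join

if __name__ == '__main__':
    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        print("interrupted")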

Python calling coroutine from normal function

So I have a script for a countdown which looks something like this:
import time, threading, asyncio

def countdown(n, m):
    print("timer start")
    time.sleep(n)
    print("timer stop")
    yield coro1

async def coro1():
    print("coroutine called")

async def coromain():
    print("first")
    t1 = threading.Thread(target=countdown, args=(5, 0))
    t1.start()
    print("second")

loop = asyncio.get_event_loop()
loop.run_until_complete(coromain())
loop.stop()
What I want it to do is simple:
Run coromain
Print "first"
Start thread t1, print "timer start" and have it wait for 5 seconds
In the meantime, print "second"
After 5 seconds, print "timer stop"
Exit
However, when I run this code it outputs:
Run coromain
Print "first"
Print "second"
exit
I'm so confused as to why it does this. Can anyone explain what I'm doing wrong here?
This depends on whether your question is part of a bigger problem imposing additional constraints, but I do not see a reason to use threading here. Instead, you can use two separate Tasks running in the same event loop, which is one of the main points of asynchronous programming:
import asyncio

async def countdown(n, m):  # <- coroutine function
    print("timer start")
    await asyncio.sleep(n)
    print("timer stop")
    await coro1()

async def coro1():
    print("coroutine called")

async def coromain():
    print("first")
    asyncio.ensure_future(countdown(5, 0))  # create a new Task
    print("second")

loop = asyncio.get_event_loop()
loop.run_until_complete(coromain())  # run coromain() from sync code
pending = asyncio.Task.all_tasks()  # get all pending tasks
loop.run_until_complete(asyncio.gather(*pending))  # wait for tasks to finish normally
Output:
first
second
timer start
(5 second wait)
timer stop
coroutine called
When using ensure_future, you effectively make a new "execution thread" (see fibers) inside a single OS thread.
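Note that asyncio.Task.all_tasks() is deprecated in newer Pythons; on 3.7+ the same structure is usually written with asyncio.run and create_task. A minimal sketch of that modern form (my rewrite, not the answerer's code):

import asyncio

async def countdown(n):
    print("timer start")
    await asyncio.sleep(n)
    print("timer stop")

async def coromain():
    print("first")
    task = asyncio.create_task(countdown(5))  # runs concurrently with coromain
    print("second")
    await task  # keep the loop alive until the countdown finishes

asyncio.run(coromain())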
After some digging I crafted this workaround. It might not be pretty, but it works:
import time, threading, asyncio

def countdown(n, m):
    print("timer start")
    time.sleep(n)
    print("timer stop")
    looptemp = asyncio.new_event_loop()
    asyncio.set_event_loop(looptemp)
    loop2 = asyncio.get_event_loop()
    loop2.run_until_complete(coro1())
    loop2.close()

async def coro1():
    print("coroutine called")

async def coromain():
    print("first")
    t1 = threading.Thread(target=countdown, args=(5, 0))
    t1.start()
    print("second")

loop = asyncio.get_event_loop()
loop.run_until_complete(coromain())
loop.stop()
It unfortunately doesn't work for my specific usecase, but I thought it might be useful.
I'm so confused as to why it does this. Can anyone explain what I'm doing wrong here?
There is already an accepted answer showing how to achieve what you want, but just to explain why your code produced the output it did:
The coroutine coromain starts countdown in a thread but does not wait for it to finish, so your program can exit before the thread has done its work. More importantly, the yield coro1 line makes countdown a generator function: calling it (which is what Thread does with its target) merely creates a generator object, so the body never runs and nothing is printed.
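A minimal sketch (mine, not from this answer) to see both points in isolation, with no asyncio involved: remove the yield so the body actually runs, and join the thread so the program waits for it:

import time, threading

def countdown(n, m):
    # a plain function: with no yield, the body actually executes in the thread
    print("timer start")
    time.sleep(n)
    print("timer stop")

t1 = threading.Thread(target=countdown, args=(5, 0))
t1.start()
print("second")
t1.join()  # wait for the timer before exiting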

Python queue linking object running asyncio coroutines with main thread input

I have a script running where the main thread takes input from stdin and then passes it to a child thread using a queue. In the child thread I'm using asyncio coroutines to spin up a listener on a socket and wait for connections. Once a connection is made I can now send data through the listener from the main thread.
It all seems to work well enough, but since asyncio.BaseEventLoop is not thread-safe, am I going to run into problems?
This is my attempt to solve the problem of using a blocking library like python's cmd module with asyncio.
My code is below.
import sys
import asyncio
from time import sleep
from threading import Thread
from queue import Queue

stdin_q = Queue()

clients = {}  # task -> (reader, writer)

def client_connected_handler(client_reader, client_writer):
    # Start a new asyncio.Task to handle this specific client connection
    task = asyncio.Task(handle_client(client_reader, client_writer))
    clients[task] = (client_reader, client_writer)

    def client_done(task):
        # When the task that handles the specific client connection is done
        del clients[task]

    # Add the client_done callback to be run when the future becomes done
    task.add_done_callback(client_done)

@asyncio.coroutine
def handle_client(client_reader, client_writer):
    # Handle the requests for a specific client with a line-oriented protocol
    while True:
        cmd = yield from get_input()
        client_writer.write(cmd.encode())
        data = yield from client_reader.read(1024)
        print(data.decode(), end="", flush=True)

@asyncio.coroutine
def get_input():
    while True:
        try:
            return stdin_q.get()
        except:
            pass

class Control:
    def start(self):
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        self.loop = asyncio.get_event_loop()
        server = self.loop.run_until_complete(asyncio.start_server(client_connected_handler, '0.0.0.0', 2222))
        self.loop.run_forever()
        self.stop()

    def stop(self):
        self.loop.stop()
        self.loop.close()

def fire_control():
    con = Control()
    con.start()

if __name__ == "__main__":
    stdin_q.put("\n")
    t = Thread(target=fire_control)
    t.start()
    sleep(2)
    _cmd = ""
    while _cmd.lower() != "exit":
        _cmd = input("")
        if _cmd == "":
            _cmd = "\r\n"
        stdin_q.put(_cmd)
This isn't going to work quite right, because the call to stdin_q.get() is going to block your event loop. This means that if your server has multiple clients, all of them will be completely blocked by whichever one happens to reach stdin_q.get() first, until you send data into the queue. The simplest way around this is to use BaseEventLoop.run_in_executor to run stdin_q.get in a background ThreadPoolExecutor, which lets you wait for it without blocking the event loop:
@asyncio.coroutine
def get_input():
    loop = asyncio.get_event_loop()
    return (yield from loop.run_in_executor(None, stdin_q.get))  # None == use default executor
Edit (1/27/16):
There is a library called janus, which provides an asyncio-friendly, thread-safe queue implementation.
Using that library, your code would look like this (I left out unchanged parts):
...
import janus

loop = asyncio.new_event_loop()
stdin_q = janus.Queue(loop=loop)
...

@asyncio.coroutine
def get_input():
    return (yield from stdin_q.async_q.get())

class Control:
    def start(self):
        asyncio.set_event_loop(loop)
        self.loop = asyncio.get_event_loop()
        server = self.loop.run_until_complete(asyncio.start_server(client_connected_handler, '0.0.0.0', 2222))
        self.loop.run_forever()
        self.stop()

    def stop(self):
        self.loop.stop()
        self.loop.close()
...

if __name__ == "__main__":
    stdin_q.sync_q.put("\n")
    t = Thread(target=fire_control)
    t.start()
    sleep(2)
    _cmd = ""
    while _cmd.lower() != "exit":
        _cmd = input("")
        if _cmd == "":
            _cmd = "\r\n"
        stdin_q.sync_q.put(_cmd)
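One caveat if you try this today: in current janus releases (1.x, to the best of my knowledge) the loop= argument is gone and the queue must be created while a loop is running. A minimal sketch under that assumption:

import asyncio
import janus

async def main():
    queue = janus.Queue()  # janus 1.x: create inside a running loop; no loop= argument
    queue.sync_q.put("hello")         # thread-safe side, for the input thread
    print(await queue.async_q.get())  # asyncio side, for the coroutines

asyncio.run(main())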
