Convert a simple multithreaded program with asyncio - python

I am still pretty new to Python asyncio, so I am trying to convert a simple problem I solved with multithreading to asyncio.
I made an example of what I want to achieve.
Each MiniBot instance can start at a random time (the time.sleep() calls in main represent instantiations at unpredictable times).
I am expecting each MiniBot to run in parallel if it starts before the others have finished.
All good with multithreading, but when I translated the problem to async coroutines, I can't get them to run concurrently.
I could use gather, but that would require having all the tasks at the beginning, which I don't.
Any suggestions?
Thanks
Oh yes, I am using Python 3.6
Multithreaded version
import threading
import time

class MiniBot(object):
    def __init__(self, _id: str):
        self._id = _id
        self.alive = True
        self.start_time = time.time()
        self.th = threading.Thread(target=self.run)
        self.internal_state = 0

    def _foo(self):
        self.internal_state += 1

    def run(self):
        while self.alive:
            self._foo()
            if time.time() - self.start_time > 4:
                print(f"Killing minibot: {self._id}")
                print(f"Var is: {self.internal_state}")
                self.stop()
            time.sleep(0.1)

    def start(self):
        print(f"Starting Minibot {self._id}")
        self.th.start()

    def stop(self):
        self.alive = False

if __name__ == "__main__":
    # MiniBots activities start at random times but should coexist
    MiniBot('a').start()
    time.sleep(2)
    MiniBot('b').start()
    time.sleep(1.5)
    MiniBot('c').start()
Output:
Starting Minibot a
Starting Minibot b
Starting Minibot c
Killing minibot: a
Var is: 40
Killing minibot: b
Var is: 40
Killing minibot: c
Var is: 40
Async version (Not behaving as I hoped)
import asyncio
import time

class MiniBot(object):
    def __init__(self, _id: str):
        self._id = _id
        self.alive = True
        self.last_event = time.time()
        self.internal_state = 0

    async def _foo(self):
        self.internal_state += 1
        asyncio.sleep(2)

    async def run(self):
        while self.alive:
            await self._foo()
            if time.time() - self.last_event > 4:
                print(f"Killing minibot: {self._id}")
                print(f"Var is: {self.internal_state}")
                self.stop()
            asyncio.sleep(0.1)

    def start(self):
        print(f"Starting Minibot {self._id}")
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        try:
            loop.run_until_complete(self.run())
        finally:
            loop.run_until_complete(loop.shutdown_asyncgens())
            loop.close()

    def stop(self):
        self.alive = False

if __name__ == "__main__":
    # MiniBots activities start at random times but should coexist
    MiniBot('a').start()
    time.sleep(2)
    MiniBot('b').start()
    time.sleep(1.5)
    MiniBot('c').start()
Output:
Starting Minibot a
Killing minibot: a
Var is: 2839119
Starting Minibot b
Killing minibot: b
Var is: 2820634
Starting Minibot c
Killing minibot: c
Var is: 2804579

start cannot call run_until_complete because run_until_complete runs a coroutine through to the end, whereas you need multiple coroutines running in parallel. The asyncio equivalent of creating and starting a thread is asyncio.create_task(), so start() should call that, and return to the caller, like in the threaded version.
For example:
import asyncio, time

class MiniBot:
    def __init__(self, _id):
        self._id = _id
        self.alive = True
        self.start_time = time.time()
        self.internal_state = 0

    def _foo(self):
        self.internal_state += 1

    async def run(self):
        while self.alive:
            self._foo()
            if time.time() - self.start_time > 4:
                print(f"Killing minibot: {self._id}")
                print(f"Var is: {self.internal_state}")
                self.stop()
            await asyncio.sleep(0.1)

    def start(self):
        print(f"Starting Minibot {self._id}")
        # asyncio.create_task() is Python 3.7+; on 3.6 use asyncio.ensure_future(self.run())
        return asyncio.create_task(self.run())

    def stop(self):
        self.alive = False

async def main():
    taska = MiniBot('a').start()
    await asyncio.sleep(2)
    taskb = MiniBot('b').start()
    await asyncio.sleep(1.5)
    taskc = MiniBot('c').start()
    await asyncio.gather(taska, taskb, taskc)

if __name__ == "__main__":
    #asyncio.run(main())  # Python 3.7+
    asyncio.get_event_loop().run_until_complete(main())
Don't let the call to gather() throw you off: gather simply gives control back to the event loop and returns when all the provided tasks/futures have finished. You could replace it with something like await asyncio.sleep(10) and get the same effect (in this example). And if you have an unpredictable number of futures, there are other ways to signal the end-condition.
Also note that you need to await asyncio.sleep(), otherwise it has no effect.
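For instance, one end-condition for an unpredictable number of bots is to keep the running tasks in a set and exit once the set drains. This is only a minimal sketch, not from the answer above; the start_bot helper and the running set are my own names:
import asyncio, random

running = set()

def start_bot(coro):
    # Track each task so we can wait for all of them at shutdown,
    # even though we don't know up front how many there will be.
    task = asyncio.ensure_future(coro)
    running.add(task)
    task.add_done_callback(running.discard)
    return task

async def bot(_id):
    await asyncio.sleep(random.uniform(1, 3))
    print(f"bot {_id} done")

async def main():
    for i in range(3):
        start_bot(bot(i))
        await asyncio.sleep(random.random())  # bots start at unpredictable times
    while running:                            # end-condition: no tasks left
        await asyncio.wait(list(running))

asyncio.get_event_loop().run_until_complete(main())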

Related

Stop asyncio task, that was started inside a function in a class

I am trying to cancel a specific asyncio task that was started in a function inside a class, but it doesn't work: the task starts up again.
Thanks for some inspiration! :)
def button_stop_command():
    t1.cancel()
    # check which tasks are running
    tasks = asyncio.all_tasks()
    for task in tasks:
        print(f'> {task.get_name()}, {task.get_coro()}')

class ZMQHandler():
    def __init__(self):
        self.loop = asyncio.get_event_loop()
        url = 'tcp://192.168.0.91:28332'
        channel = 'sequence'
        self.ctx = zmq.asyncio.Context.instance()
        self.sock = self.ctx.socket(zmq.SUB)
        self.sock.connect(url)
        self.sock.setsockopt(zmq.SUBSCRIBE, channel.encode())
        print("Open ZMQ socket on", ZMQ_URL)

    async def handle(self):
        [..code...]
        asyncio.ensure_future(self.handle())

    def start(self):
        global t1
        self.loop.add_signal_handler(signal.SIGINT, self.stop)
        t1 = self.loop.create_task(self.handle())
        self.loop.run_forever()

async def tk_main(root):
    while True:
        root.update()
        await asyncio.sleep(0.05)

tkmain = asyncio.ensure_future(tk_main(root))
daemon = ZMQHandler()
daemon.start()
I want to cancel a specific task.
Every time I post something I get a new idea, and then the problem gets solved. My idea was:
def button_stop_command():
    t1.cancel()
    # check which tasks are running
    tasks = asyncio.all_tasks()
    for task in tasks:
        print(f'> {task.get_name()}, {task.get_coro()}')

class ZMQHandler():
    def __init__(self):
        self.loop = asyncio.get_event_loop()
        url = 'tcp://192.168.0.91:28332'
        channel = 'sequence'
        self.ctx = zmq.asyncio.Context.instance()
        self.sock = self.ctx.socket(zmq.SUB)
        self.sock.connect(url)
        self.sock.setsockopt(zmq.SUBSCRIBE, channel.encode())
        print("Open ZMQ socket on", ZMQ_URL)

    async def handle(self):
        global t1
        [..code...]
        t1 = asyncio.ensure_future(self.handle())

    def start(self):
        self.loop.add_signal_handler(signal.SIGINT, self.stop)
        self.loop.create_task(self.handle())
        self.loop.run_forever()

async def tk_main(root):
    while True:
        root.update()
        await asyncio.sleep(0.05)

tkmain = asyncio.ensure_future(tk_main(root))
daemon = ZMQHandler()
daemon.start()
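The global works, but the same idea can live on the instance: since handle() reschedules itself, keep a reference to the current task on the handler object and cancel through a method. A minimal sketch under that assumption (the current_task attribute and stop_handle() method are hypothetical names, not from the post above, and the ZMQ work is replaced by a sleep):
import asyncio

class Handler:
    def __init__(self):
        self.current_task = None  # always points at the live incarnation

    async def handle(self):
        await asyncio.sleep(1)  # stand-in for the ZMQ work
        print('handled one message')
        # reschedule, updating the reference so cancel() hits the right task
        self.current_task = asyncio.ensure_future(self.handle())

    def start(self):
        self.current_task = asyncio.ensure_future(self.handle())

    def stop_handle(self):
        if self.current_task is not None:
            self.current_task.cancel()  # cancels the *current* incarnation

async def demo():
    h = Handler()
    h.start()
    await asyncio.sleep(2.5)
    h.stop_handle()
    await asyncio.sleep(0)  # let the cancellation be processed

asyncio.get_event_loop().run_until_complete(demo())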

How to exit a while loop in a multiprocessing pool process from the main script in python

EDIT 3: See last example at the end.
I need a while loop doing continuous send and return operations with a USB connection.
During this continuous operation I need (amongst other stuff in my main script) a few identical and isolated send/return operations on that same USB connection.
This seems to require multiprocessing and some tweaking.
I want to use the following workaround with the multiprocessing library:
Put the continuous send/return operation on a different thread with a pool (apply_async).
Put this process on "hold" when I perform the isolated send/return operation (using clear()).
Immediately after the isolated send/return operation resume the continuous send/return (using set()).
Stop the continuous send/return when I reach the end of the main script (I have no solution for this yet; it should be x.stop() or something like this, since terminate() won't do).
Get some return value from the stopped process (use get()).
I tried a couple of things already, but I just can't exit the while loop via a command from main.
import multiprocessing
import time

def setup(event):
    global unpaused
    unpaused = event

class func:
    def __init__(self):
        self.finished = False

    def stop(self):
        self.finished = True

    def myFunction(self, arg):
        i = 0
        s = []
        while self.finished == False:
            unpaused.wait()
            print(arg + i)
            s.append(arg + i)
            i = i + 1
            time.sleep(1)
        return s

if __name__ == "__main__":
    x = func()
    event = multiprocessing.Event()  # initially unset, so workers will be paused at first
    pool = multiprocessing.Pool(1, setup, (event,))
    result = pool.apply_async(x.myFunction, (10,))
    print('We unpause for 2 sec')
    event.set()  # unpause
    time.sleep(2)
    print('We pause for 2 sec')
    event.clear()  # pause
    time.sleep(2)
    print('We unpause for 2 sec')
    event.set()  # unpause
    time.sleep(2)
    print('Now we try to terminate in 2 sec')
    time.sleep(2)
    x.stop()
    return_val = result.get()
    print('get worked with ' + str(return_val))
Can someone point me in the right direction? As seen, this won't stop with x.stop().
Global values also do not work.
Thanks in advance.
EDIT:
As suggested, I tried to put the multiprocessing in a separate object.
Is this done by putting the functions in a class, like my example below?
import multiprocessing
import time

class func(object):
    def __init__(self):
        self.event = multiprocessing.Event()  # initially unset, so workers will be paused at first
        self.pool = multiprocessing.Pool(1, self.setup, (self.event,))

    def setup(self):
        global unpaused
        unpaused = self.event

    def stop(self):
        self.finished = True

    def resume(self):
        self.event.set()  # unpause

    def hold(self):
        self.event.clear()  # pause

    def run(self, arg):
        self.pool.apply_async(self.myFunction, (arg,))

    def myFunction(self, arg):
        i = 0
        s = []
        self.finished = False
        while self.finished == False:
            unpaused.wait()
            print(arg + i)
            s.append(arg + i)
            i = i + 1
            time.sleep(1)
        return s
if __name__ == "__main__":
x=func()
result = x.run(10)
print('We unpause for 2 sec')
x.resume() # unpause
time.sleep(2)
print('We pause for 2 sec')
x.hold() # pause
time.sleep(2)
print('We unpause for 2 sec')
x.resume() # unpause
time.sleep(2)
print('Now we try to terminate in 2 sec')
time.sleep(2)
x.stop()
return_val = result.get()
print('get worked with '+str(return_val))
I added hold and resume functions and put the setup function in a single class.
But the lower example won't even run the function anymore.
What a complex little problem. I am puzzled by this.
EDIT 2:
I tried a workaround with what I found so far.
Big trouble came in while using the multiprocessing.pool library.
It is not straightforward to use it with the USB connection...
I produced a mediocre workaround below:
from multiprocessing.pool import ThreadPool
import time

class switch:
    state = 1

s1 = switch()

def myFunction(arg):
    i = 0
    while s1.state == 1 or s1.state == 2 or s1.state == 3:
        if s1.state == 1:
            print(arg + i)
            s.append(arg + i)
            i = i + 1
            time.sleep(1)
        elif s1.state == 2:
            print('we entered snippet mode (state 2)')
            time.sleep(1)
            x = s
            return x
            pool.close()
            pool.join()
        elif s1.state == 3:
            while s1.state == 3:
                time.sleep(1)
                print('holding (state 3)')
    return s

if __name__ == "__main__":
    global s
    s = []
    print('we set the state in the class on top to ' + str(s1.state))
    pool = ThreadPool(processes=1)
    async_result = pool.apply_async(myFunction, (10,))
    print('in 5 sec we switch mode sir, buckle up')
    time.sleep(5)
    s1.state = 2
    print('we switched for a snippet which is')
    snippet = async_result.get()
    print(str(snippet[-1]) + ' this snippet comes from main')
    time.sleep(1)
    print('now we return to see the full list in the end')
    s1.state = 1
    async_result = pool.apply_async(myFunction, (10,))
    print('in 5 sec we hold it')
    time.sleep(5)
    s1.state = 3
    print('in 5 sec we exit')
    time.sleep(5)
    s1.state = 0
    return_val = async_result.get()
    print('Success if you see a list of numbers ' + str(return_val))
EDIT 3:
from multiprocessing.pool import ThreadPool
import time

class switch:
    state = 1

s1 = switch()

def myFunction(arg):
    i = 0
    while s1.state == 1 or s1.state == 2:
        if s1.state == 1:
            print(arg + i)
            s.append(arg + i)
            i = i + 1
            time.sleep(1)
        elif s1.state == 2:
            print('we entered snippet mode (state 2)')
            time.sleep(1)
            x = s
            return x
            pool.close()  # These are not relevant, I guess.
            pool.join()   # These are not relevant, I guess.
    return s

if __name__ == "__main__":
    global s
    s = []
    print('we set the state in the class on top to ' + str(s1.state))
    pool = ThreadPool(processes=1)
    async_result = pool.apply_async(myFunction, (10,))
    print('in 5 sec we switch mode sir, buckle up')
    time.sleep(5)
    s1.state = 2
    snippet = async_result.get()
    print(str(snippet[-1]) + ' this snippet comes from main')
    time.sleep(1)
    print('now we return to see the full list in the end')
    s1.state = 1
    async_result = pool.apply_async(myFunction, (10,))
    print('in 5 sec we exit')
    time.sleep(5)
    s1.state = 0
    return_val = async_result.get()
    print('Success if you see a list of numbers ' + str(return_val))
Well, this is what I have come up with...
Not great, not terrible. Maybe a bit more on the terrible side (:
I hate that I have to re-call pool.apply_async(myFunction, (10,)) after I grab a single piece of data.
Currently only ThreadPool works with no further code changes in my actual script!
In a situation where I need a process to run continuously while occasionally doing other things, I like to use asyncio. This is a rough draft of how I would approach this:
import asyncio

class MyObject:
    def __init__(self):
        self.mydatastructure = []
        self.finished = False
        self.loop = None

    async def main_loop(self):
        while not self.finished:
            new_result = self.get_data()  # placeholder: fetch one item, not defined in this draft
            self.mydatastructure.append(new_result)
            await asyncio.sleep(0)  # yield to the event loop so other coroutines can run

    async def timed_loop(self):
        while not self.finished:
            await asyncio.sleep(2)
            self.dotimedtask(self.mydatastructure)  # placeholder: the occasional task, not defined here

    async def run(self):
        await asyncio.gather(self.main_loop(), self.timed_loop())

asyncio.run(MyObject().run())
Only one coroutine runs at a time, with the timed one being scheduled once every 2 seconds. It always gets the data passed out of the most recent continuous execution. You could keep a connection open on an object as well. Depending on your requirements (is it a 2-second interval, or once every other second no matter how long it takes?), there are library packages that make the scheduling a bit more elegant.
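To get the original hold/resume behaviour in this style, an asyncio.Event can stand in for the multiprocessing.Event: the continuous loop awaits event.wait() each iteration, and main() clears/sets it around the isolated operation. A minimal sketch along those lines (the gate name and structure are my own, not from the answer above):
import asyncio

async def continuous(gate, results):
    i = 0
    while True:
        await gate.wait()  # pauses here whenever the gate is cleared
        results.append(i)
        i += 1
        await asyncio.sleep(1)

async def main():
    gate = asyncio.Event()
    gate.set()  # start unpaused
    results = []
    task = asyncio.ensure_future(continuous(gate, results))
    await asyncio.sleep(2)
    gate.clear()  # hold: the isolated send/return operation could run here
    await asyncio.sleep(2)
    gate.set()    # resume
    await asyncio.sleep(2)
    task.cancel()  # stop the loop at the end of the script
    try:
        await task
    except asyncio.CancelledError:
        pass
    print('collected:', results)  # the "return value" of the stopped worker

asyncio.get_event_loop().run_until_complete(main())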

set_exception_handler ignored in python3.6 with asyncio

I'm basically creating an object that needs to perform a number of tasks in async mode (plus other things, but I've tried to simplify here). Here is a snippet of code for the object itself. It's successful (thanks to Lynn Root's example) at handling signals, but not at handling exceptions. Or at least not in the way I am hoping to handle them.
class myobj(object):
    def __init__(self, loop: asyncio.AbstractEventLoop):
        self.shutdown = False
        self.loop = loop

    async def graceful_shutdown(self, s=None):
        if s is not None:
            logging.warning(f'Receiving signal {s.name}.')
        else:
            logging.warning(f'Shutting NOT via signal')
        logging.warning(f'Initiating cancellation of {len(self.tasks)} tasks...')
        [task.cancel() for task in self.tasks]
        logging.warning(f'Gathering output of cancellation of {len(self.tasks)} tasks...')
        await asyncio.gather(*self.tasks, loop=self.loop, return_exceptions=True)
        logging.warning('Done graceful shutdown of subtasks')
        # Mark main task to shutdown
        self.shutdown = True

    async def run(self):
        i = 0
        taskx = self.loop.create_task(self.task_x())
        self.tasks = [taskx]
        while not self.shutdown:
            await asyncio.sleep(1)
            print(f'Main runner... {i}')
            i += 1
        logging.warning('Main runner is over.')

    async def task_x(self):
        logging.warning('Starting task X')
        i = 0
        while True:
            await asyncio.sleep(2.25)
            print(f'Doing task x... {i}')
            if i == 2:
                raise RuntimeError('BOOM X!')
            i += 1
At this point, from the "main" I need to install a few things and create the loop:
def main():
    try:
        global newobj
        loop = asyncio.get_event_loop()
        logging.warning(f'Installing exception handler')
        loop.set_exception_handler(handle_exception)
        logging.warning(f'Creating main object')
        newobj = myobj(loop)
        logging.warning(f'Installing signal handlers')
        signals = (signal.SIGINT, signal.SIGTERM)
        for s in signals:
            loop.add_signal_handler(s, lambda s=s: loop.create_task(newobj.graceful_shutdown(s)))
        logging.warning(f'Running object...')
        loop.run_until_complete(newobj.run())
    finally:
        loop.close()
        logging.warning(f'object is Shutdown - Exiting program.')
        sys.exit(0)

if __name__ == "__main__":
    main()
But the handle_exception needs to be defined.
def handle_exception(loop, context):
    # context["message"] will always be there; but context["exception"] may not
    msg = context.get("exception", context["message"])
    logging.error(f'Caught exception: {msg}')
    logging.info(f'Shutting down from exception.')
    loop.create_task(newobj.graceful_shutdown())
The problem is that handle_exception is never called. I need to be running this on Python 3.6 for some reason. What am I missing here?
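For what it's worth, the loop's exception handler only fires for exceptions asyncio considers unhandled, typically when a failed task is garbage-collected without anyone having retrieved its exception. In the code above, taskx stays referenced in self.tasks and is eventually gathered with return_exceptions=True, so its RuntimeError never reaches the handler. A minimal sketch demonstrating when the handler does fire (a hypothetical demo, not from the question):
import asyncio, gc

def handle_exception(loop, context):
    print('handler called:', context['message'])

async def boom():
    raise RuntimeError('BOOM')

async def main(loop):
    task = loop.create_task(boom())
    await asyncio.sleep(0.1)  # let the task fail; nobody awaits it
    del task                  # drop the last reference...
    gc.collect()              # ...so the task is finalized with an unretrieved exception

loop = asyncio.get_event_loop()
loop.set_exception_handler(handle_exception)
loop.run_until_complete(main(loop))
loop.close()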

Python asyncio: interruptable task

I am trying to make a barebones skeleton fighting game with python asyncio.
class Skeleton(Creature):
    pass

class SkeletonAI():
    def __init__(self, skeleton, loop=None):
        self.loop = loop or asyncio.new_event_loop()
        self.skeleton = skeleton
        self.action_task = None

    async def run(self):
        while True:
            #print(self.action_task, )
            if self.skeleton.alive and self.skeleton.target.alive:
                if self.skeleton.state == 'idle':
                    # ATTACK
                    self.skeleton.begin_attack()
                    self.action_task = self.loop.call_later(3, self.skeleton.action_complete)
            else:
                break

class Player(Creature):
    def attack_target(self, target):
        target.take_damage(self.damage)
        if target.state == 'attacking':
            target.state = 'idle'
            # interrupt attack

class Game():
    # Super simple game
    # The skeleton launches an attack, press anything to interrupt it

    async def handle_sending(self):
        loop = asyncio.get_event_loop()
        executor = concurrent.futures.ThreadPoolExecutor(
            max_workers=1,
        )
        while True:
            msg = await loop.run_in_executor(executor, input)
            print('got a message')
            if self.skeleton_ai.action_task:
                print('cancelling attack')
                self.skeleton_ai.action_task.cancel()
                self.skeleton_ai.skeleton.machine.set_state('idle')
                print('cancelled attack')
                self.skeleton_ai.action_task = None

    async def game_loop(self):
        player_task = asyncio.ensure_future(self.handle_sending())
        skeleton_task = asyncio.ensure_future(self.skeleton_ai.run())

    def __init__(self):
        self.task = None
        self.loop = asyncio.get_event_loop()
        self.player = Player(name='ply')
        self.skeleton_ai = SkeletonAI(skeleton=Skeleton(name='bobby'))
        self.skeleton_ai.skeleton.target = self.player
        self.loop.run_until_complete(self.game_loop())
        try:
            self.loop.run_forever()
        finally:
            pass
            loop.close()

Game()
Here's what I am trying to do:
Player input and game output are async, so input() doesn't block. This works.
The skeleton prepares an attack; if it's not interrupted within 3 seconds, the attack deals damage to the player.
The player can input any text to interrupt the skeleton's attack.
How can I implement the skeleton's attack? I want a task I can interrupt at will and that calls a callback later. Currently everything just gets stuck, and the call_later callback never fires.
This is the pattern for an async function with a timeout and a callback function. The key is to catch the asyncio.TimeoutError and do your timeout logic there. The function that is cancelled will not continue after its current await position.
import asyncio

async def slow_function(seconds):
    print('starting slow computations')
    await asyncio.sleep(seconds)
    print('slow computations done')

async def timeout_callback():
    print('timeout called')

async def timeout_with_cb(fut, timeout, timeout_fut):
    try:
        await asyncio.wait_for(fut, timeout)
    except asyncio.TimeoutError:
        await timeout_fut

loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.ensure_future(
    timeout_with_cb(slow_function(2), 1,
                    timeout_callback())))
This will print:
starting slow computations
timeout called
I guess this can help you to adapt your example (the provided example does not compile).
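Applied to the game, the same idea can be inverted: treat the attack wind-up as the slow function and let player input cancel it. A rough sketch under that assumption (the wind_up_attack name and structure are illustrative, not the asker's classes):
import asyncio

async def wind_up_attack(target):
    await asyncio.sleep(3)  # the 3-second wind-up
    print(f'attack lands on {target}')

async def game():
    attack = asyncio.ensure_future(wind_up_attack('ply'))
    await asyncio.sleep(1)  # pretend the player typed something after 1 second
    attack.cancel()         # interrupt the attack at will
    try:
        await attack
    except asyncio.CancelledError:
        print('attack interrupted')

asyncio.get_event_loop().run_until_complete(game())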

Asyncio print status of coroutines progress

I have a bunch of coroutines doing some work:
@asyncio.coroutine
def do_work():
    global COUNTER
    result = ...
    if result.status == 'OK':
        COUNTER += 1
and another one
COUNTER = 0

@asyncio.coroutine
def display_status():
    while True:
        print(COUNTER)
        yield from asyncio.sleep(1)
which has to display how many coroutines have finished their work. How do I properly implement this? The following solution doesn't work:
@asyncio.coroutine
def spawn_jobs():
    coros = []
    for i in range(10):
        coros.append(asyncio.Task(do_work()))
    yield from asyncio.gather(*coros)

if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    loop.create_task(display_status())
    loop.run_until_complete(spawn_jobs())
    loop.close()
I expect the counter to be printed to the console every second, no matter what the do_work() coroutines do. But I have just two outputs: 0 and, after a few seconds, a repeating 10.
But I have just two outputs: 0 and, after a few seconds, a repeating 10.
I can't reproduce it. If I use:
import asyncio
import random

@asyncio.coroutine
def do_work():
    global COUNTER
    yield from asyncio.sleep(random.randint(1, 5))
    COUNTER += 1
I get the output like this:
0
0
4
6
8
Task was destroyed but it is pending!
task: <Task pending coro=<display_status() running at most_wanted.py:16> wait_for=<Future pending cb=[Task._wakeup()] created at ../Lib/asyncio/tasks.py:490> created at most_wanted.py:27>
The infinite loop in display_status() causes the warning at the end. To avoid the warning, exit the loop when all tasks in the batch are done:
#!/usr/bin/env python3
import asyncio
import random
from contextlib import closing
from itertools import cycle

class Batch:
    def __init__(self, n):
        self.total = n
        self.done = 0

    async def run(self):
        await asyncio.wait([self.do_work() for _ in range(self.total)])

    def running(self):
        return self.done < self.total

    async def do_work(self):
        await asyncio.sleep(random.randint(1, 5))  # do some work here
        self.done += 1

    async def display_status(self):
        while self.running():
            await asyncio.sleep(1)
            print('\rdone:', self.done)

    async def display_spinner(self, char=cycle(r'/|\-')):
        while self.running():
            print('\r' + next(char), flush=True, end='')
            await asyncio.sleep(.3)

with closing(asyncio.get_event_loop()) as loop:
    batch = Batch(10)
    loop.run_until_complete(asyncio.wait([
        batch.run(), batch.display_status(), batch.display_spinner()]))
Output
done: 0
done: 2
done: 3
done: 4
done: 10
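A portability note, as an aside: passing bare coroutines to asyncio.wait() is deprecated since Python 3.8 and rejected on 3.11+, so on modern interpreters you would wrap them in tasks first. A sketch of an adjusted entry point, assuming the Batch class from the answer above:
import asyncio

async def main():
    batch = Batch(10)  # assumes the Batch class defined above
    # wrap coroutines in tasks explicitly; asyncio.wait() no longer accepts
    # bare coroutines on Python 3.11+
    tasks = [asyncio.create_task(c) for c in
             (batch.run(), batch.display_status(), batch.display_spinner())]
    await asyncio.wait(tasks)

asyncio.run(main())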
Solution using threading module
SHOW_STATUS = True

def display_status_sync():
    while SHOW_STATUS:
        print(S_REQ)
        time.sleep(1)

if __name__ == '__main__':
    new_thread = threading.Thread(target=display_status_sync)
    new_thread.start()
    loop.run_until_complete(spawn_jobs())
    SHOW_STATUS = False
    new_thread.join()
    loop.close()
But I want to achieve similar functionality using asyncio coroutines. Is it possible to do that?
