I have a thread which listens for new messages from RabbitMQ using pika. After configuring the connection with BlockingConnection, I start consuming messages through start_consuming. How can I interrupt the start_consuming call to, for example, stop the thread gracefully?
You can use the consume generator instead of start_consuming.
import threading

import pika


class WorkerThread(threading.Thread):
    def __init__(self):
        super(WorkerThread, self).__init__()
        self._is_interrupted = False

    def stop(self):
        self._is_interrupted = True

    def run(self):
        connection = pika.BlockingConnection(pika.ConnectionParameters())
        channel = connection.channel()
        channel.queue_declare("queue")
        # inactivity_timeout makes the generator yield periodically even
        # when no message arrives, so the stop flag is checked regularly.
        for message in channel.consume("queue", inactivity_timeout=1):
            if self._is_interrupted:
                break
            if not message or message[0] is None:
                # Inactivity timeout: no delivery this round.
                continue
            method, properties, body = message
            print(body)
        channel.cancel()
        connection.close()


def main():
    thread = WorkerThread()
    thread.start()
    # some main thread activity ...
    thread.stop()
    thread.join()


if __name__ == "__main__":
    main()
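The inactivity_timeout is what keeps the loop responsive: without it, consume blocks until the next delivery arrives and the stop flag would never be checked. Note that, depending on your pika version, a timed-out iteration yields either None or a (None, None, None) tuple, which is why the loop skips empty deliveries before unpacking.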
I want to use Interactive Brokers' API, which opens a TCP connection on a different thread.
The problem is that the app.run() function, which has to be called to establish the TCP connection, apparently uses a while True loop to process its queue, and that blocks every way I know of to terminate the thread when exiting the program.
class TradeApp(EWrapper, EClient):
    def __init__(self):
        EClient.__init__(self, self)
    #...

def websocket_con():
    app.run()

app = TradeApp()
app.connect("127.0.0.1", 7497, clientId=1)
con_thread = threading.Thread(target=websocket_con, daemon=True)
con_thread.start()
I've tried using both a daemon thread and a regular one.
I've also tried using events.
But whatever I do, the thread never seems to exit from the app.run() call.
class TradingApp(EWrapper, EClient):
    def __init__(self):
        EClient.__init__(self, self)
    #...

def websocket_conn():
    app.run()
    event.wait()
    if event.is_set():
        app.close()

event = threading.Event()
app = TradingApp()
app.connect("127.0.0.1", 7497, clientId=1)
conn_thread = threading.Thread(target=websocket_conn)
conn_thread.start()
#...
event.set()
Am I doing something wrong?
How could I exit from app.run() function?
I think you've misunderstood how the thread executes: the statements inside websocket_conn() run sequentially, so event.wait() and if event.is_set(): are never reached until app.run() has already returned.
Try something like this instead, waiting in the main thread:
conn_thread = threading.Thread(target=websocket_conn)
conn_thread.start()
event.wait()
if event.is_set():
    # threading.Thread has no kill() method; ask the client to
    # disconnect instead, which makes app.run() return so the
    # thread can exit on its own.
    app.disconnect()
    conn_thread.join()
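As far as I can tell from the ibapi source, EClient.run() keeps looping while the connection is up and its message queue is non-empty, so disconnect() is the supported way to make it return; once it does, the thread finishes naturally and join() completes.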
I am working on a Python app, but I am moving from Flask to Quart. The application needs a background task that runs constantly whilst the application is running.
When I try to stop the process with Ctrl-C, the thread doesn't close cleanly and sits in the while loop of the shutdown routine.
while not self._master_thread_class.shutdown_completed:
    if not pro:
        print('[DEBUG] Thread is not complete')
        pro = True
I have followed this Stack Overflow question, but I can't figure out how to cleanly shut down the background thread, so I would love an explanation, as the Quart documentation seems to be lacking a bit.
MasterThread class:
import asyncio


class MasterThread:
    def __init__(self, shutdown_requested_event):
        self._shutdown_completed = False
        self._shutdown_requested_event = shutdown_requested_event
        self._shutdown_requested = False

    def __del__(self):
        print('Thread was deleted')

    def run(self, loop) -> None:
        asyncio.set_event_loop(loop)
        loop.run_until_complete(self._async_entrypoint())

    @asyncio.coroutine
    def _async_entrypoint(self) -> None:
        while not self._shutdown_requested and \
                not self._shutdown_requested_event.is_set():
            # print('_main_loop()')
            pass

        if self._shutdown_requested_event.wait(0.1):
            self._shutdown_requested = True

        print('[DEBUG] thread has completed....')
        self._shutdown_completed = True

    def _main_loop(self) -> None:
        print('_main_loop()')
Main application module:
import asyncio
import threading

from quart import Quart

from workthr import MasterThread

app = Quart(__name__)


class Service:
    def __init__(self):
        self._shutdown_thread_event = threading.Event()
        self._master_thread = MasterThread(self._shutdown_thread_event)
        self._thread = None

    def __del__(self):
        self.stop()

    def start(self):
        loop = asyncio.get_event_loop()
        self._thread = threading.Thread(target=self._master_thread.run, args=(loop,))
        self._thread.start()
        return True

    def stop(self) -> None:
        print('[DEBUG] Stop signal caught...')
        self._shutdown_thread_event.set()
        while not self._master_thread.shutdown_completed:
            print('[DEBUG] Thread is not complete')
        print('[DEBUG] Thread has completed')
        self._shutdown()

    def _shutdown(self):
        print('Shutting down...')


service = Service()
service.start()
Quart has startup and shutdown methods that allow something to be started before the server starts serving and stopped when it finishes serving. If your background task is mostly IO bound, I'd recommend just using a coroutine function rather than a thread:
async def background_task():
    while True:
        ...

@app.before_serving
async def startup():
    app.background_task = asyncio.ensure_future(background_task())

@app.after_serving
async def shutdown():
    app.background_task.cancel()  # Or use a variable in the while loop
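If you'd rather use a variable in the while loop, as the comment suggests, a minimal sketch could look like this (the shutting_down flag is my own name, not a Quart API; the startup hook stays as above):
shutting_down = False

async def background_task():
    # Poll the flag each iteration instead of relying on cancellation.
    while not shutting_down:
        await asyncio.sleep(1)

@app.after_serving
async def shutdown():
    global shutting_down
    shutting_down = True
    await app.background_task  # let the current iteration finish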
Or you can do the same with your Service:
@app.before_serving
async def startup():
    service.start()

@app.after_serving
async def shutdown():
    service.stop()
I am writing a multiprocess program. There are four classes: Main, Worker, Request and Ack. The Main class is the entry point of the program. It creates a sub-process called Worker to do some jobs. The main process puts a Request onto a JoinableQueue, and the Worker then gets the request from that queue. When the Worker has finished the request, it puts an Ack back onto the queue. Part of the code is shown below:
Main:
class Main():
    def __init__(self):
        self.cmd_queue = JoinableQueue()
        self.worker = Worker(self.cmd_queue)

    def call_worker(self, cmd_code):
        if self.cmd_queue.empty() is True:
            request = Request(cmd_code)
            self.cmd_queue.put(request)
            self.cmd_queue.join()
            ack = self.cmd_queue.get()
            self.cmd_queue.task_done()
            if ack.value == 0:
                return True
            else:
                return False
        else:
            # TODO: Error Handling.
            pass

    def run_worker(self):
        self.worker.start()
Worker:
class Worker(Process):
    def __init__(self, cmd_queue):
        super(Worker, self).__init__()
        self.cmd_queue = cmd_queue
        ...

    def run(self):
        while True:
            ack = Ack(0)
            try:
                request = self.cmd_queue.get()
                if request.cmd_code == ReqCmd.enable_handler:
                    self.enable_handler()
                elif request.cmd_code == ReqCmd.disable_handler:
                    self.disable_handler()
                else:
                    pass
            except Exception:
                ack.value = -1
            finally:
                self.cmd_queue.task_done()
                self.cmd_queue.put(ack)
                self.cmd_queue.join()
It usually works normally, but sometimes the Main process gets stuck at self.cmd_queue.join() and the Worker gets stuck at its own self.cmd_queue.join(). It is so weird! Does anyone have any ideas? Thanks.
There's nothing weird about this issue: you shouldn't call the queue's join() as part of a single worker's normal activity, because, per the docs:
Queue.join()
Blocks until all items in the queue have been gotten and processed.
Calls placed where they are in your current implementation make the processing pipeline wait on itself. Usually queue.join() is called in the main (supervisor) process after starting all threads/workers.
https://docs.python.org/3/library/queue.html#queue.Queue.join
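For a request/acknowledgement round trip, a common restructuring (a minimal sketch with illustrative names, not your actual Request/Ack classes) is to keep the JoinableQueue for requests only and send acks back on a separate plain Queue, so neither side ever joins a queue it also puts to:
from multiprocessing import JoinableQueue, Process, Queue

def worker(cmd_queue, ack_queue):
    # Runs in the child process: consume requests forever.
    while True:
        request = cmd_queue.get()
        try:
            ...  # handle the request here (enable/disable handler, etc.)
            ack_queue.put(0)
        except Exception:
            ack_queue.put(-1)
        finally:
            cmd_queue.task_done()

def call_worker(cmd_queue, ack_queue, request):
    cmd_queue.put(request)
    cmd_queue.join()  # returns once the worker has called task_done()
    return ack_queue.get() == 0

cmd_q, ack_q = JoinableQueue(), Queue()
Process(target=worker, args=(cmd_q, ack_q), daemon=True).start()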
The goal is to allocate a thread and wait for a callback. A single thread is going to run the while loop forever. The difficulty here is that we are not directly calling or controlling the callback, and we do not know in advance how long it will take the remote server to invoke it.
I've tried to find a solution in the asyncio module using asyncio.Future, but without success.
from a_module import Server  # <a_module> is fictitious
import random
import time


class App(Server):
    def __init__(self):
        self.response = None

    def send_requests(self):
        """Send request to remote server"""
        self.send_number_to_server(42)  # inherited from Server
        # This is going to loop forever. We should "suspend" the
        # current thread, allocate a new thread to wait for the
        # callback and then come back here to return the (not None)
        # response.
        while self.response is None:
            # Wait for the callback before terminating this method.
            time.sleep(1)  # seconds
        return self.response

    def callback(self, message):
        """Inherited from parent class 'Server'. When the request sent
        with App.send_req has been processed by the remote server,
        this function is invoked in the background."""
        self.response = message


if __name__ == '__main__':
    app = App()
    response = app.send_requests()
    print(response)
Since callback is "invoked in the background", Server is presumably already running a background thread. In that case, you want your main thread to run the event loop and have the server's background thread notify you when it is done. Assuming send_number_to_server is non-blocking, you could do it like this:
import asyncio


class App(Server):
    def __init__(self):
        self._loop = asyncio.get_event_loop()
        self._future_resp = None

    async def send_requests(self):
        # Create the future before sending, so the callback cannot
        # fire while self._future_resp does not exist yet.
        self._future_resp = self._loop.create_future()
        self.send_number_to_server(42)
        resp = await self._future_resp
        self._future_resp = None
        return resp

    def callback(self, message):
        # Called from a different thread, so the result must be set
        # through the loop's thread-safe entry point.
        self._loop.call_soon_threadsafe(self._future_resp.set_result, message)


async def main():
    app = App()
    response = await app.send_requests()
    print(response)


if __name__ == '__main__':
    asyncio.get_event_loop().run_until_complete(main())
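On Python 3.7 and later, the last line can be written more simply as asyncio.run(main()).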
I have a script where the main thread takes input from stdin and then passes it to a child thread using a queue. In the child thread I'm using asyncio coroutines to spin up a listener on a socket and wait for connections. Once a connection is made, I can send data through the listener from the main thread.
It all seems to work well enough, but since asyncio.BaseEventLoop is not thread-safe, am I going to run into problems?
This is my attempt to solve the problem of using a blocking library like Python's cmd module with asyncio.
My code is below.
import sys
import asyncio
from time import sleep
from threading import Thread
from queue import Queue

stdin_q = Queue()

clients = {}  # task -> (reader, writer)


def client_connected_handler(client_reader, client_writer):
    # Start a new asyncio.Task to handle this specific client connection
    task = asyncio.Task(handle_client(client_reader, client_writer))
    clients[task] = (client_reader, client_writer)

    def client_done(task):
        # When the task that handles the specific client connection is done
        del clients[task]

    # Add the client_done callback to be run when the future becomes done
    task.add_done_callback(client_done)


@asyncio.coroutine
def handle_client(client_reader, client_writer):
    # Handle the requests for a specific client with a line oriented protocol
    while True:
        cmd = yield from get_input()
        client_writer.write(cmd.encode())
        data = yield from client_reader.read(1024)
        print(data.decode(), end="", flush=True)


@asyncio.coroutine
def get_input():
    while True:
        try:
            return stdin_q.get()
        except:
            pass


class Control:
    def start(self):
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        self.loop = asyncio.get_event_loop()
        server = self.loop.run_until_complete(asyncio.start_server(client_connected_handler, '0.0.0.0', 2222))
        self.loop.run_forever()
        self.stop()

    def stop(self):
        self.loop.stop()
        self.loop.close()


def fire_control():
    con = Control()
    con.start()


if __name__ == "__main__":
    stdin_q.put("\n")
    t = Thread(target=fire_control)
    t.start()
    sleep(2)
    _cmd = ""
    while _cmd.lower() != "exit":
        _cmd = input("")
        if _cmd == "":
            _cmd = "\r\n"
        stdin_q.put(_cmd)
This isn't going to work quite right, because the call to stdin_q.get() is going to block your event loop. This means that if your server has multiple clients, all of them will be completely blocked by whichever one happens to reach stdin_q.get() first, until you send data into the queue. The simplest way around this is to use BaseEventLoop.run_in_executor to run stdin_q.get in a background ThreadPoolExecutor, which lets you wait for it without blocking the event loop:
@asyncio.coroutine
def get_input():
    loop = asyncio.get_event_loop()
    return (yield from loop.run_in_executor(None, stdin_q.get))  # None == use default executor.
Edit (1/27/16):
There is a library called janus, which provides an asyncio-friendly, thread-safe queue implementation.
Using that library, your code would look like this (I left out unchanged parts):
...
import janus

loop = asyncio.new_event_loop()
stdin_q = janus.Queue(loop=loop)
...

@asyncio.coroutine
def get_input():
    return (yield from stdin_q.async_q.get())


class Control:
    def start(self):
        asyncio.set_event_loop(loop)
        self.loop = asyncio.get_event_loop()
        server = self.loop.run_until_complete(asyncio.start_server(client_connected_handler, '0.0.0.0', 2222))
        self.loop.run_forever()
        self.stop()

    def stop(self):
        self.loop.stop()
        self.loop.close()

...

if __name__ == "__main__":
    stdin_q.sync_q.put("\n")
    t = Thread(target=fire_control)
    t.start()
    sleep(2)
    _cmd = ""
    while _cmd.lower() != "exit":
        _cmd = input("")
        if _cmd == "":
            _cmd = "\r\n"
        stdin_q.sync_q.put(_cmd)
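One caveat if you try this today: as far as I know, newer janus releases (1.0 and later) removed the loop= argument, and a janus.Queue has to be created while an event loop is already running, so the queue would need to be constructed inside Control.start() (or another coroutine) rather than at module level.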