So I have this problem. I want to use both Flask and RabbitMQ to build a microservice capable of doing some computation-heavy tasks. I basically want something like the
Remote procedure call (RPC) tutorial from the documentation, but with a REST API on top.
Here is the code I've come up with so far:
server.py
from flask import Flask
import sys
import os
import json
import pika
import uuid
import time

# HEADERS = {'Content-type': 'audio/*', 'Accept': 'text/plain'}

class RPIclient(object):
    def __init__(self):
        self.connection = pika.BlockingConnection(
            pika.ConnectionParameters(host='rabbitmq'))
        self.channel = self.connection.channel()
        self.channel.basic_qos(prefetch_count=1)
        self.channel.exchange_declare(exchange='kaldi_expe', exchange_type='topic')

        # Create all the queues and bind them to the corresponding routing keys
        self.channel.queue_declare('request', durable=True)
        result = self.channel.queue_declare('answer', durable=True)
        self.channel.queue_bind(exchange='kaldi_expe', queue='request', routing_key='kaldi_expe.web.request')
        self.channel.queue_bind(exchange='kaldi_expe', queue='answer', routing_key='kaldi_expe.kaldi.answer')

        self.callback_queue = result.method.queue
        self.channel.basic_consume(queue="answer", on_message_callback=self.on_response)

    def on_response(self, ch, method, props, body):
        print("from server, correlation id : " + str(props.correlation_id), file=sys.stderr)
        self.response = body
        ch.basic_ack(delivery_tag=method.delivery_tag)

    def call(self, n):
        print("Launched Call ")
        self.response = None
        self.corr_id = str(uuid.uuid4())
        self.channel.basic_publish(
            exchange='kaldi_expe',
            routing_key='kaldi_expe.web.request',
            properties=pika.BasicProperties(
                correlation_id=self.corr_id,
            ),
            body=str(n))
        while self.response is None:
            self.connection.process_data_events()
        return int(2)

def flask_app():
    app = Flask(__name__)

    @app.route('/', methods=['GET'])
    def server_is_up():
        return 'server is up', 200

    @app.route('/add-job/<cmd>')
    def add(cmd):
        app.config['RPIclient'].call(10)
        return "Call RPI client", 404

    return app

if __name__ == '__main__':
    print("Waiting for RabbitMQ")
    time.sleep(20)
    rpiClient = RPIclient()
    app = flask_app()
    app.config['RPIclient'] = rpiClient
    print("RabbitMQ is connected, starting server", file=sys.stderr)
    app.run(debug=True, threaded=False, host='0.0.0.0')
worker.py
import pika
import time
import sys

print(' [*] Waiting for RabbitMQ ...')
time.sleep(20)

print(' [*] Connecting to server ...')
connection = pika.BlockingConnection(pika.ConnectionParameters(host='rabbitmq'))
channel = connection.channel()

print(' [*] Waiting for messages.')

def callback(ch, method, properties, body):
    print(" [x] Received %s" % body)
    print(" [x] Executing task ")
    print("from worker, correlation id : " + str(properties.correlation_id))
    ch.basic_publish(
        exchange='kaldi_expe',
        routing_key='kaldi_expe.kaldi.answer',
        properties=pika.BasicProperties(correlation_id=properties.correlation_id),
        body="response")
    print(" [x] Done")
    ch.basic_ack(delivery_tag=method.delivery_tag)

channel.basic_qos(prefetch_count=1)
channel.basic_consume(queue='request', on_message_callback=callback)
channel.start_consuming()
Sadly, when I send a message back (from the worker to the server), the server seems to consume the message but never executes the callback (the RabbitMQ interface shows the message as consumed but not acked, and the print statements never show).
I'm pretty lost, since the message seems to be consumed but the callback is never executed. Do you have any idea where this might come from?
You attached the callback method on_response to the queue answer, but you never tell your server to start consuming the queues.
It looks like you are missing self.channel.start_consuming() at the end of your class initialization.
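A minimal sketch of that change (one caveat, which is my own addition: start_consuming() blocks forever, so in this Flask setup it would have to run on its own thread, otherwise the app would never start):

import threading

# at the end of RPIclient.__init__, after basic_consume:
# start_consuming() blocks, so hand it to a daemon thread and
# let the Flask app keep running in the main thread
threading.Thread(target=self.channel.start_consuming, daemon=True).start()

Bear in mind that pika is not thread-safe, so sharing this channel between the consumer thread and call() would need extra care in practice.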
I have a socketio setup that uses an eventlet server. My program gets logs from multiple machines and writes them to a database. I have an event called "new_log" which is triggered whenever a new log is sent through the websocket. Since database insertions take longer than the intervals between new logs, when I don't use any queueing system the logs accumulate on the client side, and once the client queue is filled to its max I no longer receive any new logs. That is the reason I decided to use RabbitMQ.
But I thought that since DB insertions still take longer, a RabbitMQ setup with a single consumer doesn't really solve the problem: this time the queue will be on the server side, but it will still grow and grow. So I wanted to launch a new consumer thread for each log. I found the following multi-threaded example in Pika's repo:
https://github.com/pika/pika/blob/0.13.1/examples/basic_consumer_threaded.py
and modified it a bit to use it like this:
main.py
import socketio
import os
import threading
import json
import pika
import functools
import config as cfg
from util.rabbitmq import consumer_threaded

sio = socketio.Server(async_mode="eventlet", namespaces='*', cors_allowed_origins=['*'])
app = socketio.WSGIApp(sio)

credentials = pika.PlainCredentials('guest', 'guest')
parameters = pika.ConnectionParameters('localhost', credentials=credentials, heartbeat=100)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.exchange_declare(exchange="test_exchange", exchange_type="direct", passive=False, durable=True, auto_delete=False)
channel.queue_declare(queue="standard", durable=True)
channel.queue_bind(queue="standard", exchange="test_exchange", routing_key="standard_key")
channel.basic_qos(prefetch_count=1)

@sio.on("new_log")
def client_activity(pid, data):
    channel.basic_publish(
        exchange='test_exchange',
        routing_key='standard_key',
        body=json.dumps(data),
        properties=pika.BasicProperties(
            delivery_mode=pika.spec.PERSISTENT_DELIVERY_MODE
        ))
    return "OK"

@sio.event
def connect(sid, environ, auth):
    print(f"[NEW CONNECTION] {sid}", flush=True)

@sio.event
def disconnect(sid):
    sio.disconnect(sid)
    print(f"[DISCONNECTED] {sid}", flush=True)

def start_consumer():
    on_message_callback = functools.partial(consumer_threaded.on_message, args=(connection, channel))
    channel.basic_consume('standard', on_message_callback)
    channel.start_consuming()
    print("Started consuming", flush=True)

if __name__ == "__main__":
    consumer_thread = threading.Thread(target=start_consumer)
    consumer_thread.start()
    import eventlet
    eventlet.monkey_patch()
    eventlet.wsgi.server(eventlet.listen(("", 1234)), app)
consumer_threaded.py
import functools
import logging
import threading
import json
from util.logger import save_log

LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '
              '-35s %(lineno) -5d: %(message)s')
LOGGER = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)

def ack_message(channel, delivery_tag):
    if channel.is_open:
        channel.basic_ack(delivery_tag)

def do_work(connection, channel, delivery_tag, body):
    thread_id = threading.get_ident()
    fmt1 = 'Thread id: {} Total threads: {} Delivery tag: {} Message body: {}'
    LOGGER.info(fmt1.format(thread_id, threading.active_count(), delivery_tag, body))
    save_log.save_log(json.loads(body.decode()))
    cb = functools.partial(ack_message, channel, delivery_tag)
    connection.add_callback_threadsafe(cb)

def on_message(channel, method_frame, header_frame, body, args):
    (connection, channel) = args
    delivery_tag = method_frame.delivery_tag
    t = threading.Thread(target=do_work, args=(connection, channel, delivery_tag, body))
    t.start()
    t.join()
This seems to work for a while, but eventually I get the following error:
AssertionError: ('_AsyncTransportBase._produce() tx buffer size underflow', -44, 1)
How can I achieve what I described without getting this error?
I am writing a Python script that uses the stomp library to connect and subscribe to an ActiveMQ message queue.
My code is very similar to the examples in the documentation under "Dealing with disconnects", with the addition of the timer being placed in a loop for a long-running listener.
The listener class works: it receives and processes messages. However, after a few minutes the connection gets disconnected and the listener stops picking up messages.
Problem:
The on_disconnected method is getting called, which runs connect_and_subscribe(), but the listener seems to stop working after this happens. Perhaps the listener needs to be re-initialized? When the script is run again, the listener is re-created and starts picking up messages again, but it is not practical to keep re-running the script periodically.
Question 1: How can I set this up to re-connect and re-create the listener automatically?
Question 2: Is there a better way to initialize a long-running listener rather than the timeout loop?
import os, time, datetime, stomp

_host = os.getenv('MQ_HOST')
_port = os.getenv('MQ_PORT')
_user = os.getenv('MQ_USER')
_password = os.getenv('MQ_PASSWORD')
_queue = os.getenv('QUEUE_NAME')

# Subscription id is unique to the subscription; in this case there is only one subscription per connection
sub_id = 1

def connect_and_subscribe(conn):
    conn.connect(_user, _password, wait=True)
    conn.subscribe(destination=_queue, id=sub_id, ack='client-individual')
    print('connect_and_subscribe connecting {} with connection id {}'.format(_queue, sub_id), flush=True)

class MqListener(stomp.ConnectionListener):
    def __init__(self, conn):
        self.conn = conn
        self.sub_id = sub_id
        print('MqListener init')

    def on_error(self, frame):
        print('received an error "%s"' % frame.body)

    def on_message(self, headers, body):
        print('received a message headers "%s"' % headers)
        print('message body "%s"' % body)
        time.sleep(1)
        print('processed message')
        print('Acknowledging')
        self.conn.ack(headers['message-id'], self.sub_id)

    def on_disconnected(self):
        print('disconnected! reconnecting...')
        connect_and_subscribe(self.conn)

def initialize_mqlistener():
    conn = stomp.Connection([(_host, _port)], heartbeats=(4000, 4000))
    conn.set_listener('', MqListener(conn))
    connect_and_subscribe(conn)
    # https://github.com/jasonrbriggs/stomp.py/issues/206
    while conn.is_connected():
        time.sleep(2)
    conn.disconnect()

if __name__ == '__main__':
    initialize_mqlistener()
I was able to solve this issue by refactoring the reconnect-attempts loop and the on_error handler. I also installed and configured supervisor in the Docker container to run and manage the listener process; that way, if the listener program stops, it is automatically restarted by the supervisor process manager.
Updated python stomp listener script
init_listener.py
import os, json, time, datetime, stomp

_host = os.getenv('MQ_HOST')
_port = os.getenv('MQ_PORT')
_user = os.getenv('MQ_USER')
_password = os.getenv('MQ_PASSWORD')

# The listener will listen for messages that are relevant to this specific worker
# Queue name must match the 'worker_type' in the job tracker file
_queue = os.getenv('QUEUE_NAME')

# Subscription id is unique to the subscription; in this case there is only one subscription per connection
_sub_id = 1
_reconnect_attempts = 0
_max_attempts = 1000

def connect_and_subscribe(conn):
    global _reconnect_attempts
    _reconnect_attempts = _reconnect_attempts + 1
    if _reconnect_attempts <= _max_attempts:
        try:
            conn.connect(_user, _password, wait=True)
            print('connect_and_subscribe connecting {} with connection id {} reconnect attempts: {}'.format(_queue, _sub_id, _reconnect_attempts), flush=True)
        except Exception as e:
            print('Exception on disconnect. reconnecting...')
            print(e)
            connect_and_subscribe(conn)
        else:
            conn.subscribe(destination=_queue, id=_sub_id, ack='client-individual')
            _reconnect_attempts = 0
    else:
        print('Maximum reconnect attempts reached for this connection. reconnect attempts: {}'.format(_reconnect_attempts), flush=True)

class MqListener(stomp.ConnectionListener):
    def __init__(self, conn):
        self.conn = conn
        self._sub_id = _sub_id
        print('MqListener init')

    def on_error(self, headers, body):
        print('received an error "%s"' % body)

    def on_message(self, headers, body):
        print('received a message headers "%s"' % headers)
        print('message body "%s"' % body)
        message_id = headers.get('message-id')
        message_data = json.loads(body)
        task_name = message_data.get('task_name')
        prev_status = message_data.get('previous_step_status')
        if prev_status == "success":
            print('CALLING DO TASK')
            resp = True
        else:
            print('CALLING REVERT TASK')
            resp = True
        if (resp):
            print('Ack message_id {}'.format(message_id))
            self.conn.ack(message_id, self._sub_id)
        else:
            print('NON Ack message_id {}'.format(message_id))
            self.conn.nack(message_id, self._sub_id)
        print('processed message message_id {}'.format(message_id))

    def on_disconnected(self):
        print('disconnected! reconnecting...')
        connect_and_subscribe(self.conn)

def initialize_mqlistener():
    conn = stomp.Connection([(_host, _port)], heartbeats=(4000, 4000))
    conn.set_listener('', MqListener(conn))
    connect_and_subscribe(conn)
    # https://github.com/jasonrbriggs/stomp.py/issues/206
    while True:
        time.sleep(2)
        if not conn.is_connected():
            print('Disconnected in loop, reconnecting')
            connect_and_subscribe(conn)

if __name__ == '__main__':
    initialize_mqlistener()
Supervisor installation and configuration
Dockerfile
Some details removed for brevity
# Install supervisor
RUN apt-get update && apt-get install -y supervisor
# Add the supervisor configuration file
ADD supervisord.conf /etc/supervisor/conf.d/supervisord.conf
# Start supervisor with the configuration file
CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/conf.d/supervisord.conf"]
supervisord.conf
[supervisord]
nodaemon=true
logfile=/home/exampleuser/logs/supervisord.log
[program:mqutils]
command=python3 init_listener.py
directory=/home/exampleuser/mqutils
user=exampleuser
autostart=true
autorestart=true
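With this configuration, supervisord runs as the container's main process and automatically restarts init_listener.py whenever it exits. To check on the managed listener from inside the container (standard supervisorctl usage):

$ supervisorctl -c /etc/supervisor/conf.d/supervisord.conf status mqutils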
I have a Flask application as such
from flask import Flask
from flask_restful import Resource, Api
from mq_handler import MessageBroker
import pika

app = Flask(__name__)
api = Api(app)

connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
mb = MessageBroker(connection)

class HelloWorld(Resource):
    def get(self):
        mb.run()
        return {'hello': 'world'}

class LogHandler(Resource):
    def get(self, table):
        return {'TableName': table}

api.add_resource(HelloWorld, '/')
api.add_resource(LogHandler, '/log/<string:table>')

if __name__ == '__main__':
    app.run(debug=True)
I have added a MessageBroker class to handle all my RabbitMQ messages:
import pika
import json

class MessageBroker:
    def __init__(self, connection):
        self.connection = connection
        self.channel = connection.channel()

    def run(self):
        self.channel.basic_consume(queue='logs',
                                   auto_ack=True,
                                   on_message_callback=self.handle_log)
        self.channel.start_consuming()

    def handle_log(self, ch, method, properties, body):
        decoded_content = body.decode('utf-8')
        json_payload = json.loads(decoded_content)
        print(" [x] Received %r" % json_payload['message'])
I have tried different solutions, but how can I get both services to run simultaneously on the same server? Can somebody explain that, please?
In general, how is it possible to have several services listening on my Flask server?
I am not sure that running the consumer on an endpoint is a good idea. When you start a consumer, it runs an IO loop that continuously fetches and processes messages from the server; the loop will not exit unless it is stopped externally or an exception in the message processing causes the connection to close. Can you please state your scenario for running the consumer in the endpoint?
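If the goal is simply to have the consumer and the Flask app running side by side, one common pattern is to start the consumer on a background thread before the app, giving it its own connection (a sketch only, reusing the 'logs' queue from the question; pika connections are not thread-safe, so the consumer must not share the connection Flask publishes on):

import threading
import pika

def consume_logs():
    # dedicated connection for the consumer thread
    connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
    channel = connection.channel()
    channel.basic_consume(queue='logs', auto_ack=True,
                          on_message_callback=lambda ch, method, props, body: print(body))
    channel.start_consuming()  # blocks, but only this thread

threading.Thread(target=consume_logs, daemon=True).start()
app.run(debug=True, use_reloader=False)  # the reloader would start the thread twice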
I am just starting to use RabbitMQ and Python in general. I have been reading the tutorials on the official RabbitMQ page, but I have no idea how to use RabbitMQ to do other things.
I have been trying to run the example from this tutorial (https://www.rabbitmq.com/tutorials/tutorial-three-python.html). It runs well, BUT I need to know how I can create more than one function and call them through RabbitMQ messages. (I am also using this example (Python and RabbitMQ - Best way to listen to consume events from multiple channels?) to guide me.)
I hope someone has some idea how to do this (I will repeat again, I am very new to these topics).
This is some code I have.
I use this code to send the message, as in the tutorial:
import pika
import sys

url = 'amqp://oamogcgg:xxxxxxxxxxxxxxxxxxxxxxxxx@salamander.rmq.cloudamqp.com/oamogcgg'
params = pika.URLParameters(url)
connection = pika.BlockingConnection(params)
channel = connection.channel()

channel.exchange_declare(exchange='logs', exchange_type='fanout')

message = ' '.join(sys.argv[1:]) or "info: THIS IS A TEST MESSAGE !!!!!!"
channel.basic_publish(exchange='logs', routing_key='', body=message)
print(" [x] Sent %r" % message)
connection.close()
And this is the file where I receive the message, as far as I understand:
import pika
import sys
import threading
from time import sleep

threads = []

# function 1
def validator1(channel):
    channel.queue_declare(queue='queue_name')
    print(' [*] Waiting for messages for validator1, press CTRL+C')
    def callback(ch, method, properties, body):
        print(" Received %s" % (body))
        sleep(2)  # I need to stop it for two minutes
    channel.basic_consume(callback, queue='queue_name', no_ack=True)
    channel.start_consuming()

# function 2
def validator2(channel):
    channel.queue_declare(queue='queue_name')
    print(' [*] Waiting for messages for validator2, press CTRL+C')
    def callback(ch, method, properties, body):
        print(" Received %s" % (body))
        sleep(2)  # I need to stop it for two minutes
    channel.basic_consume(callback, queue='queue_name', no_ack=True)
    channel.start_consuming()

def manager():
    url = 'amqp://oamogcgg:xxxxxxxxxxxxxxxxxxxxxxxxx@salamander.rmq.cloudamqp.com/oamogcgg'
    params = pika.URLParameters(url)

    # channel 1
    connection1 = pika.BlockingConnection(params)
    channel1 = connection1.channel()
    channel1.exchange_declare(exchange='logs', exchange_type='fanout')
    result = channel1.queue_declare(queue='', exclusive=True)
    queue_name = result.method.queue
    channel1.queue_bind(exchange='logs', queue=queue_name)

    # channel 2
    connection2 = pika.BlockingConnection(params)
    channel2 = connection2.channel()
    channel2.exchange_declare(exchange='logs', exchange_type='fanout')
    result = channel2.queue_declare(queue='', exclusive=True)
    queue_name = result.method.queue
    channel2.queue_bind(exchange='logs', queue=queue_name)

    # creating threads
    t1 = threading.Thread(target=validator1, args=(channel1,))
    t1.daemon = True
    threads.append(t1)
    t1.start()

    t2 = threading.Thread(target=validator2, args=(channel2,))
    t2.daemon = True
    threads.append(t2)
    t2.start()

    for t in threads:
        t.join()

manager()
If your functions are dependent (by dependent I mean one function works on the output of the other), then you can call all those functions one by one in your callback function. Once the last function has executed successfully, you can acknowledge the message.
If the functions are independent, then you can maintain multiple queues, one for each function you want to execute on the message. The same message can be routed to multiple queues using a fanout exchange, as mentioned in RabbitMQ Tutorial 3.
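A sketch of the dependent case, where step_one and step_two are hypothetical placeholders for your own functions:

def step_one(body):
    # placeholder: the first function works on the raw message
    return body.decode()

def step_two(text):
    # placeholder: the second function works on the output of the first
    return text.upper()

def callback(ch, method, properties, body):
    intermediate = step_one(body)
    result = step_two(intermediate)
    print(" [x] Processed %r" % result)
    # acknowledge only once the last function has finished successfully
    ch.basic_ack(delivery_tag=method.delivery_tag)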
I have two separate RabbitMQ instances, and I'm trying to find the best way to listen to events from both.
For example, I can consume events on one with the following:
credentials = pika.PlainCredentials(user, password)
connection = pika.BlockingConnection(pika.ConnectionParameters(host="host1", credentials=credentials))
channel = connection.channel()
result = channel.queue_declare(exclusive=True)
channel.queue_bind(result.method.queue, exchange="my-exchange", routing_key='*.*.*.*.*')
channel.basic_consume(callback_func, result.method.queue, no_ack=True)
channel.start_consuming()
I have a second host, "host2", that I'd like to listen to as well. I thought about creating two separate threads to do this, but from what I've read, pika isn't thread-safe. Is there a better way? Or would creating two separate threads, each listening to a different RabbitMQ instance (host1 and host2), be sufficient?
The answer to "what is the best way" depends heavily on your usage pattern of queues and what you mean by "best". Since I can't comment on questions yet, I'll just try to suggest some possible solutions.
In each example I'm going to assume exchange is already declared.
Threads
You can consume messages from two queues on separate hosts in a single process using pika.
You are right: as its own FAQ states, pika is not thread-safe, but it can be used in a multi-threaded manner by creating a connection to a RabbitMQ host per thread. Making this example run in threads using the threading module looks as follows:
import pika
import threading

class ConsumerThread(threading.Thread):
    def __init__(self, host, *args, **kwargs):
        super(ConsumerThread, self).__init__(*args, **kwargs)
        self._host = host

    # Not necessarily a method.
    def callback_func(self, channel, method, properties, body):
        print("{} received '{}'".format(self.name, body))

    def run(self):
        credentials = pika.PlainCredentials("guest", "guest")
        connection = pika.BlockingConnection(
            pika.ConnectionParameters(host=self._host,
                                      credentials=credentials))
        channel = connection.channel()
        result = channel.queue_declare(exclusive=True)
        channel.queue_bind(result.method.queue,
                           exchange="my-exchange",
                           routing_key="*.*.*.*.*")
        channel.basic_consume(self.callback_func,
                              result.method.queue,
                              no_ack=True)
        channel.start_consuming()

if __name__ == "__main__":
    threads = [ConsumerThread("host1"), ConsumerThread("host2")]
    for thread in threads:
        thread.start()
I've declared callback_func as a method purely to use ConsumerThread.name while printing message body. It might as well be a function outside the ConsumerThread class.
Processes
Alternatively, you can always just run one process with consumer code per queue you want to consume events from.
import pika
import sys

def callback_func(channel, method, properties, body):
    print(body)

if __name__ == "__main__":
    credentials = pika.PlainCredentials("guest", "guest")
    connection = pika.BlockingConnection(
        pika.ConnectionParameters(host=sys.argv[1],
                                  credentials=credentials))
    channel = connection.channel()
    result = channel.queue_declare(exclusive=True)
    channel.queue_bind(result.method.queue,
                       exchange="my-exchange",
                       routing_key="*.*.*.*.*")
    channel.basic_consume(callback_func, result.method.queue, no_ack=True)
    channel.start_consuming()
and then run by:
$ python single_consume.py host1
$ python single_consume.py host2 # e.g. on another console
If the work you're doing on messages from the queues is CPU-heavy, and as long as the number of cores in your CPU >= the number of consumers, it is generally better to use this approach; unless your queues are empty most of the time and the consumers won't utilize this CPU time*.
Async
Another alternative is to involve an asynchronous framework (for example Twisted) and run the whole thing in a single thread.
You can no longer use BlockingConnection in asynchronous code; fortunately, pika has an adapter for Twisted:
from pika.adapters.twisted_connection import TwistedProtocolConnection
from pika.connection import ConnectionParameters
from twisted.internet import protocol, reactor, task
from twisted.python import log

class Consumer(object):
    def on_connected(self, connection):
        d = connection.channel()
        d.addCallback(self.got_channel)
        d.addCallback(self.queue_declared)
        d.addCallback(self.queue_bound)
        d.addCallback(self.handle_deliveries)
        d.addErrback(log.err)

    def got_channel(self, channel):
        self.channel = channel
        return self.channel.queue_declare(exclusive=True)

    def queue_declared(self, queue):
        self._queue_name = queue.method.queue
        self.channel.queue_bind(queue=self._queue_name,
                                exchange="my-exchange",
                                routing_key="*.*.*.*.*")

    def queue_bound(self, ignored):
        return self.channel.basic_consume(queue=self._queue_name)

    def handle_deliveries(self, queue_and_consumer_tag):
        queue, consumer_tag = queue_and_consumer_tag
        self.looping_call = task.LoopingCall(self.consume_from_queue, queue)
        return self.looping_call.start(0)

    def consume_from_queue(self, queue):
        d = queue.get()
        return d.addCallback(lambda result: self.handle_payload(*result))

    def handle_payload(self, channel, method, properties, body):
        print(body)

if __name__ == "__main__":
    consumer1 = Consumer()
    consumer2 = Consumer()

    parameters = ConnectionParameters()
    cc = protocol.ClientCreator(reactor,
                                TwistedProtocolConnection,
                                parameters)
    d1 = cc.connectTCP("host1", 5672)
    d1.addCallback(lambda protocol: protocol.ready)
    d1.addCallback(consumer1.on_connected)
    d1.addErrback(log.err)

    d2 = cc.connectTCP("host2", 5672)
    d2.addCallback(lambda protocol: protocol.ready)
    d2.addCallback(consumer2.on_connected)
    d2.addErrback(log.err)

    reactor.run()
This approach would be even better the more queues you consume from and the less CPU-bound the work performed by the consumers is*.
Python 3
Since you've mentioned pika, I've restricted myself to Python 2.x-based solutions, because pika has not yet been ported.
But in case you want to move to >=3.3, one possible option is to use asyncio with one of the AMQP client libraries (AMQP being the protocol you speak with RabbitMQ), e.g. asynqp or aioamqp.
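For instance, a rough aioamqp consumer sketch modelled on its documented hello-world (host and queue names are placeholders; verify the exact signatures against the library's docs):

import asyncio
import aioamqp

async def on_message(channel, body, envelope, properties):
    # aioamqp delivers each message to an async callback
    print(body)

async def receive():
    transport, protocol = await aioamqp.connect("host1", 5672)
    channel = await protocol.channel()
    await channel.queue_declare(queue_name="my-queue")
    await channel.basic_consume(on_message, queue_name="my-queue", no_ack=True)

loop = asyncio.get_event_loop()
loop.run_until_complete(receive())
loop.run_forever()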
* - please note that these are very shallow tips; in most cases the choice is not that obvious. What will be best for you depends on queue "saturation" (messages/time), what work you do upon receiving these messages, what environment you run your consumers in, etc.; there's no way to be sure other than to benchmark all the implementations.
Below is an example of how I use one RabbitMQ instance to listen to 2 queues at the same time:
import pika
import threading

threads = []

def client_info(channel):
    channel.queue_declare(queue='proxy-python')
    print(' [*] Waiting for client messages. To exit press CTRL+C')
    def callback(ch, method, properties, body):
        print(" Received %s" % (body))
    channel.basic_consume(callback, queue='proxy-python', no_ack=True)
    channel.start_consuming()

def scenario_info(channel):
    channel.queue_declare(queue='savi-virnet-python')
    print(' [*] Waiting for scenario messages. To exit press CTRL+C')
    def callback(ch, method, properties, body):
        print(" Received %s" % (body))
    channel.basic_consume(callback, queue='savi-virnet-python', no_ack=True)
    channel.start_consuming()

def manager():
    connection1 = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
    channel1 = connection1.channel()
    connection2 = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
    channel2 = connection2.channel()

    t1 = threading.Thread(target=client_info, args=(channel1,))
    t1.daemon = True
    threads.append(t1)
    t1.start()

    t2 = threading.Thread(target=scenario_info, args=(channel2,))
    t2.daemon = True
    threads.append(t2)
    t2.start()

    for t in threads:
        t.join()

manager()
import asyncio
import tornado.ioloop
import tornado.web
from aio_pika import connect_robust, Message

tornado.ioloop.IOLoop.configure("tornado.platform.asyncio.AsyncIOLoop")
io_loop = tornado.ioloop.IOLoop.current()
asyncio.set_event_loop(io_loop.asyncio_loop)

QUEUE = asyncio.Queue()

class SubscriberHandler(tornado.web.RequestHandler):
    async def get(self):
        message = await QUEUE.get()
        self.finish(message.body)

class PublisherHandler(tornado.web.RequestHandler):
    async def post(self):
        connection = self.application.settings["amqp_connection"]
        channel = await connection.channel()
        try:
            await channel.default_exchange.publish(
                Message(body=self.request.body), routing_key="test",
            )
        finally:
            await channel.close()
        print('ok')
        self.finish("OK")

async def make_app():
    amqp_connection = await connect_robust()
    channel = await amqp_connection.channel()
    queue = await channel.declare_queue("test", auto_delete=True)
    await queue.consume(QUEUE.put, no_ack=True)
    return tornado.web.Application(
        [(r"/publish", PublisherHandler), (r"/subscribe", SubscriberHandler)],
        amqp_connection=amqp_connection,
    )

if __name__ == "__main__":
    app = io_loop.asyncio_loop.run_until_complete(make_app())
    app.listen(8888)
    tornado.ioloop.IOLoop.current().start()
You can use aio-pika in an async way. More examples are here:
https://buildmedia.readthedocs.org/media/pdf/aio-pika/latest/aio-pika.pdf
Happy coding :)
Pika can be used in a multi-threaded consumer. The only requirement is to have a Pika connection per thread.
The Pika GitHub repository has an example here.
A snippet from basic_consumer_threaded.py:
def on_message(ch, method_frame, _header_frame, body, args):
    (conn, thrds) = args
    delivery_tag = method_frame.delivery_tag
    t = threading.Thread(target=do_work, args=(conn, ch, delivery_tag, body))
    t.start()
    thrds.append(t)

threads = []
on_message_callback = functools.partial(on_message, args=(connection, threads))
channel.basic_consume('standard', on_message_callback)
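For completeness, a matching do_work would mirror the consumer_threaded.py shown earlier in this thread: do the heavy processing on the worker thread, then hand the ack back to the connection's own thread, since pika channels must not be used directly from other threads:

import functools
import threading

def ack_message(channel, delivery_tag):
    # the channel may have closed in the meantime; ack only if still open
    if channel.is_open:
        channel.basic_ack(delivery_tag)

def do_work(conn, ch, delivery_tag, body):
    print("Thread {} handling {!r}".format(threading.get_ident(), body))
    # schedule the ack on the connection's thread instead of
    # calling basic_ack directly from this worker thread
    cb = functools.partial(ack_message, ch, delivery_tag)
    conn.add_callback_threadsafe(cb)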