Calling more than one function via RabbitMQ messages - Python

I am just starting to use RabbitMQ and Python in general. I have been reading the tutorials on the official RabbitMQ page, but I have no idea how to use RabbitMQ to do other things.
I have been trying to run the example from this tutorial (https://www.rabbitmq.com/tutorials/tutorial-three-python.html) and it runs well, BUT I need to know how I can create more than one function and call them through RabbitMQ messages. (I am also using this example (Python and RabbitMQ - Best way to listen to consume events from multiple channels?) to guide me.)
I hope someone has some idea how to do this (I will repeat, I am very new to these topics).
This is some code I have.
I use this code to send the message, as in the tutorial:
import pika
import sys
url = 'amqp://oamogcgg:xxxxxxxxxxxxxxxxxxxxxxxxx@salamander.rmq.cloudamqp.com/oamogcgg'
params = pika.URLParameters(url)
connection = pika.BlockingConnection(params)
channel = connection.channel()
channel.exchange_declare(exchange='logs', exchange_type='fanout')
message = ' '.join(sys.argv[1:]) or "info: THIS IS A TEST MESSAGE !!!!!!"
channel.basic_publish(exchange='logs', routing_key='', body=message)
print(" [x] Sent %r" % message)
connection.close()
And this file is where I receive the message, as I understand it:
import pika
import sys
import threading
from time import sleep  # needed for the sleep() calls below

threads = []

# function 1
def validator1(channel):
    channel.queue_declare(queue='queue_name')
    print(' [*] Waiting for messages for validator1. To exit press CTRL+C')
    def callback(ch, method, properties, body):
        print(" Received %s" % (body))
        sleep(2)  # I need stop it for two minutes
    channel.basic_consume(callback, queue='queue_name', no_ack=True)
    channel.start_consuming()

# function 2
def validator2(channel):
    channel.queue_declare(queue='queue_name')
    print(' [*] Waiting for messages for validator2. To exit press CTRL+C')
    def callback(ch, method, properties, body):
        print(" Received %s" % (body))
        sleep(2)  # I need stop it for two minutes
    channel.basic_consume(callback, queue='queue_name', no_ack=True)
    channel.start_consuming()

def manager():
    url = 'amqp://oamogcgg:xxxxxxxxxxxxxxxxxxxxxxxxx@salamander.rmq.cloudamqp.com/oamogcgg'
    params = pika.URLParameters(url)
    # channel 1
    connection1 = pika.BlockingConnection(params)
    channel1 = connection1.channel()
    channel1.exchange_declare(exchange='logs', exchange_type='fanout')
    result = channel1.queue_declare(queue='', exclusive=True)
    queue_name = result.method.queue
    channel1.queue_bind(exchange='logs', queue=queue_name)
    # channel 2
    connection2 = pika.BlockingConnection(params)
    channel2 = connection2.channel()
    channel2.exchange_declare(exchange='logs', exchange_type='fanout')
    result = channel2.queue_declare(queue='', exclusive=True)
    queue_name = result.method.queue
    channel2.queue_bind(exchange='logs', queue=queue_name)
    # creating threads
    t1 = threading.Thread(target=validator1, args=(channel1,))
    t1.daemon = True
    threads.append(t1)
    t1.start()
    t2 = threading.Thread(target=validator2, args=(channel2,))
    t2.daemon = True
    threads.append(t2)
    t2.start()
    for t in threads:
        t.join()

manager()

If your functions are dependent (by dependent I mean one function works on the output of the other), then you can call all those functions one by one in your callback function. Once the last function has executed successfully, you can acknowledge the message.
If the functions are independent, then you can maintain one queue for each function you want to execute on the message. The same message can be routed to multiple queues using a fanout exchange, as described in RabbitMQ Tutorial 3. A sketch of that second approach follows.
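As a minimal sketch of the fanout approach (the queue and exchange names and the URL are illustrative, not from the question, and this uses the pika 1.x calling convention): each function gets its own queue bound to one fanout exchange, and each consumer acknowledges only after its function has succeeded.

import pika

# placeholder URL; substitute your own broker credentials
params = pika.URLParameters('amqp://user:password@host/vhost')
connection = pika.BlockingConnection(params)
channel = connection.channel()

channel.exchange_declare(exchange='logs', exchange_type='fanout')

# one queue per independent function; the fanout exchange copies
# every published message into both queues
for q in ('validator1-queue', 'validator2-queue'):
    channel.queue_declare(queue=q)
    channel.queue_bind(exchange='logs', queue=q)

def validator1(ch, method, properties, body):
    print(" validator1 received %r" % body)
    ch.basic_ack(delivery_tag=method.delivery_tag)  # ack once the work succeeded

# a second process (or thread with its own connection) would
# consume 'validator2-queue' the same way
channel.basic_consume(queue='validator1-queue', on_message_callback=validator1)
channel.start_consuming()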

Related

Flask consumer doesn't execute callback when consuming from RabbitMQ

So I have this problem. I want to use both Flask and RabbitMQ to build a microservice capable of doing some computation-heavy tasks. I basically want something like the
Remote procedure call (RPC) tutorial from the documentation, but with a REST API on top.
So I've come up with this code so far:
server.py
from flask import Flask
import sys
import os
import json
import pika
import uuid
import time

''' HEADERS = {'Content-type': 'audio/*', 'Accept': 'text/plain'}'''

class RPIclient(object):
    def __init__(self):
        self.connection = pika.BlockingConnection(
            pika.ConnectionParameters(host='rabbitmq'))
        self.channel = self.connection.channel()
        self.channel.basic_qos(prefetch_count=1)
        self.channel.exchange_declare(exchange='kaldi_expe', exchange_type='topic')
        # Create all the queues and bind them to the corresponding routing keys
        self.channel.queue_declare('request', durable=True)
        result = self.channel.queue_declare('answer', durable=True)
        self.channel.queue_bind(exchange='kaldi_expe', queue='request', routing_key='kaldi_expe.web.request')
        self.channel.queue_bind(exchange='kaldi_expe', queue='answer', routing_key='kaldi_expe.kaldi.answer')
        self.callback_queue = result.method.queue
        self.channel.basic_consume(queue="answer", on_message_callback=self.on_response)

    def on_response(self, ch, method, props, body):
        print("from server, correlation id : " + str(props.correlation_id), file=sys.stderr)
        self.response = body
        ch.basic_ack(delivery_tag=method.delivery_tag)

    def call(self, n):
        print("Launched Call ")
        self.response = None
        self.corr_id = str(uuid.uuid4())
        self.channel.basic_publish(
            exchange='kaldi_expe',
            routing_key='kaldi_expe.web.request',
            properties=pika.BasicProperties(
                correlation_id=self.corr_id,
            ),
            body=str(n))
        while self.response is None:
            self.connection.process_data_events()
        return int(2)

def flask_app():
    app = Flask(__name__)

    @app.route('/', methods=['GET'])
    def server_is_up():
        return 'server is up', 200

    @app.route('/add-job/<cmd>')
    def add(cmd):
        app.config['RPIclient'].call(10)
        return "Call RPI client", 404

    return app

if __name__ == '__main__':
    print("Waiting for RabbitMQ")
    time.sleep(20)
    rpiClient = RPIclient()
    app = flask_app()
    app.config['RPIclient'] = rpiClient
    print("RabbitMQ is connected, starting server", file=sys.stderr)
    app.run(debug=True, threaded=False, host='0.0.0.0')
worker.py
import pika
import time
import sys

print(' [*] Waiting for RabbitMQ ...')
time.sleep(20)
print(' [*] Connecting to server ...')
# the original snippet used `connection` without creating it;
# assuming the same 'rabbitmq' host as the server
connection = pika.BlockingConnection(pika.ConnectionParameters(host='rabbitmq'))
channel = connection.channel()
print(' [*] Waiting for messages.')

def callback(ch, method, properties, body):
    print(" [x] Received %s" % body)
    print(" [x] Executing task ")
    print("from worker, correlation id : " + str(properties.correlation_id))
    ch.basic_publish(
        exchange='kaldi_expe',
        routing_key='kaldi_expe.kaldi.answer',
        properties=pika.BasicProperties(correlation_id=properties.correlation_id),
        body="response")
    print(" [x] Done")
    ch.basic_ack(delivery_tag=method.delivery_tag)

channel.basic_qos(prefetch_count=1)
channel.basic_consume(queue='request', on_message_callback=callback)
channel.start_consuming()
Sadly, when I'm sending a message back (from the worker to the server), it seems that the server does consume the message but never executes the callback (the RabbitMQ interface shows the message as consumed but not acked; also, the prints don't show).
I'm pretty lost, since the message seems to be consumed but the callback doesn't seem to be executed. Do you have any idea where it might come from?
You did attach the callback method on_response to the answer queue, but you never tell your server to start consuming the queues.
It looks like you are missing self.channel.start_consuming() at the end of your class initialization.
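A hedged sketch of that fix: start_consuming() blocks forever, so one common workaround is to run it on a background thread rather than calling it directly at the end of __init__ (note that pika channels are not thread safe, so sharing self.channel between this thread and call() would still need care):

import threading

class RPIclient(object):
    def __init__(self):
        # ... connection/channel/queue setup exactly as in the question ...
        self.channel.basic_consume(queue="answer", on_message_callback=self.on_response)
        # start_consuming() never returns, so run it off the main thread
        consumer = threading.Thread(target=self.channel.start_consuming)
        consumer.daemon = True
        consumer.start()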

Pika RabbitMQ Publish from a consumer

I have a RabbitMQ consumer. I would like to have that consumer do some message processing, simulated by time.sleep(10), and then publish a message to a different queue. I know the consumer callback has a channel that could in theory be used to publish, but this seems like a bad implementation, because if basic_publish() somehow manages to force-close the channel, then the consumer dies. What is the best way to handle this?
import time
import pika

connection = pika.BlockingConnection(
    pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.exchange_declare(exchange='logs', exchange_type='fanout')
result = channel.queue_declare(queue='original_queue', exclusive=True)
channel.queue_bind(exchange='logs', queue='original_queue')
print(' [*] Waiting for logs. To exit press CTRL+C')

def callback(ch, method, properties, body):
    time.sleep(10)
    ch.basic_publish(exchange='logs', routing_key='different_queue', body='hello_world')

channel.basic_consume(
    queue='original_queue', on_message_callback=callback, auto_ack=True)
channel.start_consuming()
You can implement your consumer so that it automatically reconnects to the RabbitMQ server if the connection gets closed. Hope this helps (I didn't put much thought into the design part, feel free to suggest improvements!).
import time
import pika

reconnect_on_failure = True

def consumer(connection, channel):
    channel.exchange_declare(exchange='logs', exchange_type='fanout')
    result = channel.queue_declare(queue='original_queue', exclusive=True)
    channel.queue_bind(exchange='logs', queue='original_queue')
    print(' [*] Waiting for logs. To exit press CTRL+C')

    def callback(ch, method, properties, body):
        time.sleep(10)
        ch.basic_publish(exchange='logs', routing_key='different_queue', body='hello_world')

    channel.basic_consume(
        queue='original_queue', on_message_callback=callback, auto_ack=True)
    channel.start_consuming()

def get_connection_and_channel():
    connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
    channel = connection.channel()
    return connection, channel

def start(reconnect_on_failure):
    connection, channel = get_connection_and_channel()
    consumer(connection, channel)
    # the if condition will be executed when the consumer's start_consuming loop exits
    if reconnect_on_failure:
        # cleanly close the channel and connection
        if not channel.is_closed:
            channel.close()
        if not connection.is_closed:
            connection.close()
        start(reconnect_on_failure)

start(reconnect_on_failure)

How to add multiprocessing to consumer with pika (RabbitMQ) in python

I have very basic producer-consumer code written with the pika framework in Python. The problem is that the consumer side runs too slowly on the messages in the queue. I ran some tests and found out that I can speed up the workflow up to 27 times with multiprocessing. The problem is that I don't know the right way to add multiprocessing functionality to my code.
import pika
import json
from datetime import datetime
from functions import download_xmls

def callback(ch, method, properties, body):
    print('Got something')
    body = json.loads(body)
    type = body[-1]['Type']
    print('Object type in work currently ' + type)
    cnums = [x['cadnum'] for x in body[:-1]]
    print('Got {} cnums to work with'.format(len(cnums)))
    date_start = datetime.now()
    download_xmls(type, cnums)
    date_end = datetime.now()
    ch.basic_ack(delivery_tag=method.delivery_tag)
    print('Download complete in {} seconds'.format((date_end - date_start).total_seconds()))

def consume(queue_name='bot-test'):
    parameters = pika.URLParameters('server#address')
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()
    channel.queue_declare(queue=queue_name, durable=True)
    channel.basic_qos(prefetch_count=1)
    channel.basic_consume(callback, queue='bot-test')
    print(' [*] Waiting for messages. To exit press CTRL+C')
    channel.start_consuming()
How do I start with adding multiprocessing functionality from here?
Pika has extensive example code that I recommend you check out. Note that this code is for example use only. In the case of doing work on threads, you will have to use a more intelligent way to manage your threads.
The goal is to not block the thread that runs Pika's IO loop, and to call back into the IO loop correctly from your worker threads. That's why add_callback_threadsafe exists and is used in that code.
NOTE: the RabbitMQ team monitors the rabbitmq-users mailing list and only sometimes answers questions on StackOverflow.
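A hedged sketch of the core of that pattern (ch, delivery_tag and connection named as they appear in a typical consumer callback): the worker thread never calls basic_ack itself; it schedules the ack to run on the thread that owns the connection:

import functools

def on_work_done(connection, ch, delivery_tag):
    # safe to call from a worker thread: the ack itself runs later
    # on the connection's IO-loop thread
    cb = functools.partial(ch.basic_ack, delivery_tag=delivery_tag)
    connection.add_callback_threadsafe(cb)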
import pika
import json
from multiprocessing import Process
from datetime import datetime
from functions import download_xmls
import multiprocessing
import concurrent.futures

def do_job(body):
    body = json.loads(body)
    type = body[-1]['Type']
    print('Object type in work currently ' + type)
    cnums = [x['cadnum'] for x in body[:-1]]
    print('Got {} cnums to work with'.format(len(cnums)))
    date_start = datetime.now()
    download_xmls(type, cnums)
    date_end = datetime.now()
    print('Download complete in {} seconds'.format((date_end - date_start).total_seconds()))

def callback(ch, method, properties, body):
    print('Got something')
    p = Process(target=do_job, args=(body,))
    p.start()
    p.join()
    # ack from the consumer process once the job finishes;
    # the channel cannot be used from the child process
    ch.basic_ack(delivery_tag=method.delivery_tag)

def consume(queue_name='bot-test'):
    parameters = pika.URLParameters('server#address')
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()
    channel.queue_declare(queue=queue_name, durable=True)
    channel.basic_qos(prefetch_count=1)
    channel.basic_consume(callback, queue='bot-test')
    print(' [*] Waiting for messages. To exit press CTRL+C')
    channel.start_consuming()

def get_workers():
    try:
        return multiprocessing.cpu_count()
    except NotImplementedError:
        return 4

workers = get_workers()
with concurrent.futures.ProcessPoolExecutor(max_workers=workers) as executor:
    for i in range(workers):
        executor.submit(consume)
The above is just a simple demo of how you can include multiprocessing here. I recommend you go through the documentation to further optimise the code and achieve what you require:
https://docs.python.org/3/library/multiprocessing.html#the-process-class

RabbitMQ pika connection errors and inconsistency

def response(return_JSON):
    connection = pika.BlockingConnection(pika.ConnectionParameters(host=HOST))
    channel = connection.channel()
    channel.exchange_declare(exchange=EXCHANGE, type='direct', durable=True)
    channel.queue_declare(queue=QUEUE_NAME, durable=True)
    channel.queue_bind(exchange=EXCHANGE, queue=QUEUE_NAME)
    channel.basic_publish(exchange=EXCHANGE, routing_key=RESPONSE_ROUTING_KEY, body=return_JSON)
    connection.close()
    print " [x] Finished sending msg to sender..."

def callback(ch, method, properties, body):
    print " [x] Received message from sender!"
    print body
    result_JSON = Work().compute(json.loads(body))
    if result_JSON is not None:
        #result_JSON = json.dumps(result_JSON)
        response(result_JSON)
    ch.basic_ack(delivery_tag=method.delivery_tag)
    print "Acknowledged to sender"

if __name__ == '__main__':
    connection = pika.BlockingConnection(pika.ConnectionParameters(host=HOST))
    channel = connection.channel()
    channel.queue_declare(queue=QUEUE_NAME, durable=True)
    channel.basic_consume(callback, queue=QUEUE_NAME, no_ack=False)
    print " [*] Waiting for messages. To exit press CTRL+C"
    channel.start_consuming()
This often works for a while and then falls over. I only need to consume messages one at a time, process them, and send back a response message with a different routing key. Is there a smarter, more reliable way to create a minimal consumer that processes messages one at a time?
The error message is:
Connection reset by peer

Python and RabbitMQ - Best way to listen to consume events from multiple channels?

I have two, separate RabbitMQ instances. I'm trying to find the best way to listen to events from both.
For example, I can consume events on one with the following:
credentials = pika.PlainCredentials(user, password)
connection = pika.BlockingConnection(pika.ConnectionParameters(host="host1", credentials=credentials))
channel = connection.channel()
result = channel.queue_declare(exclusive=True)
channel.queue_bind(exchange="my-exchange", queue=result.method.queue, routing_key='*.*.*.*.*')
channel.basic_consume(callback_func, result.method.queue, no_ack=True)
channel.start_consuming()
I have a second host, "host2", that I'd like to listen to as well. I thought about creating two separate threads to do this, but from what I've read, pika isn't thread safe. Is there a better way? Or would creating two separate threads, each listening to a different Rabbit instance (host1, and host2) be sufficient?
The answer to "what is the best way" depends heavily on your usage pattern of queues and what you mean by "best". Since I can't comment on questions yet, I'll just try to suggest some possible solutions.
In each example I'm going to assume exchange is already declared.
Threads
You can consume messages from two queues on separate hosts in a single process using pika.
You are right - as its own FAQ states, pika is not thread safe, but it can be used in a multi-threaded manner by creating connections to the RabbitMQ hosts per thread. Making this example run in threads using the threading module looks as follows:
import pika
import threading

class ConsumerThread(threading.Thread):
    def __init__(self, host, *args, **kwargs):
        super(ConsumerThread, self).__init__(*args, **kwargs)
        self._host = host

    # Not necessarily a method.
    def callback_func(self, channel, method, properties, body):
        print("{} received '{}'".format(self.name, body))

    def run(self):
        credentials = pika.PlainCredentials("guest", "guest")
        connection = pika.BlockingConnection(
            pika.ConnectionParameters(host=self._host,
                                      credentials=credentials))
        channel = connection.channel()
        result = channel.queue_declare(exclusive=True)
        channel.queue_bind(result.method.queue,
                           exchange="my-exchange",
                           routing_key="*.*.*.*.*")
        channel.basic_consume(self.callback_func,
                              result.method.queue,
                              no_ack=True)
        channel.start_consuming()

if __name__ == "__main__":
    threads = [ConsumerThread("host1"), ConsumerThread("host2")]
    for thread in threads:
        thread.start()
I've declared callback_func as a method purely to use ConsumerThread.name while printing message body. It might as well be a function outside the ConsumerThread class.
Processes
Alternatively, you can always just run one consumer process per queue you want to consume events from.
import pika
import sys

def callback_func(channel, method, properties, body):
    print(body)

if __name__ == "__main__":
    credentials = pika.PlainCredentials("guest", "guest")
    connection = pika.BlockingConnection(
        pika.ConnectionParameters(host=sys.argv[1],
                                  credentials=credentials))
    channel = connection.channel()
    result = channel.queue_declare(exclusive=True)
    channel.queue_bind(result.method.queue,
                       exchange="my-exchange",
                       routing_key="*.*.*.*.*")
    channel.basic_consume(callback_func, result.method.queue, no_ack=True)
    channel.start_consuming()
and then run with:
$ python single_consume.py host1
$ python single_consume.py host2 # e.g. on another console
If the work you're doing on messages from the queues is CPU-heavy, and as long as the number of cores in your CPU >= the number of consumers, it is generally better to use this approach - unless your queues are empty most of the time and the consumers won't utilize this CPU time*.
Async
Another alternative is to involve some asynchronous framework (for example Twisted) and run the whole thing in a single thread.
You can no longer use BlockingConnection in asynchronous code; fortunately, pika has an adapter for Twisted:
from pika.adapters.twisted_connection import TwistedProtocolConnection
from pika.connection import ConnectionParameters
from twisted.internet import protocol, reactor, task
from twisted.python import log

class Consumer(object):
    def on_connected(self, connection):
        d = connection.channel()
        d.addCallback(self.got_channel)
        d.addCallback(self.queue_declared)
        d.addCallback(self.queue_bound)
        d.addCallback(self.handle_deliveries)
        d.addErrback(log.err)

    def got_channel(self, channel):
        self.channel = channel
        return self.channel.queue_declare(exclusive=True)

    def queue_declared(self, queue):
        self._queue_name = queue.method.queue
        self.channel.queue_bind(queue=self._queue_name,
                                exchange="my-exchange",
                                routing_key="*.*.*.*.*")

    def queue_bound(self, ignored):
        return self.channel.basic_consume(queue=self._queue_name)

    def handle_deliveries(self, queue_and_consumer_tag):
        queue, consumer_tag = queue_and_consumer_tag
        self.looping_call = task.LoopingCall(self.consume_from_queue, queue)
        return self.looping_call.start(0)

    def consume_from_queue(self, queue):
        d = queue.get()
        return d.addCallback(lambda result: self.handle_payload(*result))

    def handle_payload(self, channel, method, properties, body):
        print(body)

if __name__ == "__main__":
    consumer1 = Consumer()
    consumer2 = Consumer()

    parameters = ConnectionParameters()
    cc = protocol.ClientCreator(reactor,
                                TwistedProtocolConnection,
                                parameters)
    d1 = cc.connectTCP("host1", 5672)
    d1.addCallback(lambda protocol: protocol.ready)
    d1.addCallback(consumer1.on_connected)
    d1.addErrback(log.err)

    d2 = cc.connectTCP("host2", 5672)
    d2.addCallback(lambda protocol: protocol.ready)
    d2.addCallback(consumer2.on_connected)
    d2.addErrback(log.err)

    reactor.run()
This approach would suit you even better, the more queues you consume from and the less CPU-bound the work performed by your consumers is*.
Python 3
Since you've mentioned pika, I've restricted myself to Python 2.x-based solutions, because pika is not yet ported.
But in case you want to move to >=3.3, one possible option is to use asyncio with one of the AMQP libraries (AMQP is the protocol you speak with RabbitMQ), e.g. asynqp or aioamqp.
* - please note that these are very shallow tips - in most cases the choice is not that obvious; what will be best for you depends on queue "saturation" (messages/time), on what work you do upon receiving these messages, on what environment you run your consumers in, etc.; there's no way to be sure other than to benchmark all the implementations
Below is an example of how I use one rabbitmq instance to listen to 2 queues at the same time:
import pika
import threading

threads = []

def client_info(channel):
    channel.queue_declare(queue='proxy-python')
    print(' [*] Waiting for client messages. To exit press CTRL+C')
    def callback(ch, method, properties, body):
        print(" Received %s" % (body))
    channel.basic_consume(callback, queue='proxy-python', no_ack=True)
    channel.start_consuming()

def scenario_info(channel):
    channel.queue_declare(queue='savi-virnet-python')
    print(' [*] Waiting for scenario messages. To exit press CTRL+C')
    def callback(ch, method, properties, body):
        print(" Received %s" % (body))
    channel.basic_consume(callback, queue='savi-virnet-python', no_ack=True)
    channel.start_consuming()

def manager():
    connection1 = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
    channel1 = connection1.channel()
    connection2 = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
    channel2 = connection2.channel()

    t1 = threading.Thread(target=client_info, args=(channel1,))
    t1.daemon = True
    threads.append(t1)
    t1.start()

    t2 = threading.Thread(target=scenario_info, args=(channel2,))
    t2.daemon = True
    threads.append(t2)
    t2.start()

    for t in threads:
        t.join()

manager()
import asyncio
import tornado.ioloop
import tornado.web
from aio_pika import connect_robust, Message

tornado.ioloop.IOLoop.configure("tornado.platform.asyncio.AsyncIOLoop")
io_loop = tornado.ioloop.IOLoop.current()
asyncio.set_event_loop(io_loop.asyncio_loop)

QUEUE = asyncio.Queue()

class SubscriberHandler(tornado.web.RequestHandler):
    async def get(self):
        message = await QUEUE.get()
        self.finish(message.body)

class PublisherHandler(tornado.web.RequestHandler):
    async def post(self):
        connection = self.application.settings["amqp_connection"]
        channel = await connection.channel()
        try:
            await channel.default_exchange.publish(
                Message(body=self.request.body), routing_key="test",
            )
        finally:
            await channel.close()
            print('ok')
        self.finish("OK")

async def make_app():
    amqp_connection = await connect_robust()
    channel = await amqp_connection.channel()
    queue = await channel.declare_queue("test", auto_delete=True)
    await queue.consume(QUEUE.put, no_ack=True)
    return tornado.web.Application(
        [(r"/publish", PublisherHandler), (r"/subscribe", SubscriberHandler)],
        amqp_connection=amqp_connection,
    )

if __name__ == "__main__":
    app = io_loop.asyncio_loop.run_until_complete(make_app())
    app.listen(8888)
    tornado.ioloop.IOLoop.current().start()
You can use aio-pika in an async way, as in the example above. More examples here:
https://buildmedia.readthedocs.org/media/pdf/aio-pika/latest/aio-pika.pdf
Happy coding :)
Pika can be used in a multithreaded consumer. The only requirement is to have a pika connection per thread.
The Pika GitHub repository has an example here.
A snippet from basic_consumer_threaded.py:
def on_message(ch, method_frame, _header_frame, body, args):
    (conn, thrds) = args
    delivery_tag = method_frame.delivery_tag
    t = threading.Thread(target=do_work, args=(conn, ch, delivery_tag, body))
    t.start()
    thrds.append(t)

threads = []
on_message_callback = functools.partial(on_message, args=(connection, threads))
channel.basic_consume('standard', on_message_callback)
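For completeness, the do_work side of that example acks back on the connection's thread via add_callback_threadsafe. Roughly (paraphrased from the same pika example; details may differ):

def ack_message(ch, delivery_tag):
    # runs on the connection's IO-loop thread
    if ch.is_open:
        ch.basic_ack(delivery_tag)
    # else: the channel is already closed and the message cannot be acked

def do_work(conn, ch, delivery_tag, body):
    # ... long-running work happens here, on the worker thread ...
    cb = functools.partial(ack_message, ch, delivery_tag)
    conn.add_callback_threadsafe(cb)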
