How can I stop the consumer if the queue is empty in RabbitMQ? - Python

I'm new to RabbitMQ. I have implemented a simple producer-consumer setup, and for my use case I need to stop the consumer when the queue is empty, but I can't find a solution.
sender:
import pika

connection = pika.BlockingConnection(
    pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.queue_declare(queue='hello')
channel.basic_publish(exchange='', routing_key='hello', body='Hello World!')
print(" [x] Sent 'Hello World!'")
connection.close()
receiver:
import pika

connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.queue_declare(queue='hello')

def callback(ch, method, properties, body):
    print(" [x] Received %r" % body)

channel.basic_consume(queue='hello', on_message_callback=callback, auto_ack=True)
print(' [*] Waiting for messages. To exit press CTRL+C')
channel.start_consuming()

You can get the count of messages in the queue, then exit the loop when it reaches 0.
import pika

q_name = 'hello'
connection = pika.BlockingConnection()
channel = connection.channel()
q = channel.queue_declare(queue=q_name)
q_len = q.method.message_count
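A minimal sketch of that idea, assuming the 'hello' queue from the question; passive=True only inspects the existing queue without re-declaring it, and basic_get pulls one message at a time:
import pika

q_name = 'hello'
connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()

# Drain the queue one message at a time, stopping when the count reaches 0.
while channel.queue_declare(queue=q_name, passive=True).method.message_count > 0:
    method, properties, body = channel.basic_get(queue=q_name, auto_ack=True)
    if method is not None:
        print(" [x] Received %r" % body)

connection.close()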

Alternatively, you could use a timeout: if a certain amount of time passes while your consumer sits idle, you can kill your worker/process/program.
import pika
import _thread
from threading import Timer

q_name = "hello"
timeout_sec = 5  # times out after 5 seconds of inactivity

conn = pika.BlockingConnection()
ch = conn.channel()
ch.queue_declare(queue=q_name)

def new_timer():
    # interrupt_main() raises KeyboardInterrupt in the main thread when the timer fires
    return Timer(timeout_sec, lambda: _thread.interrupt_main())

def callback(timer_holder, ch, method, properties, body):
    timer_holder[0].cancel()  # a message arrived, so reset the idle timer
    print("[x] Received:", body)
    timer_holder[0] = new_timer()
    timer_holder[0].start()

try:
    timer_holder = [new_timer()]
    timer_holder[0].start()
    ch.basic_consume(
        queue=q_name,
        on_message_callback=lambda *args, **kwargs: callback(timer_holder, *args, **kwargs),
        auto_ack=True,
    )
    ch.start_consuming()
except KeyboardInterrupt:
    print("Nothing left in queue, exiting....")
The above program will exit if the consumer sits idle for 5 seconds.
Also note that my on_message_callback wraps the regular callback that pika expects; the wrapper passes the timer holder into the callback so that every received message cancels the running timer and starts a fresh one.
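For completeness, pika's BlockingChannel also provides a consume() generator with an inactivity_timeout parameter, which gives similar idle-timeout behaviour without extra threads. A minimal sketch against the 'hello' queue:
import pika

q_name = 'hello'
connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.queue_declare(queue=q_name)

# consume() yields (None, None, None) whenever inactivity_timeout elapses with no message
for method, properties, body in channel.consume(queue=q_name, auto_ack=True, inactivity_timeout=5):
    if method is None:
        print("Queue idle for 5 seconds, exiting....")
        break
    print("[x] Received:", body)

channel.cancel()
connection.close()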

Related

Aiokafka library does not consume messages asynchronously

I'm trying to use the Python aiokafka async library, and for some reason I can't process the messages asynchronously.
I created an async consumer and producer, and I use the asyncio library.
environment:
python 3.7.2
aiokafka==0.5.1
kafka-python==1.4.3
Consumer:
from aiokafka import AIOKafkaConsumer
import asyncio
import json
import ast

loop = asyncio.get_event_loop()

async def consume():
    consumer = AIOKafkaConsumer(
        "test_topic", loop=loop, bootstrap_servers='localhost:9092')
    # Get cluster layout and topic/partition allocation
    await consumer.start()
    try:
        async for msg in consumer:
            sleep_time = ast.literal_eval(json.loads(msg.value))
            print('before sleep %s' % sleep_time)
            await asyncio.sleep(sleep_time)
            print('after sleep %s' % sleep_time)
    finally:
        await consumer.stop()

loop.run_until_complete(consume())
Producer:
import json
import uuid
from kafka import KafkaProducer, KafkaConsumer

class KafkaClient(object):
    def __init__(self, topic_name=None, consume=True):
        """
        Initial consumer and producer for Kafka
        :param topic_name: consumer topic name
        """
        self.topic_name = topic_name
        if topic_name is not None:
            self.kafka_connect(topic_name, source='SOURCE')
        self.producer = KafkaProducer(bootstrap_servers='localhost:9092',
                                      key_serializer=str.encode,
                                      value_serializer=lambda m: json.dumps(m).encode('utf-8'))

    def publish_message(self, topic_name, message, extra_data=None):
        try:
            msg_uid = str(uuid.uuid1())
            self.producer.send(topic_name, value=json.dumps(message))
            self.producer.flush()
            print('Message published [msg_uid]: %s' % msg_uid)
            return True
        except Exception as err:
            print(err)
            return False

k = KafkaClient()
for i in range(0, 1):
    k.publish_message('test_topic', 5)
    k.publish_message('test_topic', 3)
    k.publish_message('test_topic', 1)
expected result :
the process will print:
before sleep 5
before sleep 3
before sleep 1
after sleep 1
after sleep 3
after sleep 5
actual result:
the process prints
before sleep 5
after sleep 5
before sleep 3
after sleep 3
before sleep 1
after sleep 1
On its own, async for doesn't process a sequence in parallel - it just allows a coroutine to suspend while waiting for the next item to be produced by the async iterable. You can think of it as a series of awaits on the __anext__ special method, analogous to ordinary for being a series of calls to __next__.
But it's easy enough to spawn tasks that process the messages as they arrive. For example:
async def process(msg):
    sleep_time = ast.literal_eval(json.loads(msg.value))
    print('before sleep %s' % sleep_time)
    await asyncio.sleep(sleep_time)
    print('after sleep %s' % sleep_time)

async def consume():
    consumer = AIOKafkaConsumer(
        "test_topic", loop=loop, bootstrap_servers='localhost:9092')
    await consumer.start()
    tasks = []
    try:
        async for msg in consumer:
            tasks.append(asyncio.create_task(process(msg)))
    finally:
        await consumer.stop()
        await asyncio.gather(*tasks)
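If a large backlog could spawn too many tasks at once, one optional variant (not part of the original answer) is to cap concurrency with an asyncio.Semaphore around the process() coroutine above; the limit of 10 here is arbitrary:
sem = asyncio.Semaphore(10)  # arbitrary cap on concurrently processed messages

async def process_bounded(msg):
    # acquire the semaphore before doing the work, release it when done
    async with sem:
        await process(msg)

# then, inside consume(), spawn the bounded wrapper instead:
#     tasks.append(asyncio.create_task(process_bounded(msg)))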

Stop Tornado IOLoop gracefully

I have this async worker functionality using tornado's ioloop.
I'm trying to shut down the loop gracefully on Ctrl+C, but I get the following error:
tornado.ioloop.TimeoutError: Operation timed out after None seconds
I know I can catch it, but I want to finish the process in a graceful way. How can I achieve that?
#!/usr/bin/env python
import time
import signal
import random
from tornado import gen, ioloop, queues

concurrency = 10

def sig_exit(signum, frame):
    ioloop.IOLoop.current().add_callback_from_signal(shutdown)

def shutdown():
    print('Will shutdown in few seconds ...')
    io_loop = ioloop.IOLoop.current()
    deadline = time.time() + 3

    def stop_loop():
        now = time.time()
        if now < deadline and (io_loop._callbacks or io_loop._timeouts):
            io_loop.add_timeout(now + 1, stop_loop)
        else:
            io_loop.stop()
            print('Shutdown')

    stop_loop()

@gen.coroutine
def main():
    q = queues.Queue()
    q.put(1)

    @gen.coroutine
    def do_stuff():
        print("doing stuff")
        yield gen.Task(ioloop.IOLoop.instance().add_timeout, time.time() + random.randint(1, 5))
        print("done doing stuff")

    @gen.coroutine
    def worker():
        while True:
            yield do_stuff()

    for _ in range(concurrency):
        worker()

    yield q.join()

if __name__ == '__main__':
    signal.signal(signal.SIGTERM, sig_exit)
    signal.signal(signal.SIGINT, sig_exit)
    io_loop = ioloop.IOLoop.instance()
    io_loop.run_sync(main)
If you're using run_sync, you can no longer call IOLoop.stop - run_sync is now responsible for that. So if you want to make this shutdown "graceful" (instead of just raising a KeyboardInterrupt at the point where you now call stop() and exiting with a stack trace), you need to change the coroutine passed to run_sync so it exits.
One possible solution is a tornado.locks.Event:
# Create a global Event
shutdown_event = tornado.locks.Event()

def shutdown():
    # Same as in the question, but instead of `io_loop.stop()`:
    shutdown_event.set()

@gen.coroutine
def main():
    # Use a WaitIterator to exit when either the queue
    # is done or shutdown is triggered.
    wait_iter = gen.WaitIterator(q.join(), shutdown_event.wait())
    # In this case we just want to wait for the first one; we don't
    # need to actually iterate over the WaitIterator.
    yield wait_iter.next()
Here is the same Event pattern with a native-coroutine main() passed to run_sync:
async def main():
    tornado.options.parse_command_line()
    ...
    app = Application(db)
    app.listen(options.port)

    shutdown_event = tornado.locks.Event()

    def shutdown(signum, frame):
        print("shutdown database !!!!")
        db.close()
        shutdown_event.set()

    signal.signal(signal.SIGTERM, shutdown)
    signal.signal(signal.SIGINT, shutdown)

    await shutdown_event.wait()
    print("\n\nshutdown -h now")

if __name__ == "__main__":
    tornado.ioloop.IOLoop.current().run_sync(main)

Multiple consumers in RabbitMQ for multiple queues

I have 2 queues, say q1 and q2, which correspond to exchanges e1 and e2 with binding keys b1 and b2. I want to run consumer functions in parallel, say c1 and c2, which will listen to q1 and q2 respectively. I tried the following:
def c1():
    connection = pika.BlockingConnection(pika.ConnectionParameters(host=constants.rmqHostIp))
    channel = connection.channel()
    channel.exchange_declare(exchange='e1', durable='true',
                             type='topic')
    result = channel.queue_declare(durable='false', queue='q1')
    queue_name = result.method.queue
    binding_key = "b1"
    channel.queue_bind(exchange='e1',
                       queue=queue_name,
                       routing_key=binding_key)
    channel.basic_consume(callback, queue=queue_name, no_ack=False)
    channel.start_consuming()

def c2():
    connection = pika.BlockingConnection(pika.ConnectionParameters(host=constants.rmqHostIp))
    channel = connection.channel()
    channel.exchange_declare(exchange='e2', durable='true',
                             type='topic')
    result = channel.queue_declare(durable='false', queue='q2')
    queue_name = result.method.queue
    binding_key = "b2"
    channel.queue_bind(exchange='e1',
                       queue=queue_name,
                       routing_key=binding_key)
    channel.basic_consume(callback, queue=queue_name, no_ack=False)
    channel.start_consuming()

if __name__ == '__main__':
    c1()
    c2()
However, only the c1 function is listening; the c2 function never gets executed. How can I run both functions?
Thanks in advance.
EDIT: I have methods c1 and c2 in two different modules (files).
In order to run both functions simultaneously, some multi-threading or multi-processing mechanism needs to be in place. Please have a look here for some Python examples.
Here is your code modified with the Process class. It can also use threads (a threading variant is sketched after the code below), or you can run each consumer explicitly as a separate OS process.
import pika
from multiprocessing import Process

def callback(ch, method, properties, body):
    print('callback got data')

class c1():
    def __init__(self):
        self.connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
        self.channel = self.connection.channel()
        self.channel.exchange_declare(exchange='e1', durable='true', type='topic')
        result = self.channel.queue_declare(durable='false', queue='q1')
        queue_name = result.method.queue
        binding_key = "b1"
        self.channel.queue_bind(exchange='e1', queue=queue_name, routing_key=binding_key)
        self.channel.basic_consume(callback, queue=queue_name, no_ack=False)

    def run(self):
        self.channel.start_consuming()

class c2():
    def __init__(self):
        self.connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
        self.channel = self.connection.channel()
        self.channel.exchange_declare(exchange='e2', durable='true', type='topic')
        result = self.channel.queue_declare(durable='false', queue='q2')
        queue_name = result.method.queue
        binding_key = "b2"
        self.channel.queue_bind(exchange='e1', queue=queue_name, routing_key=binding_key)
        self.channel.basic_consume(callback, queue=queue_name, no_ack=False)

    def run(self):
        self.channel.start_consuming()

if __name__ == '__main__':
    subscriber_list = []
    subscriber_list.append(c1())
    subscriber_list.append(c2())

    # execute
    process_list = []
    for sub in subscriber_list:
        process = Process(target=sub.run)
        process.start()
        process_list.append(process)

    # wait for all processes to finish
    for process in process_list:
        process.join()
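As noted above, threads work too: each subscriber owns its own connection and channel, so the same classes can be driven by threading.Thread instead of Process (a minimal sketch):
import threading

if __name__ == '__main__':
    subscriber_list = [c1(), c2()]
    thread_list = [threading.Thread(target=sub.run) for sub in subscriber_list]
    for t in thread_list:
        t.start()
    # wait for both consumers; they block in start_consuming()
    for t in thread_list:
        t.join()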
You can receive messages from multiple queues using one connection and one channel. The pika Python module has built-in test code that exercises one blocking connection and one channel (FYI, this test code is from 2015).
Here is the pika test code that gets messages from multiple queues using one blocking connection and one channel: https://github.com/pika/pika/blob/1.3.0/tests/acceptance/blocking_adapter_test.py#L2072-L2172 .
P.S. For my own stubborn reasons, I also wrote similar code that used one blocking connection, one channel, and two queues, and verified that it works as well.
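A minimal sketch of that approach (not the linked test code), assuming the q1 and q2 queues can be declared with default arguments, using one BlockingConnection and one channel with the pika 1.x API:
import pika

connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()

def callback(ch, method, properties, body):
    # method.routing_key tells you which binding delivered the message
    print("got %r via %s" % (body, method.routing_key))
    ch.basic_ack(delivery_tag=method.delivery_tag)

for queue in ('q1', 'q2'):
    channel.queue_declare(queue=queue)
    channel.basic_consume(queue=queue, on_message_callback=callback)

channel.start_consuming()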

Asynchronous RabbitMQ consumer with aioamqp

I'm trying to write an asynchronous consumer using asyncio/aioamqp. My problem is, the callback coroutine (below) is blocking. I set the channel to do a basic_consume(), and assign the callback as callback(). The callback has a "yield from asyncio.sleep" statement (to simulate "work"), which takes an integer from the publisher and sleeps for that amount of time before printing the message.
If I published two messages, one with a time of "10", immediately followed by one with a time of "1", I expected the second message would print first, since it has a shorter sleep time. Instead, the callback blocks for 10 seconds, prints the first message, and then prints the second.
It appears either basic_consume, or the callback, is blocking somewhere. Is there another way this could be handled?
import asyncio
import random
import aioamqp

@asyncio.coroutine
def callback(body, envelope, properties):
    yield from asyncio.sleep(int(body))
    print("consumer {} recved {} ({})".format(envelope.consumer_tag, body, envelope.delivery_tag))

@asyncio.coroutine
def receive_log():
    try:
        transport, protocol = yield from aioamqp.connect('localhost', 5672, login="login", password="password")
    except:
        print("closed connections")
        return

    channel = yield from protocol.channel()
    exchange_name = 'cloudstack-events'
    exchange_name = 'test-async-exchange'
    queue_name = 'async-queue-%s' % random.randint(0, 10000)

    yield from channel.exchange(exchange_name, 'topic', auto_delete=True, passive=False, durable=False)
    yield from asyncio.wait_for(channel.queue(queue_name, durable=False, auto_delete=True), timeout=10)

    binding_keys = ['mykey']
    for binding_key in binding_keys:
        print("binding", binding_key)
        yield from asyncio.wait_for(channel.queue_bind(exchange_name=exchange_name,
                                                       queue_name=queue_name,
                                                       routing_key=binding_key), timeout=10)

    print(' [*] Waiting for logs. To exit press CTRL+C')
    yield from channel.basic_consume(queue_name, callback=callback)

loop = asyncio.get_event_loop()
loop.create_task(receive_log())
loop.run_forever()
For those interested, I figured out a way to do this. I'm not sure if it's best practice, but it's accomplishing what I need.
Rather than doing the "work" (in this case, asyncio.sleep) inside the callback, I create a new task on the loop, scheduling a separate coroutine to run do_work(). Presumably this works because it frees callback() to return immediately.
I loaded up a few hundred events in Rabbit with different sleep timers, and they were interleaved when printed by the code below, so it seems to be working. Hope this helps someone!
import asyncio
import random
import aioamqp

@asyncio.coroutine
def do_work(envelope, body):
    yield from asyncio.sleep(int(body))
    print("consumer {} recved {} ({})".format(envelope.consumer_tag, body, envelope.delivery_tag))

@asyncio.coroutine
def callback(body, envelope, properties):
    loop = asyncio.get_event_loop()
    loop.create_task(do_work(envelope, body))

@asyncio.coroutine
def receive_log():
    try:
        transport, protocol = yield from aioamqp.connect('localhost', 5672, login="login", password="password")
    except:
        print("closed connections")
        return

    channel = yield from protocol.channel()
    exchange_name = 'cloudstack-events'
    exchange_name = 'test-async-exchange'
    queue_name = 'async-queue-%s' % random.randint(0, 10000)

    yield from channel.exchange(exchange_name, 'topic', auto_delete=True, passive=False, durable=False)
    yield from asyncio.wait_for(channel.queue(queue_name, durable=False, auto_delete=True), timeout=10)

    binding_keys = ['mykey']
    for binding_key in binding_keys:
        print("binding", binding_key)
        yield from asyncio.wait_for(channel.queue_bind(exchange_name=exchange_name,
                                                       queue_name=queue_name,
                                                       routing_key=binding_key), timeout=10)

    print(' [*] Waiting for logs. To exit press CTRL+C')
    yield from channel.basic_consume(queue_name, callback=callback)

loop = asyncio.get_event_loop()
loop.create_task(receive_log())
loop.run_forever()

pika, stop_consuming does not work

I'm new to RabbitMQ and pika, and I'm having trouble stopping consuming.
channel and queue setting:
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
channel.queue_declare(queue=new_task_id, durable=True, auto_delete=True)
Basically, consumer and producer are like this:
consumer:
def task(task_id):
    def callback(channel, method, properties, body):
        if body != "quit":
            print(body)
        else:
            print(body)
            channel.stop_consuming(task_id)

    channel.basic_consume(callback, queue=task_id, no_ack=True)
    channel.start_consuming()
    print("finish")
    return "finish"
producer:
proc = Popen(['app/sample.sh'], shell=True, stdout=PIPE)
while proc.returncode is None:  # running
    line = proc.stdout.readline()
    if line:
        channel.basic_publish(
            exchange='',
            routing_key=self.request.id,
            body=line
        )
    else:
        channel.basic_publish(
            exchange='',
            routing_key=self.request.id,
            body="quit"
        )
        break
consumer task gave me output:
# ... output from sample.sh, as expected
quit
�}q(UstatusqUSUCCESSqU tracebackqNUresultqNUtask_idqU
1419350416qUchildrenq]u.
However, "finish" didn't get printed, so I'm guessing it's because channel.stop_consuming(task_id) didn't stop consuming. If so, what is the correct way to do? Thank you.
I had the same problem. It seems to be caused by the fact that, internally, start_consuming calls self.connection.process_data_events(time_limit=None). This time_limit=None makes it hang.
I managed to work around this problem by replacing the call to channel.start_consuming() with its implementation, hacked:
while channel._consumer_infos:
    channel.connection.process_data_events(time_limit=1)  # 1 second
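Applied to the task() function from the question, the workaround might look roughly like this (a sketch that keeps the question's basic_consume call; note that body arrives as bytes in Python 3, and calling stop_consuming() without arguments cancels the consumer so the loop below can exit):
def task(task_id):
    def callback(channel, method, properties, body):
        print(body)
        if body == b"quit":
            channel.stop_consuming()  # clears the channel's consumer list

    channel.basic_consume(callback, queue=task_id, no_ack=True)
    # hacked replacement for channel.start_consuming()
    while channel._consumer_infos:
        channel.connection.process_data_events(time_limit=1)  # 1 second
    print("finish")
    return "finish"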
I have a class with channel and connection member variables, which are initialized by a separate thread. The consumer of the MyClient class calls the close() method, and the connection and consumer are stopped.
import threading
import pika

class MyClient:
    def __init__(self, unique_client_code):
        self.Channel = None
        self.Conn: pika.BlockingConnection = None
        self.ClientThread = self.init_client_driver()

    def _close_callback(self):
        self.Channel.stop_consuming()
        self.Channel.close()
        self.Conn.close()

    def _client_driver_thread(self, tmout=None):
        print("Starting Driver Thread...")
        self.Conn = pika.BlockingConnection(pika.ConnectionParameters("localhost"))
        self.Channel = self.Conn.channel()

    def init_client_driver(self, tmout=None):
        kwargs = {'tmout': tmout}
        t = threading.Thread(target=self._client_driver_thread, kwargs=kwargs)
        t.daemon = True
        t.start()
        return t

    def close(self):
        self.Conn.add_callback_threadsafe(self._close_callback)
        self.ClientThread.join()
