AWS Lambda multiple threads and boto3 - python

I am writing an AWS Lambda function that deletes 100,000 objects per invocation from an S3 bucket. I am trying to see if I can create and run the deletions on background threads. I have the following code.
import boto3
import boto3.session
from threading import Thread

http_response = []
MAX = 999  # delete_objects accepts at most 1,000 keys per call
threads = []

class myThread(Thread):
    def __init__(self, objects_to_delete, bucket_name):
        Thread.__init__(self)
        self.objects_to_delete = objects_to_delete
        self.bucket_name = bucket_name

    def run(self):
        session = boto3.session.Session()
        s3 = session.client('s3')
        # COMES HERE AND PRINTS THE NAME OF THE BUCKET.
        print(self.bucket_name)
        response = s3.delete_objects(Bucket=self.bucket_name,
                                     Delete={'Objects': self.objects_to_delete[0:MAX]})
        # THIS IS NOT GETTING PRINTED. MEANING, delete_objects IS BREAKING / NOT EXECUTING.
        print(response)

def handler(event, context):
    keys = event['keys']
    bucket_name = event["bucket"]
    if (len(keys) == 0 or len(bucket_name) == 0):
        return {
            "message": http_response
        }
    try:
        t = myThread(keys[0:MAX], bucket_name)
        t.start()
        threads.append(t)
    except:
        print("Something went wrong!!! " + str(keys[0:MAX]))
    del keys[0:MAX]
    handler({'keys': keys, 'bucket': bucket_name}, context)
Is there anything wrong with what I am doing here? The thread seems to start, but it never makes the delete_objects call, and it doesn't even return an error message I could learn from. Any thoughts or ideas?
One more thing: when I run this function locally on my computer, it runs just fine without any problem.

It turns out that after starting the threads, you should join them: once the handler returns and the process quits, the threads die with it. So I did the following:
import boto3
from threading import Thread

MAX = 999
threads = []

class myThread(Thread):
    def __init__(self, bucket_name, objects):
        Thread.__init__(self)
        self.bucket_name = bucket_name
        self.objects = objects

    def run(self):
        s3 = boto3.client('s3', region_name="us-east-1")
        response = s3.delete_objects(Bucket=self.bucket_name, Delete={'Objects': self.objects})
        print(response)

def handler(event, context):
    keys = event["keys"]
    bucket_name = event["bucket"]
    objects_to_delete = keys  # up to 100,000 {'Key': ...} entries
    while (len(objects_to_delete) != 0):
        t = myThread(bucket_name, objects_to_delete[0:MAX])
        threads.append(t)
        del objects_to_delete[0:MAX]
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
    return {
        "message": "Success Message."
    }
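
For comparison, the same batching can be done with concurrent.futures, which joins its workers implicitly when the context manager exits. A minimal sketch (the max_workers value is an arbitrary assumption; boto3 documents clients as generally thread-safe, unlike Sessions):

import boto3
from concurrent.futures import ThreadPoolExecutor

MAX = 999  # delete_objects accepts at most 1,000 keys per call

def handler(event, context):
    keys = event["keys"]  # list of {'Key': ...} dicts
    bucket_name = event["bucket"]
    s3 = boto3.client('s3', region_name="us-east-1")
    batches = [keys[i:i + MAX] for i in range(0, len(keys), MAX)]

    def delete_batch(batch):
        return s3.delete_objects(Bucket=bucket_name, Delete={'Objects': batch})

    # Exiting the with-block waits for every submitted batch to finish,
    # so no explicit join is needed.
    with ThreadPoolExecutor(max_workers=10) as executor:
        for response in executor.map(delete_batch, batches):
            print(response)
    return {"message": "Success Message."}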

Related

Not receiving messages from Kafka Topic

I am receiving None when calling poll() in this program, but I get the messages when running kafka-console-consumer.bat from cmd, and I can't figure out what exactly the problem is.
Execution starts from main.py:
from queue import Queue
from concurrent.futures import ThreadPoolExecutor
import time
import json
from kafka_message_consumer import KafkaMessageConsumer
from kafka_discovery_executor import KafkaDiscoveryExecutor

with open('kafka_properties.json') as f:
    kafka_properties = json.loads(f.read())

message_queue = Queue()
kafka_message_consumer = KafkaMessageConsumer(kafka_properties, message_queue)
kafka_discovery_executor = KafkaDiscoveryExecutor(message_queue, kafka_properties)

with ThreadPoolExecutor(max_workers=5) as executor:
    executor.submit(kafka_message_consumer.run())
    time.sleep(1)
    executor.submit(kafka_discovery_executor.run())
    time.sleep(1)
The KafkaDiscoveryExecutor class consumes messages from the shared queue and processes them.
This is kafka_message_consumer.py
import logging
from confluent_kafka import Consumer

class KafkaMessageConsumer:
    def __init__(self, kafka_properties, message_queue):
        self.message_queue = message_queue
        self.logger = logging.getLogger('KafkaMessageConsumer')
        self.kafka_stream_consumer = None
        self.create_consumer(kafka_properties)

    def create_consumer(self, kafka_properties):
        """
        Create an instance of Kafka Consumer with the consumer configuration properties
        and subscribe to the defined topic(s).
        """
        consumer_config = dict()
        # Consumer configuration properties.
        consumer_config['bootstrap.servers'] = kafka_properties.get('bootstrap.servers')
        consumer_config['group.id'] = kafka_properties.get('group.id')
        consumer_config['enable.auto.commit'] = True
        consumer_config['auto.offset.reset'] = 'earliest'
        # For SSL security:
        # consumer_config['security.protocol'] = 'SASL_SSL'
        # consumer_config['sasl.mechanisms'] = 'PLAIN'
        # consumer_config['sasl.username'] = ''
        # consumer_config['sasl.password'] = ''
        # Create the consumer using consumer_config.
        self.kafka_stream_consumer = Consumer(consumer_config)
        # Subscribe to the specified topic(s).
        self.kafka_stream_consumer.subscribe(['mytopic'])

    def run(self):
        while True:
            msg = self.kafka_stream_consumer.poll(1.0)
            if msg is None:
                # No message available within timeout.
                print("Waiting for message or event/error in poll()")
                continue
            elif msg.error():
                print("Error: {}".format(msg.error()))
            else:
                # Consume the record: push the message into message_queue.
                try:
                    self.message_queue.put(msg)
                except Exception as e:
                    self.logger.critical("Error occurred in Kafka consumer: {}".format(e))
The specified topic has events, but I am getting None here, and the print statement inside 'if msg is None:' is the one that executes.
I am still not sure why the above code did not work as it should. (One likely culprit: executor.submit(kafka_message_consumer.run()) calls run() immediately on the main thread instead of handing the callable to the pool, so the first consumer loop blocks everything after it.)
Here's what I changed to make this code work:
I used the threading module instead of concurrent.futures.
I used daemon threads.
I call Thread.__init__() inside the constructors of the classes [KafkaMessageConsumer, KafkaDiscoveryExecutor].
Here's main.py
from queue import Queue
import threading
import time
import json
from kafka_message_consumer import KafkaMessageConsumer
from kafka_discovery_executor import KafkaDiscoveryExecutor

def main():
    with open('kafka_properties.json') as f:
        kafka_properties = json.loads(f.read())
    message_queue = Queue()
    threads = [
        KafkaMessageConsumer(kafka_properties, message_queue),
        KafkaDiscoveryExecutor(message_queue, kafka_properties)
    ]
    for thread in threads:
        thread.start()
        time.sleep(1)
    for thread in threads:
        thread.join()
        time.sleep(1)

if __name__ == "__main__":
    main()
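
For reference, kafka_properties.json only needs the two keys that create_consumer reads. A minimal example (the broker address and group id here are assumptions):

{
    "bootstrap.servers": "localhost:9092",
    "group.id": "my-consumer-group"
}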
And here is kafka_message_consumer.py:
import logging
from confluent_kafka import Consumer
import threading

class KafkaMessageConsumer(threading.Thread):
    daemon = True

    def __init__(self, kafka_properties, message_queue):
        threading.Thread.__init__(self)
        self.message_queue = message_queue
        self.logger = logging.getLogger('KafkaMessageConsumer')
        self.kafka_stream_consumer = None
        self.create_consumer(kafka_properties)

    def create_consumer(self, kafka_properties):
        """
        Create an instance of Kafka Consumer with the consumer configuration properties
        and subscribe to the defined topic(s).
        """
        consumer_config = dict()
        # Consumer configuration properties.
        consumer_config['bootstrap.servers'] = kafka_properties.get('bootstrap.servers')
        consumer_config['group.id'] = kafka_properties.get('group.id')
        consumer_config['enable.auto.commit'] = True
        consumer_config['auto.offset.reset'] = 'earliest'
        # Create the consumer using consumer_config.
        self.kafka_stream_consumer = Consumer(consumer_config)
        # Subscribe to the specified topic(s).
        self.kafka_stream_consumer.subscribe(['mytopic'])

    def run(self):
        while True:
            msg = self.kafka_stream_consumer.poll(1.0)
            if msg is None:
                # No message available within timeout.
                print("Waiting for message or event/error in poll()")
                continue
            elif msg.error():
                print("Error: {}".format(msg.error()))
            else:
                # Consume the record: push the message into message_queue.
                try:
                    self.message_queue.put(msg)
                except Exception as e:
                    self.logger.critical("Error occurred in Kafka consumer: {}".format(e))
        self.kafka_stream_consumer.close()
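
kafka_discovery_executor.py is not shown in the question. A minimal sketch that matches how it is constructed and started in main.py above, with the actual processing step left as a placeholder assumption:

import threading

class KafkaDiscoveryExecutor(threading.Thread):
    daemon = True

    def __init__(self, message_queue, kafka_properties):
        threading.Thread.__init__(self)
        self.message_queue = message_queue
        self.kafka_properties = kafka_properties

    def run(self):
        while True:
            # Blocks until KafkaMessageConsumer puts a message on the queue.
            msg = self.message_queue.get()
            # Placeholder for the real discovery/processing logic.
            print("Processing message: {}".format(msg.value()))
            self.message_queue.task_done()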

RabbitMQ exchange becomes unresponsive after some amount of time

I have a RabbitMQ server running in Docker and two Python clients that connect to the server and send messages to each other using a headers exchange. The message rate is about 10/s. After some time (usually after 300-500 messages have been exchanged), one of the exchanges becomes unresponsive. The channel.basic_publish call passes without any exception, but the receiver doesn't receive any messages, and the RabbitMQ dashboard shows no activity on that exchange.
Here is the code example:
import pika
import threading
import time
import sys

class Test:
    def __init__(
        self,
        p_username,
        p_password,
        p_host,
        p_port,
        p_virtualHost,
        p_outgoingExchange,
        p_incomingExchange
    ):
        self.__outgoingExch = p_outgoingExchange
        self.__incomingExch = p_incomingExchange
        self.__headers = {'topic': 'test'}
        self.__queueName = ''
        self.__channelConsumer = None
        self.__channelProducer = None
        self.__isRun = False
        l_credentials = pika.PlainCredentials(p_username, p_password)
        l_parameters = pika.ConnectionParameters(
            host=p_host,
            port=p_port,
            virtual_host=p_virtualHost,
            credentials=l_credentials,
            socket_timeout=30,
            connection_attempts=5,
        )
        self.__connection = pika.SelectConnection(
            parameters=l_parameters,
            on_open_callback=self.__on_connection_open,
            on_open_error_callback=self.__on_connection_open_error,
            on_close_callback=self.__on_connection_closed
        )

    def __on_connection_open(self, _conn):
        print("Connection opened")
        self.__connection.channel(on_open_callback=self.__on_consume_channel_open)
        self.__connection.channel(on_open_callback=self.__on_produce_channel_open)

    def __on_connection_open_error(self, _conn, _exception):
        print("Failed to open connection")

    def __on_connection_closed(self, _conn, p_exception):
        print("Connection closed: {}".format(p_exception))

    def __on_consume_channel_open(self, p_ch):
        print("Consumer channel opened")
        self.__channelConsumer = p_ch
        self.__channelConsumer.exchange_declare(
            exchange=self.__incomingExch,
            exchange_type="headers",
            callback=self.__on_consume_exchange_declared
        )

    def __on_consume_exchange_declared(self, p_method):
        print("Consumer exchange declared")
        self.__channelConsumer.queue_declare(
            queue='',
            callback=self.__on_queue_declare
        )

    def __on_queue_declare(self, p_method):
        print("Consumer queue declared")
        self.__queueName = p_method.method.queue
        self.__channelConsumer.queue_bind(
            queue=self.__queueName,
            exchange=self.__incomingExch,
            arguments=self.__headers,
        )
        self.__channelConsumer.basic_consume(self.__queueName, self.__onMessageReceived)

    def __on_produce_channel_open(self, p_ch):
        print("Producer channel opened")
        self.__channelProducer = p_ch
        self.__channelProducer.exchange_declare(
            exchange=self.__outgoingExch,
            exchange_type="headers",
            callback=self.__on_produce_exchange_declared
        )

    def __on_produce_exchange_declared(self, p_method):
        print("Producer exchange declared")
        l_publisher = threading.Thread(target=self.__publishProcedure)
        l_publisher.start()

    def __onMessageReceived(self, p_channel, p_method, p_properties, p_body):
        p_channel.basic_ack(p_method.delivery_tag)
        print("Message received: {}".format(p_body))

    def __publishProcedure(self):
        print("Start publishing")
        l_msgCounter = 0
        while self.__isRun:
            l_msgCounter += 1
            self.__publish(l_msgCounter)
            time.sleep(0.1)

    def __publish(self, p_msgCounter):
        self.__channelProducer.basic_publish(
            exchange=self.__outgoingExch,
            routing_key="#",
            body=str(p_msgCounter),
            properties=pika.BasicProperties(headers=self.__headers)
        )

    def run(self):
        self.__isRun = True
        try:
            self.__connection.ioloop.start()
        except KeyboardInterrupt:
            self.__isRun = False
            self.__connection.close()
            print("Exit...")

if __name__ == '__main__':
    if len(sys.argv) < 2:
        print("Provide node name [node1 | node2]")
        exit(-1)
    l_outgoingExch = ''
    l_incomingExch = ''
    if sys.argv[1] == 'node1':
        l_outgoingExch = 'node2.headers'
        l_incomingExch = 'node1.headers'
    elif sys.argv[1] == 'node2':
        l_outgoingExch = 'node1.headers'
        l_incomingExch = 'node2.headers'
    else:
        print("Wrong node name")
        exit(-1)
    l_testInstance = Test(
        p_username='admin',
        p_password='admin',
        p_host='localhost',
        p_port=5672,
        p_virtualHost='/',
        p_incomingExchange=l_incomingExch,
        p_outgoingExchange=l_outgoingExch
    )
    l_testInstance.run()
I run two instances as two nodes (node1 and node2) so they can communicate with each other.
Also, I sometimes hit the issue described here:
Stream connection lost: AssertionError(('_AsyncTransportBase._produce() tx buffer size underflow', -275, 1),)
I found that I had misused pika. As the pika documentation states, it is not safe to share a connection across multiple threads; the only way to interact with a connection from another thread is through the add_callback_threadsafe function. In my example it should look like this (note the added functools import):
import functools

def __publishProcedure(self):
    print("Start publishing")
    l_msgCounter = 0
    while self.__isRun:
        l_msgCounter += 1
        # Hand the actual publish over to the connection's ioloop thread.
        l_cb = functools.partial(self.__publish, l_msgCounter)
        self.__connection.ioloop.add_callback_threadsafe(l_cb)
        time.sleep(0.1)

def __publish(self, p_msgCounter):
    self.__channelProducer.basic_publish(
        exchange=self.__outgoingExch,
        routing_key="#",
        body=str(p_msgCounter),
        properties=pika.BasicProperties(headers=self.__headers)
    )
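
Alternatively, if you want the publisher to stay on its own thread without bouncing through the ioloop, the thread can own a separate connection outright; pika is fine with one connection per thread. A rough sketch using a BlockingConnection (the parameters are assumed to match l_parameters above):

import pika

def publish_on_own_connection(parameters, exchange, headers, count=1000):
    # This connection and channel are owned exclusively by this thread.
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()
    for counter in range(1, count + 1):
        channel.basic_publish(
            exchange=exchange,
            routing_key="#",
            body=str(counter),
            properties=pika.BasicProperties(headers=headers),
        )
    connection.close()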

How to mock functionality of boto3 module using pytest

I have a custom module called sqs.py. The script does the following:
Get a message from AWS SQS
Get the AWS S3 path to delete
Delete the path
Send a confirmation email to the user
I'm trying to write unit tests for this module that verify the code executes as expected and that it raises exceptions when they occur.
This means I will need to mock the responses from the Boto3 calls that I make. My problem is that the code first establishes the SQS client to obtain the message and then makes a second call to establish the S3 client. I'm not sure how to mock these two independent calls and fake their responses so I can test my script's functionality. Perhaps my approach is incorrect. In any case, any advice on how to do this properly is appreciated.
Here's what the code looks like:
import boto3
import json
import os
import pprint
import time
import asyncio
import logging
from send_email import send_email

queue_url = 'https://xxxx.queue.amazonaws.com/1234567890/queue'

def shutdown(message):
    """ Sends a shutdown command to the OS. """
    os.system(f'shutdown +5 "{message}"')

def send_failure_email(email_config: dict, error_message: str):
    """ Sends an email notification to the user with the error message attached. """
    recipient_name = email_config['recipient_name']
    email_config['subject'] = 'Subject: Restore Failed'
    email_config['message'] = f'Hello {recipient_name},\n\n' \
        + 'We regret that an error has occurred during the restore process. ' \
        + 'Please try again in a few minutes.\n\n' \
        + f'Error: {error_message}.\n\n'
    try:
        send_email(email_config)
    except RuntimeError as error_message:
        logging.error(f'ERROR: cannot send email to user. {error_message}')

async def restore_s3_objects(s3_client: object, p_bucket_name: str, p_prefix: str):
    """Attempts to restore objects specified by p_bucket_name and p_prefix.
    Returns True if a restore took place, False otherwise.
    """
    is_truncated = True
    key_marker = None
    key = ''
    number_of_items_restored = 0
    has_restore_occurred = False
    logging.info(f'performing restore for {p_bucket_name}/{p_prefix}')
    try:
        while is_truncated == True:
            if not key_marker:
                version_list = s3_client.list_object_versions(
                    Bucket=p_bucket_name,
                    Prefix=p_prefix)
            else:
                version_list = s3_client.list_object_versions(
                    Bucket=p_bucket_name,
                    Prefix=p_prefix,
                    KeyMarker=key_marker)
            if 'DeleteMarkers' in version_list:
                logging.info('found delete markers')
                delete_markers = version_list['DeleteMarkers']
                for d in delete_markers:
                    if d['IsLatest'] == True:
                        key = d['Key']
                        version_id = d['VersionId']
                        s3_client.delete_object(
                            Bucket=p_bucket_name,
                            Key=key,
                            VersionId=version_id
                        )
                        number_of_items_restored = number_of_items_restored + 1
            is_truncated = version_list['IsTruncated']
            logging.info(f'is_truncated: {is_truncated}')
            if 'NextKeyMarker' in version_list:
                key_marker = version_list['NextKeyMarker']
        if number_of_items_restored > 0:
            has_restore_occurred = True
        return has_restore_occurred
    except Exception as error_message:
        raise RuntimeError(error_message)

async def main():
    if 'AWS_ACCESS_KEY_ID' in os.environ \
            and 'AWS_SECRET_ACCESS_KEY' in os.environ \
            and os.environ['AWS_ACCESS_KEY_ID'] != '' \
            and os.environ['AWS_SECRET_ACCESS_KEY'] != '':
        sqs_client = boto3.client(
            'sqs',
            aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'],
            aws_secret_access_key=os.environ['AWS_SECRET_ACCESS_KEY'],
            verify=False
        )
        s3_client = boto3.client(
            's3',
            aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'],
            aws_secret_access_key=os.environ['AWS_SECRET_ACCESS_KEY'],
            verify=False
        )
    else:
        sqs_client = boto3.client(
            'sqs',
            verify=False,
        )
        s3_client = boto3.client(
            's3',
            verify=False,
        )
    received_message = sqs_client.receive_message(
        QueueUrl=queue_url,
        AttributeNames=['All'],
        VisibilityTimeout=10,
        WaitTimeSeconds=20,  # Wait up to 20 seconds for a message to arrive.
    )
    if 'Messages' in received_message \
            and len(received_message['Messages']) > 0:
        # NOTE: Initialize email configuration.
        recipient_email = 'support@example.com'
        username = recipient_email.split('@')[0]
        fullname_length = len(username.split('.'))
        fullname = f"{username.split('.')[0]}"  # Group name / first name only.
        if (fullname_length == 2):  # First name and last name available.
            fullname = f"{username.split('.')[0]} {username.split('.')[1]}"
        fullname = fullname.title()
        email_config = {
            'destination': recipient_email,
            'recipient_name': fullname,
            'subject': 'Subject: Restore Complete',
            'message': ''
        }
        try:
            receipt_handle = received_message['Messages'][0]['ReceiptHandle']
        except Exception as error_message:
            logging.error(error_message)
            send_failure_email(email_config, error_message)
            shutdown(f'{error_message}')
        try:
            data = received_message['Messages'][0]['Body']
            data = json.loads(data)
            logging.info('An SQS message for a restore has been received.')
        except Exception as error_message:
            message = f'Unable to obtain and parse message body. {error_message}'
            logging.error(message)
            send_failure_email(email_config, message)
            shutdown(f'{error_message}')
        try:
            bucket = data['bucket']
            prefix = data['prefix']
        except Exception as error_message:
            message = f'Retrieving bucket name and prefix failed. {error_message}'
            logging.error(message)
            send_failure_email(email_config, message)
            shutdown(f'{error_message}')
        try:
            logging.info(f'Initiating restore for path: {bucket}/{prefix}')
            restore_was_performed = await asyncio.create_task(restore_s3_objects(s3_client, bucket, prefix))
            if restore_was_performed is True:
                email_config['message'] = f'Hello {fullname},\n\n' \
                    + f'The files in the path \'{bucket}/{prefix}\' have been restored. '
                send_email(email_config)
                logging.info('Restore complete. Shutting down.')
            else:
                logging.info('Path does not require restore. Shutting down.')
            shutdown('Restore successful! System will shutdown in 5 mins')
        except Exception as error_message:
            message = f'File restoration failed. {error_message}'
            logging.error(message)
            send_failure_email(email_config, message)
            shutdown(f'{error_message}')
        try:
            sqs_client.delete_message(
                QueueUrl=queue_url,
                ReceiptHandle=receipt_handle,
            )
        except Exception as error_message:
            message = f'Deleting restore session from SQS failed. {error_message}'
            logging.error(message)
            send_failure_email(email_config, message)
            shutdown(f'{error_message}')

if __name__ == '__main__':
    logging.basicConfig(filename='restore.log', level=logging.INFO)
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
    loop.close()
The only way I was able to mock Boto3 was to rebuild a small class that mirrors the actual method structure. This is because Boto3 uses dynamic methods, and all the resource-level methods are created at runtime.
This might not be industry standard, but I wasn't able to get most of the methods I found on the internet to work, and this approach worked well for me with minimal effort compared to some of the solutions I found.
import os
import asyncio
import unittest
from unittest import mock

import boto3

from sqs import restore_s3_objects  # import from the module under test

class MockClient:
    def __init__(self, region_name, aws_access_key_id, aws_secret_access_key):
        self.region_name = region_name
        self.aws_access_key_id = aws_access_key_id
        self.aws_secret_access_key = aws_secret_access_key
        self.MockS3 = MockS3()

    def client(self, service_name, **kwargs):
        return self.MockS3

class MockS3:
    def __init__(self):
        self.response = {'IsTruncated': False}  # Put your mock S3 data here.

    def list_object_versions(self, **kwargs):
        return self.response

class S3TestCase(unittest.TestCase):
    def test_restore_s3_objects(self):
        # Given
        bucket = "testBucket"  # Set this to something somewhat realistic.
        prefix = "some/prefix"  # Set this to something somewhat realistic.
        env_vars = mock.patch.dict(os.environ, {"AWS_ACCESS_KEY_ID": "abc",
                                                "AWS_SECRET_ACCESS_KEY": "def"})
        env_vars.start()
        # Initialising the Session can be tricky, since it has to be patched
        # in the module/file that creates the session in the actual code
        # rather than where the Session code lives. In this case you might
        # have to patch it on your own module rather than on boto3.
        boto3.session.Session = mock.Mock(side_effect=[
            MockClient(region_name='eu-west-1',
                       aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'],
                       aws_secret_access_key=os.environ['AWS_SECRET_ACCESS_KEY'])])
        s3_client = boto3.session.Session().client('s3', verify=False)
        # When (restore_s3_objects is async, so drive it to completion).
        has_restore_occurred = asyncio.run(restore_s3_objects(s3_client, bucket, prefix))
        # Then
        self.assertEqual(has_restore_occurred, False)  # Your expected result.
        env_vars.stop()
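
Another option worth mentioning: botocore ships a Stubber that attaches canned responses to a real client, which avoids hand-rolling mock classes. A sketch under the assumption that restore_s3_objects is importable from sqs.py (the dummy credentials just keep request signing happy; asyncio.run drives the coroutine):

import asyncio
import boto3
from botocore.stub import Stubber
from sqs import restore_s3_objects

def test_restore_s3_objects_with_stubber():
    s3_client = boto3.client('s3', region_name='eu-west-1',
                             aws_access_key_id='abc',
                             aws_secret_access_key='def')
    stubber = Stubber(s3_client)
    # Minimal response: no delete markers and no further pages.
    stubber.add_response('list_object_versions',
                         {'IsTruncated': False},
                         {'Bucket': 'testBucket', 'Prefix': 'some/prefix'})
    with stubber:
        result = asyncio.run(restore_s3_objects(s3_client, 'testBucket', 'some/prefix'))
    assert result is False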

python multiprocessing doesn't seem to be working with class

I am trying to implement a zmq client wrapper class. I hope the comments written are useful for understanding it. Please see __main__ for the different ways I tried.
import zmq

class Client(object):
    """
    This is a class for passing messages using the pipeline (push-pull) pattern of zmq.
    """
    def __init__(self, pull_addr="tcp://127.0.0.1:5757",
                 push_addr="tcp://127.0.0.1:5858"):
        """
        The constructor.
        Parameters:
            pull_addr (string) : Local endpoint to push messages to and pull from.
            push_addr (string) : Remote endpoint for pushing messages.
        """
        super(Client, self).__init__()
        print("Initialized...")
        self.context = zmq.Context()
        self.push_addr = push_addr
        self.pull_addr = pull_addr

    def producer(self, msg):
        """
        Produces messages.
        It's a wrapper and will receive the message, which will be queued locally by
        pushing to self.pull_addr.
        Parameters:
            msg (json) : Message to be queued.
        Returns:
            None
        """
        print("Production started.... ")
        zmq_socket = self.context.socket(zmq.PUSH)
        zmq_socket.bind(self.pull_addr)
        zmq_socket.send_json(msg)

    def consumer(self):
        """
        Consumes messages produced by self.producer.
        Reads messages queued by self.producer and pushes them to self.push_addr,
        where it assumes the collector is expecting messages.
        Parameters:
            None
        Returns:
            None
        """
        print("consumption is awaiting...")
        receiver = self.context.socket(zmq.PULL)
        receiver.connect(self.pull_addr)
        sender = self.context.socket(zmq.PUSH)
        sender.connect(self.push_addr)
        while True:
            msg = receiver.recv_json()
            sender.send_json(msg)

if __name__ == '__main__':
    import multiprocessing
    # con_pool = multiprocessing.Pool()
    # # pro_pool = multiprocessing.Pool()
    zmq_client = Client()
    # consumer = con_pool.apply_async(zmq_client.consumer, ())
    # # producer = pro_pool.apply_async(zmq_client.producer, ({"msg": "Hello World!"}))
    # print("DONE>>>>")
    # from multiprocessing import Process
    # consumer = Process(target=zmq_client.consumer)
    # consumer.start()
    # producer = Process(target=zmq_client.producer, args=({'msg': "Hello World!"}))
    # producer.start()
    # producer.join()
    # consumer.join()
    pool = multiprocessing.Pool()
    p = pool.apply_async(zmq_client.consumer)
    p2 = pool.apply_async(zmq_client.producer, ({"msg": "Hello World!"}))
Try 1
pool = multiprocessing.Pool()
p = pool.apply_async(zmq_client.consumer)
p2 = pool.apply_async(zmq_client.producer, ({"msg" : "Hello World!"}))
Output 1
calls the constructor and terminates.
Try 2
con_pool = multiprocessing.Pool()
pro_pool = multiprocessing.Pool()
zmq_client = Client()
consumer = con_pool.apply_async(zmq_client.consumer, ())
producer = pro_pool.apply_async(zmq_client.producer,({"msg" : "Hello World!"}))
Output 2
calls the constructor and terminates.
Try 3
from multiprocessing import Process
zmq_client = Client()
con = Process(target=zmq_client.consumer)
con.start()
pro = Process(target=zmq_client.producer, args=({'msg' : "Hello World!"}))
pro.start()
pro.join()
con.join()
Output 3
calls the constructor
calls the consumer
calls the producer
keeps hanging
Here I was expecting the zmq server to receive the message. Any suggestions to get this class working as expected?
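
Not an answer to the hang in Try 3, but for Tries 1 and 2 the silent exit can usually be explained by asking the pool for the result: apply_async stores any worker (or task-pickling) error on the AsyncResult, and .get() re-raises it. Since the bound methods drag self.context (a zmq.Context) along, a pickling error is the likely suspect here. A minimal check:

import multiprocessing

if __name__ == '__main__':
    zmq_client = Client()
    pool = multiprocessing.Pool()
    p = pool.apply_async(zmq_client.consumer)
    # Re-raises whatever went wrong while dispatching or running the task,
    # instead of letting the script fall off the end silently.
    p.get(timeout=5)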

Stomp Consumer using deferred.inlinecallback

I am implementing a stomp consumer as a library. By calling this library from another application, I should be able to get the data from ActiveMQ. I am implementing it as below, but I have a problem returning frame.body: I am not able to retrieve the data from outside the class.
import json
from socket import gethostname
from uuid import uuid1

from twisted.internet import defer
from stompest.async import Stomp
from stompest.async.listener import SubscriptionListener
from stompest.config import StompConfig
from stompest.error import StompConnectionError, StompProtocolError
from stompest.protocol import StompSpec

class Consumer(object):
    def __init__(self, amq_uri):
        self.amq_uri = amq_uri
        self.hostname = gethostname()
        self.config = StompConfig(uri=self.amq_uri)

    @defer.inlineCallbacks
    def run(self, in_queue):
        client = yield Stomp(self.config)
        headers = {
            StompSpec.ACK_HEADER: StompSpec.ACK_CLIENT_INDIVIDUAL,
            StompSpec.ID_HEADER: self.hostname,
            'activemq.prefetchSize': '1000',
        }
        yield client.connect(headers=self._return_client_id())
        client.subscribe(
            in_queue,
            headers,
            listener=SubscriptionListener(self.consume)
        )
        try:
            client = yield client.disconnected
        except StompConnectionError:
            yield client.connect(headers=self._return_client_id())
            client.subscribe(
                in_queue,
                headers,
                listener=SubscriptionListener(self.consume)
            )
        while True:
            try:
                yield client.disconnected
            except StompProtocolError:
                pass
            except StompConnectionError:
                yield client.connect(headers=self._return_client_id())
                client.subscribe(
                    in_queue,
                    headers,
                    listener=SubscriptionListener(self.consume)
                )

    def _return_client_id(self):
        client_id = {}
        client_id['client-id'] = gethostname() + '-' + str(uuid1())
        return client_id

    def consume(self, client, frame):
        data = json.loads(frame.body)
        print 'Received Message Type {}'.format(type(data))
        print 'Received Message {}'.format(data)
        # I want to return data here. I am able to print frame.body here.

# Call from another application:
import Queue
from twisted.internet import reactor

amq_uri = 'tcp://localhost:61613'
in_queue = '/queue/test_queue'

c = Consumer(amq_uri)
c.run(in_queue)
print "data is from outside function", data  # Should be able to get the data returned by consume here
reactor.run()
Can someone please let me know how I can achieve this?
Thanks
I found a solution to my problem. Instead of using the async stomp library, I used the sync stomp library and implemented it as below:
import json
from socket import gethostname

from stompest.config import StompConfig
from stompest.protocol import StompSpec
from stompest.sync import Stomp

class Consumer(object):
    def __init__(self, amq_uri):
        self.amq_uri = amq_uri
        self.hostname = gethostname()
        self.config = StompConfig(uri=self.amq_uri)

    def run(self, in_queue, return_dict):
        client = Stomp(self.config)
        headers = {
            StompSpec.ACK_HEADER: StompSpec.ACK_CLIENT_INDIVIDUAL,
            StompSpec.ID_HEADER: self.hostname
        }
        client.connect()
        client.subscribe(in_queue, headers)
        try:
            frame = client.receiveFrame()
            data = json.dumps(frame.body)
        except Exception as exc:
            print exc
        client.ack(frame)
        client.disconnect()
        return_dict['data'] = data
        return data
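
A minimal call site for this synchronous version, reusing the URI and queue name from the question (return_dict is just a plain dict here; a multiprocessing.Manager().dict() would work the same way if run is moved to another process):

amq_uri = 'tcp://localhost:61613'
in_queue = '/queue/test_queue'

return_dict = {}
consumer = Consumer(amq_uri)
data = consumer.run(in_queue, return_dict)
print "data is from outside function", return_dict['data']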
