pysnmp trap receiver - processing with asyncio queue (w/o threads) - python

My goal is to have a pure asyncio implementation to receive and process SNMP traps with pysnmp. So far I have only managed to accomplish it by using (one or several) separate threads.
The examples I have found for pysnmp use a sync callback function (cbFun). I did not manage to work out how to make it an async function.
The example below works fine for me but uses a separate thread (or pool). It would be nice to have something along the lines of:
queue = asyncio.Queue()
loop = asyncio.get_event_loop()
loop.create_task(run_daemon)
loop.create_task(process_trap)
loop.run_forever()
but I did not manage to do it this way because of the cbFun.
Here is my working (threaded) example:
#!/usr/bin/env python3.8
import asyncio
import concurrent.futures

from pysnmp.entity import engine, config
from pysnmp.carrier.asyncio.dgram import udp
from pysnmp.entity.rfc3413 import ntfrcv


class SnmpTrapDaemon():

    @staticmethod
    def run_daemon(pool):
        # Create SNMP engine with autogenerated engineID and pre-bound
        # to socket transport dispatcher
        snmpEngine = engine.SnmpEngine()

        # Transport setup
        config.addTransport(
            snmpEngine,
            udp.domainName,
            udp.UdpTransport().openServerMode(('0.0.0.0', 162))
        )

        # SNMPv1/2c setup
        config.addV1System(
            snmpEngine, 'public', 'public')

        # Callback function for receiving notifications
        # noinspection PyUnusedLocal
        def cbFun(snmpEngine, stateReference, contextEngineId,
                  contextName, varBinds, cbCtx):
            trap = {}
            for oid, val in varBinds:
                trap[oid.prettyPrint()] = val.prettyPrint()
            pool.submit(asyncio.run, process_trap(trap))

        # Register SNMP Application at the SNMP engine
        ntfrcv.NotificationReceiver(snmpEngine, cbFun)

        snmpEngine.transportDispatcher.jobStarted(1)
        try:
            print('Trap Listener started on port 162. Press Ctrl-c to quit.')
            snmpEngine.transportDispatcher.runDispatcher()
        except KeyboardInterrupt:
            print('user quit')
        finally:
            snmpEngine.transportDispatcher.closeDispatcher()


async def process_trap(trap):
    print('Processing TRAP - this might take a while...')
    await asyncio.sleep(3)
    for item in trap.items():
        print(item)
    print('...done')


def main():
    print('Starting SNMP-TRAP Processor. Test with "snmptrap -v2c -c public 127.0.0.1:162 123 1.3.6.1.6.3.1.1.5.1 1.3.6.1.2.1.1.5.0 s test"')
    pool = concurrent.futures.ThreadPoolExecutor(max_workers=1)
    SnmpTrapDaemon.run_daemon(pool)


if __name__ == '__main__':
    main()
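For what it is worth, here is a rough sketch of the queue-based variant I am after. It is untested and rests on two assumptions: that pysnmp's asyncio carrier runs cbFun inside the running event loop (so queue.put_nowait() is safe without thread-safety helpers), and that openServerMode() can be called before the loop starts; the exact dispatcher/loop wiring may need adjustment (e.g. calling runDispatcher() instead of run_forever()):

#!/usr/bin/env python3.8
import asyncio

from pysnmp.entity import engine, config
from pysnmp.carrier.asyncio.dgram import udp
from pysnmp.entity.rfc3413 import ntfrcv

queue = asyncio.Queue()


async def process_traps():
    # Consumer task: pulls traps off the queue and processes them one by one.
    while True:
        trap = await queue.get()
        print('Processing TRAP - this might take a while...')
        await asyncio.sleep(3)
        for item in trap.items():
            print(item)
        print('...done')
        queue.task_done()


def cbFun(snmpEngine, stateReference, contextEngineId,
          contextName, varBinds, cbCtx):
    # Assumption: with the asyncio carrier this callback runs in the event
    # loop thread, so put_nowait() is enough (no call_soon_threadsafe needed).
    trap = {oid.prettyPrint(): val.prettyPrint() for oid, val in varBinds}
    queue.put_nowait(trap)


def main():
    loop = asyncio.get_event_loop()
    snmpEngine = engine.SnmpEngine()
    config.addTransport(
        snmpEngine,
        udp.domainName,
        udp.UdpTransport().openServerMode(('0.0.0.0', 162))
    )
    config.addV1System(snmpEngine, 'public', 'public')
    ntfrcv.NotificationReceiver(snmpEngine, cbFun)
    snmpEngine.transportDispatcher.jobStarted(1)
    loop.create_task(process_traps())
    print('Trap Listener started on port 162. Press Ctrl-c to quit.')
    loop.run_forever()


if __name__ == '__main__':
    main()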

Related

Using RabbitMQ with multiple consumer threads alongside a socketio setup with eventlet

I have a socketio setup that uses an eventlet server. My program gets logs from multiple machines and writes them to a database. I have an event called "new_log" which is triggered whenever a new log is sent through the websocket. Since database insertions take longer than the intervals between new logs, when I don't use any queueing system the logs accumulate on the client side, and when the client queue is filled to its max I no longer receive any new logs. That is the reason I decided to use RabbitMQ.
But I thought that since db insertions still take longer, a RabbitMQ setup with a single consumer doesn't really solve the problem. This time the queue will be on the server side, but it will still get bigger and bigger. So I wanted to launch a new consumer thread with each log. I found the following multi-threaded example in Pika's repo:
https://github.com/pika/pika/blob/0.13.1/examples/basic_consumer_threaded.py
and modified it a bit to use it like this:
main.py
import socketio
import os
import threading
import json
import pika
import functools
import config as cfg
from util.rabbitmq import consumer_threaded

sio = socketio.Server(async_mode="eventlet", namespaces='*', cors_allowed_origins=['*'])
app = socketio.WSGIApp(sio)

credentials = pika.PlainCredentials('guest', 'guest')
parameters = pika.ConnectionParameters('localhost', credentials=credentials, heartbeat=100)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.exchange_declare(exchange="test_exchange", exchange_type="direct", passive=False, durable=True, auto_delete=False)
channel.queue_declare(queue="standard", durable=True)
channel.queue_bind(queue="standard", exchange="test_exchange", routing_key="standard_key")
channel.basic_qos(prefetch_count=1)

@sio.on("new_log")
def client_activity(pid, data):
    channel.basic_publish(
        exchange='test_exchange',
        routing_key='standard_key',
        body=json.dumps(data),
        properties=pika.BasicProperties(
            delivery_mode=pika.spec.PERSISTENT_DELIVERY_MODE
        ))
    return "OK"

@sio.event
def connect(sid, environ, auth):
    print(f"[NEW CONNECTION]] {sid}", flush=True)

@sio.event
def disconnect(sid):
    sio.disconnect(sid)
    print(f"[DISCONNECTED] {sid}", flush=True)

def start_consumer():
    on_message_callback = functools.partial(consumer_threaded.on_message, args=(connection, channel))
    channel.basic_consume('standard', on_message_callback)
    channel.start_consuming()
    print("Started consuming", flush=True)

if __name__ == "__main__":
    consumer_thread = threading.Thread(target=start_consumer)
    consumer_thread.start()
    import eventlet
    eventlet.monkey_patch()
    eventlet.wsgi.server(eventlet.listen(("", 1234)), app)
consumer_threaded.py
import functools
import logging
import threading
import json
from util.logger import save_log

LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '
              '-35s %(lineno) -5d: %(message)s')
LOGGER = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)

def ack_message(channel, delivery_tag):
    if channel.is_open:
        channel.basic_ack(delivery_tag)

def do_work(connection, channel, delivery_tag, body):
    thread_id = threading.get_ident()
    fmt1 = 'Thread id: {} Total threads: {} Delivery tag: {} Message body: {}'
    LOGGER.info(fmt1.format(thread_id, threading.active_count(), delivery_tag, body))
    save_log.save_log(json.loads(body.decode()))
    cb = functools.partial(ack_message, channel, delivery_tag)
    connection.add_callback_threadsafe(cb)

def on_message(channel, method_frame, header_frame, body, args):
    (connection, channel) = args
    delivery_tag = method_frame.delivery_tag
    t = threading.Thread(target=do_work, args=(connection, channel, delivery_tag, body))
    t.start()
    t.join()
This seems to work for a while, but then I get the following error:
AssertionError: ('_AsyncTransportBase._produce() tx buffer size underflow', -44, 1)
How can I achieve what I described without getting this error?
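As background, pika's BlockingConnection and its channels are not thread safe, and in the setup above the consumer thread shares the module-level connection and channel with the eventlet/socketio publisher. One commonly suggested adjustment (a sketch only, not verified against this exact setup) is to give the consuming thread its own connection and channel:

def start_consumer():
    # Hypothetical variant of start_consumer(): a dedicated connection and
    # channel for the consumer thread, so it does not share sockets with the
    # channel used for publishing from the socketio handlers.
    consumer_connection = pika.BlockingConnection(parameters)
    consumer_channel = consumer_connection.channel()
    consumer_channel.basic_qos(prefetch_count=1)
    on_message_callback = functools.partial(
        consumer_threaded.on_message,
        args=(consumer_connection, consumer_channel))
    consumer_channel.basic_consume('standard', on_message_callback)
    consumer_channel.start_consuming()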

Python aiohttp + asyncio: How to execute code after loop.run_forever()

Code:
#!/usr/bin/env python
import asyncio
import os
import socket
import time
import traceback
from aiohttp import web
from concurrent.futures import ProcessPoolExecutor
from multiprocessing import cpu_count

CPU_COUNT = cpu_count()
print("CPU Count:", CPU_COUNT)

def mk_socket(host="127.0.0.1", port=9090, reuseport=False):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    if reuseport:
        SO_REUSEPORT = 15
        sock.setsockopt(socket.SOL_SOCKET, SO_REUSEPORT, 1)
    sock.bind((host, port))
    return sock

async def index(request):
    icecast_index_path = os.path.abspath("../test/icecast/icecast_index.html")
    print(icecast_index_path)
    try:
        content = open(icecast_index_path, encoding="utf8").read()
        return web.Response(content_type="text/html", text=content)
    except Exception as e:
        return web.Response(content_type="text/html", text="<!doctype html><body><h1>Error: "+str(e)+"</h1></body></html>")

async def start_server():
    try:
        host = "127.0.0.1"
        port = 8080
        reuseport = True
        app = web.Application()
        app.add_routes([web.get('/', index)])
        runner = web.AppRunner(app)
        await runner.setup()
        sock = mk_socket(host, port, reuseport=reuseport)
        srv = web.SockSite(runner, sock)
        await srv.start()
        print('Server started at http://127.0.0.1:8080')
        return srv, app, runner
    except Exception:
        traceback.print_exc()
        raise

async def finalize(srv, app, runner):
    sock = srv.sockets[0]
    app.loop.remove_reader(sock.fileno())
    sock.close()
    # await handler.finish_connections(1.0)
    await runner.cleanup()
    srv.close()
    await srv.wait_closed()
    await app.finish()

def init():
    loop = asyncio.get_event_loop()
    srv, app, runner = loop.run_until_complete(start_server())
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        loop.run_until_complete(finalize(srv, app, runner))

if __name__ == '__main__':
    with ProcessPoolExecutor() as executor:
        for i in range(0, int(CPU_COUNT/2)):
            executor.submit(init)
    # after the aiohttp start i want to execute more code
    # for example:
    print("Hello world.")
    # in the actual program the ProcessPoolExecutor is called
    # inside a pyqt5 app
    # so i don't want the pyqt5 app to freeze.
The problem is that with that code I can't execute code after the ProcessPoolExecutor calls.
How can I fix that?
I tried to remove this part:
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        loop.run_until_complete(finalize(srv, app, runner))
from the init() method, but after that the aiohttp server closes instantly.
Edit: If I use a thread instead of ProcessPoolExecutor then there are aiohttp errors that say:
RuntimeError: set_wakeup_fd only works in main thread
RuntimeError: There is no current event loop in thread
Aiohttp + asyncio related errors.
Maybe I could use an @ signature on the def declarations (I suppose).
Using a with block with an Executor will cause your process to block until the jobs in the executor are completed; since they're running infinite event loops, they will never complete and your Executor will never unblock.
Instead, just use the executor to kick off the jobs, and run your stuff afterwards. When you're finally done, call .shutdown() to wait for the processes to exit:
executor = ProcessPoolExecutor()
for i in range(0, int(CPU_COUNT/2)):
    executor.submit(init)

# other code…

executor.shutdown()

Simple Python multi threaded webserver with Asyncio and events called in main function

I have a simple Python program that I want to do three things:
Serve an HTTP document
Serve Websockets
Interact with the Websocket data
I am trying to use / grok asyncio. The issue is that I can't figure out how to access data acquired from a function in the main event loop.
For example in my code below I have two threads.
One thread is the HTTP server thread, one thread is the Websocket server thread and there is the main thread.
What I want to do is to print data captured in the websocket receiving thread in the main thread.
The only way I know how to do this is to use Queues to pass data between threads at which point I do not even know what the advantage of using asyncio is.
Similarly, it feels weird to pass the event loop to the serve_websocket function.
Can anyone please explain how to architect this to get data from the Websocket function into the main function?
It seems like there should be a way to do this without using the threading library at all, which seems possible. In an async project I would want to react to websocket events in a different function than where they are received.
NOTE: I know there are other libraries for websockets and HTTP serving with asyncio, but this is an example to help me understand how to structure projects using this paradigm.
Thanks
#!/usr/bin/env python
import json
import socketserver
import threading
import http.server
import asyncio
import time
import websockets

SERVER_ADDRESS = '127.0.0.1'
HTTP_PORT = 8087
WEBSOCKET_PORT = 5678

def serve_http():
    http_handler = http.server.SimpleHTTPRequestHandler
    with socketserver.TCPServer(("", HTTP_PORT), http_handler) as httpd:
        print(f'HTTP server listening on port {HTTP_PORT}')
        httpd.serve_forever()

def serve_websocket(server, event_loop):
    print(f'Websocket server listening on port {WEBSOCKET_PORT}')
    event_loop.run_until_complete(server)
    event_loop.run_forever()

async def ws_callback(websocket, path):
    while True:
        data = await websocket.recv()
        # How do I access parsed_data in the main function below
        parsed_data = json.loads(data)
        await websocket.send(data)

def main():
    event_loop = asyncio.get_event_loop()
    ws_server = websockets.serve(ws_callback, SERVER_ADDRESS, WEBSOCKET_PORT)
    threading.Thread(target=serve_http, daemon=True).start()
    threading.Thread(target=serve_websocket, args=(ws_server, event_loop), daemon=True).start()
    try:
        while True:
            # Keep alive - this is where I want to access the data from ws_callback
            # i.e.
            # print(data.values)
            time.sleep(.01)
    except KeyboardInterrupt:
        print('Exit called')

if __name__ == '__main__':
    main()
I believe that you should not mix asyncio and multithreading without special need. In your case, use only asyncio tools.
Then you have no problem sharing data between coroutines, because they all run on the same thread using cooperative multitasking.
Your code can be rewritten as:
#!/usr/bin/env python
import json
import socketserver
import threading
import http.server
import asyncio
import time
import websockets

SERVER_ADDRESS = '127.0.0.1'
HTTP_PORT = 8087
WEBSOCKET_PORT = 5678

parsed_data = {}

async def handle_http(reader, writer):
    data = await reader.read(100)
    message = data.decode()
    writer.write(data)
    await writer.drain()
    writer.close()

async def ws_callback(websocket, path):
    global parsed_data
    while True:
        data = await websocket.recv()
        # How do I access parsed_data in the main function below
        parsed_data = json.loads(data)
        await websocket.send(data)

async def main():
    ws_server = await websockets.serve(ws_callback, SERVER_ADDRESS, WEBSOCKET_PORT)
    print(f'Websocket server listening on port {WEBSOCKET_PORT}')
    http_server = await asyncio.start_server(
        handle_http, SERVER_ADDRESS, HTTP_PORT)
    print(f'HTTP server listening on port {HTTP_PORT}')
    try:
        while True:
            if parsed_data:
                print(parsed_data.values())
            await asyncio.sleep(0.1)
    except KeyboardInterrupt:
        print('Exit called')

if __name__ == '__main__':
    asyncio.run(main())

python - handle tornado connections into while loop

I have a server running a loop that reads data from a device and I want to send them to all clients who connect on a websocket on tornado.
I tried putting the loop inside the open function but then it can't handle on_close function or new connections.
What is best practice to do that?
#!/usr/bin/env python
import tornado.httpserver
import tornado.websocket
import tornado.ioloop
import tornado.web
import socket

class MyWebSocketServer(tornado.websocket.WebSocketHandler):
    def open(self):
        print('new connection' + self.request.remote_ip)
        try:
            while True:
                '''
                read and send data
                '''
        except Exception as error:
            print("Error on Main: " + str(error))

    def on_close(self):
        print('connection closed' + self.request.remote_ip)

application = tornado.web.Application([(r'/ws', MyWebSocketServer), ])

if __name__ == "__main__":
    http_server = tornado.httpserver.HTTPServer(application)
    http_server.listen(8000)
    print('start')
    tornado.ioloop.IOLoop.instance().start()
Thanks
Here's a full example about running your blocking code in a separate thread and broadcasting messages to all connected clients.
...
from concurrent.futures import ThreadPoolExecutor

executor = ThreadPoolExecutor(max_workers=1)  # spawn only 1 thread

class MyWebSocketServer(tornado.websocket.WebSocketHandler):
    connections = set()  # create a set to hold connections

    def open(self):
        print('new connection' + self.request.remote_ip)
        # put the new connection in connections set
        self.connections.add(self)

    def on_close(self):
        print('connection closed' + self.request.remote_ip)
        # remove client from connections
        self.connections.remove(self)

    @classmethod
    def send_message(cls, msg):
        for client in cls.connections:
            client.write_message(msg)

def read_from_serial(loop, msg_callback):
    """This function will read from serial
    and will run in a separate thread.

    `loop` is the IOLoop instance
    `msg_callback` is the function that will be
    called when new data is available from usb
    """
    while True:
        # your code ...
        # ...
        # when you get new data
        # tell the IOLoop to schedule `msg_callback`
        # to send the data to all clients
        data = "new data"
        loop.add_callback(msg_callback, data)

...

if __name__ == '__main__':
    loop = tornado.ioloop.IOLoop.current()
    msg_callback = MyWebSocketServer.send_message
    # run `read_from_serial` in another thread
    executor.submit(read_from_serial, loop, msg_callback)
    ...
    loop.start()

Python and RabbitMQ - Best way to listen to consume events from multiple channels?

I have two, separate RabbitMQ instances. I'm trying to find the best way to listen to events from both.
For example, I can consume events on one with the following:
credentials = pika.PlainCredentials(user, pass)
connection = pika.BlockingConnection(pika.ConnectionParameters(host="host1", credentials=credentials))
channel = connection.channel()
result = channel.queue_declare(exclusive=True)
channel.queue_bind(result.method.queue, exchange="my-exchange", routing_key='*.*.*.*.*')
channel.basic_consume(callback_func, result.method.queue, no_ack=True)
channel.start_consuming()
I have a second host, "host2", that I'd like to listen to as well. I thought about creating two separate threads to do this, but from what I've read, pika isn't thread safe. Is there a better way? Or would creating two separate threads, each listening to a different Rabbit instance (host1, and host2) be sufficient?
The answer to "what is the best way" depends heavily on your usage pattern of queues and what you mean by "best". Since I can't comment on questions yet, I'll just try to suggest some possible solutions.
In each example I'm going to assume exchange is already declared.
Threads
You can consume messages from two queues on separate hosts in single process using pika.
You are right - as its own FAQ states, pika is not thread safe, but it can be used in multi-threaded manner by creating connections to RabbitMQ hosts per thread. Making this example run in threads using threading module looks as follows:
import pika
import threading

class ConsumerThread(threading.Thread):
    def __init__(self, host, *args, **kwargs):
        super(ConsumerThread, self).__init__(*args, **kwargs)

        self._host = host

    # Not necessarily a method.
    def callback_func(self, channel, method, properties, body):
        print("{} received '{}'".format(self.name, body))

    def run(self):
        credentials = pika.PlainCredentials("guest", "guest")

        connection = pika.BlockingConnection(
            pika.ConnectionParameters(host=self._host,
                                      credentials=credentials))

        channel = connection.channel()

        result = channel.queue_declare(exclusive=True)

        channel.queue_bind(result.method.queue,
                           exchange="my-exchange",
                           routing_key="*.*.*.*.*")

        channel.basic_consume(self.callback_func,
                              result.method.queue,
                              no_ack=True)

        channel.start_consuming()

if __name__ == "__main__":
    threads = [ConsumerThread("host1"), ConsumerThread("host2")]
    for thread in threads:
        thread.start()
I've declared callback_func as a method purely to use ConsumerThread.name while printing message body. It might as well be a function outside the ConsumerThread class.
Processes
Alternatively, you can always just run one process with consumer code per queue you want to consume events.
import pika
import sys

def callback_func(channel, method, properties, body):
    print(body)

if __name__ == "__main__":
    credentials = pika.PlainCredentials("guest", "guest")

    connection = pika.BlockingConnection(
        pika.ConnectionParameters(host=sys.argv[1],
                                  credentials=credentials))

    channel = connection.channel()

    result = channel.queue_declare(exclusive=True)

    channel.queue_bind(result.method.queue,
                       exchange="my-exchange",
                       routing_key="*.*.*.*.*")

    channel.basic_consume(callback_func, result.method.queue, no_ack=True)

    channel.start_consuming()
and then run by:
$ python single_consume.py host1
$ python single_consume.py host2 # e.g. on another console
If the work you're doing on messages from queues is CPU-heavy and as long as number of cores in your CPU >= number of consumers, it is generally better to use this approach - unless your queues are empty most of the time and consumers won't utilize this CPU time*.
Async
Another alternative is to involve some asynchronous framework (for example Twisted) and run the whole thing in a single thread.
You can no longer use BlockingConnection in asynchronous code; fortunately, pika has an adapter for Twisted:
from pika.adapters.twisted_connection import TwistedProtocolConnection
from pika.connection import ConnectionParameters
from twisted.internet import protocol, reactor, task
from twisted.python import log

class Consumer(object):
    def on_connected(self, connection):
        d = connection.channel()
        d.addCallback(self.got_channel)
        d.addCallback(self.queue_declared)
        d.addCallback(self.queue_bound)
        d.addCallback(self.handle_deliveries)
        d.addErrback(log.err)

    def got_channel(self, channel):
        self.channel = channel
        return self.channel.queue_declare(exclusive=True)

    def queue_declared(self, queue):
        self._queue_name = queue.method.queue
        self.channel.queue_bind(queue=self._queue_name,
                                exchange="my-exchange",
                                routing_key="*.*.*.*.*")

    def queue_bound(self, ignored):
        return self.channel.basic_consume(queue=self._queue_name)

    def handle_deliveries(self, queue_and_consumer_tag):
        queue, consumer_tag = queue_and_consumer_tag
        self.looping_call = task.LoopingCall(self.consume_from_queue, queue)
        return self.looping_call.start(0)

    def consume_from_queue(self, queue):
        d = queue.get()
        return d.addCallback(lambda result: self.handle_payload(*result))

    def handle_payload(self, channel, method, properties, body):
        print(body)

if __name__ == "__main__":
    consumer1 = Consumer()
    consumer2 = Consumer()

    parameters = ConnectionParameters()

    cc = protocol.ClientCreator(reactor,
                                TwistedProtocolConnection,
                                parameters)
    d1 = cc.connectTCP("host1", 5672)
    d1.addCallback(lambda protocol: protocol.ready)
    d1.addCallback(consumer1.on_connected)
    d1.addErrback(log.err)

    d2 = cc.connectTCP("host2", 5672)
    d2.addCallback(lambda protocol: protocol.ready)
    d2.addCallback(consumer2.on_connected)
    d2.addErrback(log.err)

    reactor.run()
This approach would be even better the more queues you consume from and the less CPU-bound the work performed by the consumers is*.
Python 3
Since you've mentioned pika, I've restricted myself to Python 2.x-based solutions, because pika is not yet ported.
But in case you want to move to >=3.3, one possible option is to use asyncio with one of the AMQP libraries (AMQP being the protocol you speak with RabbitMQ), e.g. asynqp or aioamqp.
* - please note that these are very shallow tips - in most cases the choice is not that obvious; what will be best for you depends on queue "saturation" (messages/time), what work you do upon receiving these messages, what environment you run your consumers in, etc.; there's no way to be sure other than to benchmark all implementations.
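To make that option concrete, here is a rough single-threaded sketch using aio-pika (which also appears further down this thread) rather than asynqp/aioamqp; the host names, credentials, exchange and binding key mirror the examples above and are placeholders:

import asyncio
import aio_pika

async def consume(host):
    # One robust connection per host; aio-pika reconnects automatically.
    connection = await aio_pika.connect_robust("amqp://guest:guest@{}/".format(host))
    channel = await connection.channel()
    queue = await channel.declare_queue(exclusive=True)
    await queue.bind("my-exchange", routing_key="*.*.*.*.*")

    async with queue.iterator() as messages:
        async for message in messages:
            async with message.process():  # acks the message on exit
                print(host, message.body)

async def main():
    # Both consumers run concurrently on one event loop, in one thread.
    await asyncio.gather(consume("host1"), consume("host2"))

if __name__ == "__main__":
    asyncio.run(main())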
Below is an example of how I use one RabbitMQ instance to listen to 2 queues at the same time:
import pika
import threading

threads = []

def client_info(channel):
    channel.queue_declare(queue='proxy-python')
    print(' [*] Waiting for client messages. To exit press CTRL+C')

    def callback(ch, method, properties, body):
        print(" Received %s" % (body))

    channel.basic_consume(callback, queue='proxy-python', no_ack=True)
    channel.start_consuming()

def scenario_info(channel):
    channel.queue_declare(queue='savi-virnet-python')
    print(' [*] Waiting for scenario messages. To exit press CTRL+C')

    def callback(ch, method, properties, body):
        print(" Received %s" % (body))

    channel.basic_consume(callback, queue='savi-virnet-python', no_ack=True)
    channel.start_consuming()

def manager():
    connection1 = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
    channel1 = connection1.channel()
    connection2 = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
    channel2 = connection2.channel()

    t1 = threading.Thread(target=client_info, args=(channel1,))
    t1.daemon = True
    threads.append(t1)
    t1.start()

    t2 = threading.Thread(target=scenario_info, args=(channel2,))
    t2.daemon = True
    threads.append(t2)
    t2.start()

    for t in threads:
        t.join()

manager()
import asyncio
import tornado.ioloop
import tornado.web

from aio_pika import connect_robust, Message

tornado.ioloop.IOLoop.configure("tornado.platform.asyncio.AsyncIOLoop")
io_loop = tornado.ioloop.IOLoop.current()
asyncio.set_event_loop(io_loop.asyncio_loop)

QUEUE = asyncio.Queue()

class SubscriberHandler(tornado.web.RequestHandler):
    async def get(self):
        message = await QUEUE.get()
        self.finish(message.body)

class PublisherHandler(tornado.web.RequestHandler):
    async def post(self):
        connection = self.application.settings["amqp_connection"]
        channel = await connection.channel()
        try:
            await channel.default_exchange.publish(
                Message(body=self.request.body), routing_key="test",
            )
        finally:
            await channel.close()
            print('ok')
        self.finish("OK")

async def make_app():
    amqp_connection = await connect_robust()
    channel = await amqp_connection.channel()
    queue = await channel.declare_queue("test", auto_delete=True)
    await queue.consume(QUEUE.put, no_ack=True)
    return tornado.web.Application(
        [(r"/publish", PublisherHandler), (r"/subscribe", SubscriberHandler)],
        amqp_connection=amqp_connection,
    )

if __name__ == "__main__":
    app = io_loop.asyncio_loop.run_until_complete(make_app())
    app.listen(8888)
    tornado.ioloop.IOLoop.current().start()
You can use aio-pika in an async way, as shown above. More examples here:
https://buildmedia.readthedocs.org/media/pdf/aio-pika/latest/aio-pika.pdf
Happy coding :)
Pika can be used in a multithreaded consumer. The only requirement is to have a Pika connection per thread.
The Pika GitHub repository has an example here.
A snippet from basic_consumer_threaded.py:
def on_message(ch, method_frame, _header_frame, body, args):
    (conn, thrds) = args
    delivery_tag = method_frame.delivery_tag
    t = threading.Thread(target=do_work, args=(conn, ch, delivery_tag, body))
    t.start()
    thrds.append(t)

threads = []
on_message_callback = functools.partial(on_message, args=(connection, threads))
channel.basic_consume('standard', on_message_callback)
