Not receiving messages from Kafka Topic - python

I am receiving None when calling poll() in this program, but I get the messages when running kafka-console-consumer.bat from cmd; I can't figure out what exactly the problem is.
Execution starts from main.py:
from queue import Queue
from concurrent.futures import ThreadPoolExecutor
import time
import json
from kafka_message_consumer import KafkaMessageConsumer
from kafka_discovery_executor import KafkaDiscoveryExecutor

with open('kafka_properties.json') as f:
    kafka_properties = json.loads(f.read())
message_queue = Queue()
kafka_message_consumer = KafkaMessageConsumer(kafka_properties, message_queue)
kafka_discovery_executor = KafkaDiscoveryExecutor(message_queue, kafka_properties)

with ThreadPoolExecutor(max_workers=5) as executor:
    executor.submit(kafka_message_consumer.run())
    time.sleep(1)
    executor.submit(kafka_discovery_executor.run())
    time.sleep(1)
The KafkaDiscoveryExecutor class consumes messages from the shared queue and processes those messages.
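KafkaDiscoveryExecutor itself isn't shown in the post; for context, a minimal sketch of its shape (the names and the processing body are assumed, not taken from the original code):

class KafkaDiscoveryExecutor:
    def __init__(self, message_queue, kafka_properties):
        self.message_queue = message_queue
        self.kafka_properties = kafka_properties

    def run(self):
        while True:
            msg = self.message_queue.get()  # blocks until the consumer puts a message
            # ... process the message here ...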
This is kafka_message_consumer.py
import logging
from confluent_kafka import Consumer

class KafkaMessageConsumer:
    def __init__(self, kafka_properties, message_queue):
        self.message_queue = message_queue
        self.logger = logging.getLogger('KafkaMessageConsumer')
        self.kafka_stream_consumer = None
        self.create_consumer(kafka_properties)

    def create_consumer(self, kafka_properties):
        """
        Create an instance of Kafka Consumer with the consumer configuration properties
        and subscribe to the defined topic(s).
        """
        consumer_config = dict()
        # Consumer configuration properties.
        consumer_config['bootstrap.servers'] = kafka_properties.get('bootstrap.servers')
        consumer_config['group.id'] = kafka_properties.get('group.id')
        consumer_config['enable.auto.commit'] = True
        consumer_config['auto.offset.reset'] = 'earliest'
        # For SSL security:
        # consumer_config['security.protocol'] = 'SASL_SSL'
        # consumer_config['sasl.mechanisms'] = 'PLAIN'
        # consumer_config['sasl.username'] = ''
        # consumer_config['sasl.password'] = ''
        # Create the consumer using consumer_config.
        self.kafka_stream_consumer = Consumer(consumer_config)
        # Subscribe to the specified topic(s).
        self.kafka_stream_consumer.subscribe(['mytopic'])

    def run(self):
        while True:
            msg = self.kafka_stream_consumer.poll(1.0)
            if msg is None:
                # No message available within timeout.
                print("Waiting for message or event/error in poll()")
                continue
            elif msg.error():
                print("Error: {}".format(msg.error()))
            else:
                # Consume the record and push it into message_queue.
                try:
                    self.message_queue.put(msg)
                except Exception as e:
                    self.logger.critical("Error occurred in Kafka consumer: {}".format(e))
The specified topic has events, but I am getting None here, and the print statement inside 'if msg is None:' is the one that executes.

I am still not sure why the above code did not work as it should.
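One likely factor, though the original poster never confirmed it: executor.submit() expects a callable, but the first main.py passes kafka_message_consumer.run(), which calls run() immediately on the main thread and submits its return value. Since run() loops forever, the second task is never submitted and the pool is never really used. Dropping the parentheses hands the method itself to the executor:

with ThreadPoolExecutor(max_workers=5) as executor:
    executor.submit(kafka_message_consumer.run)    # pass the method, do not call it
    executor.submit(kafka_discovery_executor.run)

That alone doesn't explain the empty polls, but it does explain why KafkaDiscoveryExecutor never ran.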
Here are the changes I made to make this code work:
used the threading module instead of concurrent.futures
used daemon threads
called threading.Thread.__init__() inside the constructor of each class [KafkaMessageConsumer, KafkaDiscoveryExecutor]
Here's main.py
from queue import Queue
import threading
import time
import json
from kafka_message_consumer import KafkaMessageConsumer
from kafka_discovery_executor import KafkaDiscoveryExecutor

def main():
    with open('kafka_properties.json') as f:
        kafka_properties = json.loads(f.read())
    message_queue = Queue()
    threads = [
        KafkaMessageConsumer(kafka_properties, message_queue),
        KafkaDiscoveryExecutor(message_queue, kafka_properties)
    ]
    for thread in threads:
        thread.start()
        time.sleep(1)
    for thread in threads:
        thread.join()
        time.sleep(1)

if __name__ == "__main__":
    main()
and kafka_message_consumer.py
import logging
import threading
from confluent_kafka import Consumer

class KafkaMessageConsumer(threading.Thread):
    daemon = True

    def __init__(self, kafka_properties, message_queue):
        threading.Thread.__init__(self)
        self.message_queue = message_queue
        self.logger = logging.getLogger('KafkaMessageConsumer')
        self.kafka_stream_consumer = None
        self.create_consumer(kafka_properties)

    def create_consumer(self, kafka_properties):
        """
        Create an instance of Kafka Consumer with the consumer configuration properties
        and subscribe to the defined topic(s).
        """
        consumer_config = dict()
        # Consumer configuration properties.
        consumer_config['bootstrap.servers'] = kafka_properties.get('bootstrap.servers')
        consumer_config['group.id'] = kafka_properties.get('group.id')
        consumer_config['enable.auto.commit'] = True
        consumer_config['auto.offset.reset'] = 'earliest'
        # Create the consumer using consumer_config.
        self.kafka_stream_consumer = Consumer(consumer_config)
        # Subscribe to the specified topic(s).
        self.kafka_stream_consumer.subscribe(['mytopic'])

    def run(self):
        while True:
            msg = self.kafka_stream_consumer.poll(1.0)
            if msg is None:
                # No message available within timeout.
                print("Waiting for message or event/error in poll()")
                continue
            elif msg.error():
                print("Error: {}".format(msg.error()))
            else:
                # Consume the record and push it into message_queue.
                try:
                    self.message_queue.put(msg)
                except Exception as e:
                    self.logger.critical("Error occurred in Kafka consumer: {}".format(e))
        self.kafka_stream_consumer.close()
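One note on this working version: because run() loops forever, the close() call after the loop is unreachable and join() in main.py will block indefinitely. A common pattern (a sketch using threading.Event, not part of the original post) makes the loop stoppable so the consumer can shut down cleanly:

class KafkaMessageConsumer(threading.Thread):
    def __init__(self, kafka_properties, message_queue):
        threading.Thread.__init__(self)
        self.stop_event = threading.Event()
        # ... rest of __init__ as above ...

    def stop(self):
        self.stop_event.set()

    def run(self):
        while not self.stop_event.is_set():
            msg = self.kafka_stream_consumer.poll(1.0)
            # ... handle msg as above ...
        self.kafka_stream_consumer.close()  # now reachable once stop() is called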

Related

RabbitMQ exchange becomes unresponsive after some amount of time

I have a RabbitMQ server running in Docker and two Python clients that connect to the server and send messages to each other using a headers exchange. The message rate is about 10/s. After some amount of time (most of the time after 300-500 messages have been exchanged), one of the exchanges becomes unresponsive. The channel.basic_publish call passes without any exception, but the receiver doesn't receive any messages, and on the RabbitMQ dashboard there is no activity on this exchange (see the RabbitMQ dashboard screenshot).
Here is the code example:
import pika
import threading
import time
import sys

class Test:
    def __init__(
        self,
        p_username,
        p_password,
        p_host,
        p_port,
        p_virtualHost,
        p_outgoingExchange,
        p_incomingExchange
    ):
        self.__outgoingExch = p_outgoingExchange
        self.__incomingExch = p_incomingExchange
        self.__headers = {'topic': 'test'}
        self.__queueName = ''
        self.__channelConsumer = None
        self.__channelProducer = None
        self.__isRun = False
        l_credentials = pika.PlainCredentials(p_username, p_password)
        l_parameters = pika.ConnectionParameters(
            host=p_host,
            port=p_port,
            virtual_host=p_virtualHost,
            credentials=l_credentials,
            socket_timeout=30,
            connection_attempts=5,
        )
        self.__connection = pika.SelectConnection(
            parameters=l_parameters,
            on_open_callback=self.__on_connection_open,
            on_open_error_callback=self.__on_connection_open_error,
            on_close_callback=self.__on_connection_closed
        )

    def __on_connection_open(self, _conn):
        print("Connection opened")
        self.__connection.channel(on_open_callback=self.__on_consume_channel_open)
        self.__connection.channel(on_open_callback=self.__on_produce_channel_open)

    def __on_connection_open_error(self, _conn, _exception):
        print("Failed to open connection")

    def __on_connection_closed(self, _conn, p_exception):
        print("Connection closed: {}".format(p_exception))

    def __on_consume_channel_open(self, p_ch):
        print("Consumer channel opened")
        self.__channelConsumer = p_ch
        self.__channelConsumer.exchange_declare(
            exchange=self.__incomingExch,
            exchange_type="headers",
            callback=self.__on_consume_exchange_declared
        )

    def __on_consume_exchange_declared(self, p_method):
        print("Consumer exchange declared")
        self.__channelConsumer.queue_declare(
            queue='',
            callback=self.__on_queue_declare
        )

    def __on_queue_declare(self, p_method):
        print("Consumer queue declared")
        self.__queueName = p_method.method.queue
        self.__channelConsumer.queue_bind(
            queue=self.__queueName,
            exchange=self.__incomingExch,
            arguments=self.__headers,
        )
        self.__channelConsumer.basic_consume(self.__queueName, self.__onMessageReceived)

    def __on_produce_channel_open(self, p_ch):
        print("Producer channel opened")
        self.__channelProducer = p_ch
        self.__channelProducer.exchange_declare(
            exchange=self.__outgoingExch,
            exchange_type="headers",
            callback=self.__on_produce_exchange_declared
        )

    def __on_produce_exchange_declared(self, p_method):
        print("Producer exchange declared")
        l_publisher = threading.Thread(target=self.__publishProcedure)
        l_publisher.start()

    def __onMessageReceived(self, p_channel, p_method, p_properties, p_body):
        p_channel.basic_ack(p_method.delivery_tag)
        print("Message received: {}".format(p_body))

    def __publishProcedure(self):
        print("Start publishing")
        l_msgCounter = 0
        while self.__isRun:
            l_msgCounter += 1
            self.__publish(l_msgCounter)
            time.sleep(0.1)

    def __publish(self, p_msgCounter):
        self.__channelProducer.basic_publish(
            exchange=self.__outgoingExch,
            routing_key="#",
            body=str(p_msgCounter),
            properties=pika.BasicProperties(headers=self.__headers)
        )

    def run(self):
        self.__isRun = True
        try:
            self.__connection.ioloop.start()
        except KeyboardInterrupt:
            self.__isRun = False
            self.__connection.close()
            print("Exit...")

if __name__ == '__main__':
    if len(sys.argv) < 2:
        print("Provide node name [node1 | node2]")
        exit(-1)
    l_outgoingExch = ''
    l_incomingExch = ''
    if sys.argv[1] == 'node1':
        l_outgoingExch = 'node2.headers'
        l_incomingExch = 'node1.headers'
    elif sys.argv[1] == 'node2':
        l_outgoingExch = 'node1.headers'
        l_incomingExch = 'node2.headers'
    else:
        print("Wrong node name")
        exit(-1)
    l_testInstance = Test(
        p_username='admin',
        p_password='admin',
        p_host='localhost',
        p_port=5672,
        p_virtualHost='/',
        p_incomingExchange=l_incomingExch,
        p_outgoingExchange=l_outgoingExch
    )
    l_testInstance.run()
I run two instances as two nodes (node1 and node2) so they should communicate with each other.
Also sometimes I have the issue described here:
Stream connection lost: AssertionError(('_AsyncTransportBase._produce() tx buffer size underflow', -275, 1),)
I found that I misused pika. As the pika documentation states, it's not safe to share a connection across multiple threads. The only way you can interact with a connection from other threads is the add_callback_threadsafe function, which schedules the callback to run on the connection's ioloop thread, so all channel interaction stays on a single thread. In my example it should look like this (note that functools must be imported for functools.partial):
def __publishProcedure(self):
    print("Start publishing")
    l_msgCounter = 0
    while self.__isRun:
        l_msgCounter += 1
        l_cb = functools.partial(self.__publish, l_msgCounter)
        self.__connection.ioloop.add_callback_threadsafe(l_cb)
        time.sleep(0.1)

def __publish(self, p_msgCounter):
    self.__channelProducer.basic_publish(
        exchange=self.__outgoingExch,
        routing_key="#",
        body=str(p_msgCounter),
        properties=pika.BasicProperties(headers=self.__headers)
    )

Exception in subscribe loop: Subscribe key not configured

Can someone help me figure out why I am getting this exception, please? Even though I have the subscribe key configured, it seems like it is not.
Here is the exception: "Exception in subscribe loop: Subscribe key not configured
reconnection policy is disabled, please handle reconnection manually."
import time
from pubnub.pubnub import PubNub
from pubnub.pnconfiguration import PNConfiguration
from pubnub.callbacks import SubscribeCallback
from backend.blockchain.block import Block

pnconfig = PNConfiguration()
pnconfig.suscribe_key = 'sub-c-6d0fe192-dee4-11ea-9b19-...'
pnconfig.publish_key = 'pub-c-c3553c68-bf24-463c-ae43-...'

CHANNELS = {
    'TEST': 'TEST',
    'BLOCK': 'BLOCK'
}

class Listener(SubscribeCallback):
    def __init__(self, blockchain):
        self.blockchain = blockchain

    def message(self, pubnub, message_object):
        print(f'\n-- Channel: {message_object.channel} | Message: {message_object.message}')
        if message_object.channel == CHANNELS['BLOCK']:
            block = Block.from_json(message_object.message)
            potential_chain = self.blockchain.chain[:]
            potential_chain.append(block)
            try:
                self.blockchain.replace_chain(potential_chain)
                print('\n -- Successfully replaced the local chain')
            except Exception as e:
                print(f'\n -- Did not replace chain: {e}')

class PubSub():
    """
    Handles the publish/subscribe layer of the application.
    Provides communication between the nodes of the blockchain network.
    """
    def __init__(self, blockchain):
        self.pubnub = PubNub(pnconfig)
        self.pubnub.subscribe().channels(CHANNELS.values()).execute()
        self.pubnub.add_listener(Listener(blockchain))

    def publish(self, channel, message):
        """
        Publish the message object to the channel.
        """
        self.pubnub.publish().channel(channel).message(message).sync()

    def broadcast_block(self, block):
        """
        Broadcast a block object to all nodes.
        """
        self.publish(CHANNELS['BLOCK'], block.to_json())

def main():
    pubsub = PubSub()
    time.sleep(1)
    pubsub.publish(CHANNELS['TEST'], {'foo': 'bar'})

if __name__ == '__main__':
    main()
I wasn't able to reproduce the error "Exception in subscribe loop: Subscribe key not configured reconnection policy is disabled, please handle reconnection manually.".
I used the code block:
pnconfig = PNConfiguration()
pnconfig.subscribe_key = "my_subkey"
pnconfig.publish_key = "my_pubkey"
pnconfig.ssl = True
pnconfig.uuid = "my_custom_uuid"
pnconfig.reconnect_policy = "LINEAR"
pubnub = PubNub(pnconfig)
from the page https://www.pubnub.com/docs/python/api-reference-configuration
and added:
PNReconnectionPolicy.NONE
from the same page.
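One detail worth checking in the original code: it sets pnconfig.suscribe_key (missing a 'b'), while the attribute used in the documentation snippet above is subscribe_key. Python silently accepts the misspelled attribute, so the actual subscribe key would never be configured, which matches the exception text. A corrected configuration would be:

pnconfig = PNConfiguration()
pnconfig.subscribe_key = 'sub-c-6d0fe192-dee4-11ea-9b19-...'  # note the spelling: subscribe_key
pnconfig.publish_key = 'pub-c-c3553c68-bf24-463c-ae43-...'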

python multiprocessing doesn't seem to be working with class

I am trying to implement a zmq client wrapper class. I hope the comments written are useful for understanding it. Please see __main__ for the different ways I tried.
import zmq

class Client(object):
    """
    This is a class for passing messages using the pipeline (push-pull) pattern of zmq.
    """
    def __init__(self, pull_addr="tcp://127.0.0.1:5757",
                 push_addr="tcp://127.0.0.1:5858"):
        """
        The constructor.

        Parameters:
            pull_addr (string) : Local endpoint for pushing messages to and pulling from.
            push_addr (string) : Remote endpoint for pushing messages.
        """
        super(Client, self).__init__()
        print("Initialized...")
        self.context = zmq.Context()
        self.push_addr = push_addr
        self.pull_addr = pull_addr

    def producer(self, msg):
        """
        Produces messages.

        It's a wrapper and will receive the message, which will be queued locally by
        pushing to self.pull_addr.

        Parameters:
            msg (json) : Message to be queued.

        Returns:
            None
        """
        print("Production started.... ")
        zmq_socket = self.context.socket(zmq.PUSH)
        zmq_socket.bind(self.pull_addr)
        zmq_socket.send_json(msg)

    def consumer(self):
        """
        Consumes messages produced by self.producer.

        Reads messages queued by self.producer and pushes them to self.push_addr,
        where it assumes the collector is expecting messages.

        Parameters:
            None

        Returns:
            None
        """
        print("consumption is awaiting...")
        receiver = self.context.socket(zmq.PULL)
        receiver.connect(self.pull_addr)
        sender = self.context.socket(zmq.PUSH)
        sender.connect(self.push_addr)
        while True:
            msg = receiver.recv_json()
            sender.send_json(msg)

if __name__ == '__main__':
    import multiprocessing

    # con_pool = multiprocessing.Pool()
    # # pro_pool = multiprocessing.Pool()
    zmq_client = Client()
    # consumer = con_pool.apply_async(zmq_client.consumer, ())
    # # producer = pro_pool.apply_async(zmq_client.producer,({"msg" : "Hello World!"}))
    # print("DONE>>>>")
    # from multiprocessing import Process
    # consumer = Process(target=zmq_client.consumer)
    # consumer.start()
    # producer = Process(target=zmq_client.producer, args=({'msg' : "Hello World!"}))
    # producer.start()
    # producer.join()
    # consumer.join()
    pool = multiprocessing.Pool()
    p = pool.apply_async(zmq_client.consumer)
    p2 = pool.apply_async(zmq_client.producer, ({"msg" : "Hello World!"}))
Try 1
pool = multiprocessing.Pool()
p = pool.apply_async(zmq_client.consumer)
p2 = pool.apply_async(zmq_client.producer, ({"msg" : "Hello World!"}))
Output 1
calls the constructor and terminates.
Try 2
con_pool = multiprocessing.Pool()
pro_pool = multiprocessing.Pool()
zmq_client = Client()
consumer = con_pool.apply_async(zmq_client.consumer, ())
producer = pro_pool.apply_async(zmq_client.producer,({"msg" : "Hello World!"}))
Output 2
calls the constructor and terminates.
Try 3
from multiprocessing import Process
zmq_client = Client()
con = Process(target=zmq_client.consumer)
con.start()
pro = Process(target=zmq_client.producer, args=({'msg' : "Hello World!"}))
pro.start()
pro.join()
con.join()
Output 3
calls the constructor
calls the consumer
calls the producer
keeps hanging
Here I was expecting the zmq server to receive the message. Any suggestions for getting this class working as expected?
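A few observations, hedged since I can't run this setup: Pool.apply_async swallows any exception raised while dispatching or running the task until you call .get() on the AsyncResult, which is why Tries 1 and 2 appear to just call the constructor and terminate. Dispatching a bound method such as zmq_client.consumer also requires pickling self, and the zmq.Context it holds is generally not picklable, so the task likely fails before it ever runs. Calling .get() makes the failure visible:

import multiprocessing

if __name__ == '__main__':
    zmq_client = Client()
    pool = multiprocessing.Pool()
    res = pool.apply_async(zmq_client.consumer)
    # apply_async hides worker errors until the result is requested;
    # if pickling zmq_client failed, the exception re-raises here.
    res.get(timeout=5)

In Try 3 the processes do start, but consumer() loops forever, so pro.join()/con.join() block, which looks like hanging; note also that args=({'msg' : "Hello World!"}) is missing a trailing comma, so it is not actually a tuple.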

How to run code every x seconds inside while true - python

I need to execute code inside a while loop every x seconds without stopping the loop's work.
I have tried threading and lock combinations, but it is still not working. I am working on Python 3.7.4, PyCharm 2019.2.
#!/usr/bin/env python3
import configparser
import logging
import threading
import time
import ts3

__all__ = ["notify_bot"]

logging.basicConfig(filename='ts3bot.log',
                    level=logging.INFO,
                    format="%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s",
                    )
logging.getLogger().addHandler(logging.StreamHandler())

def notify_bot(ts3conn, config, lock):
    logging.info("Start Notify Bot ...")
    lock.acquire()
    ts3conn.exec_("servernotifyregister", event="server")
    lock.release()
    while True:
        event = ts3conn.wait_for_event()
        try:
            reasonid_ = event[0]["reasonid"]
        except KeyError:
            continue
        if reasonid_ == "0":
            logging.info("User joined Lobby:")
            logging.info(event[0])
            servergroups = event[0]['client_servergroups']
            guestname = event[0]['client_nickname']
            lock.acquire()
            if not set(servergroups):
                print(f"s1 {guestname}")
            else:
                print(f"s2{guestname}")
            lock.release()
    return None

def keep_alive(ts3conn, lock):
    while True:
        logging.info("Send keep alive!")
        lock.acquire()
        ts3conn.send_keepalive()
        lock.release()
        time.sleep(5)

if __name__ == "__main__":
    logging.info("Start TS Bot ...")
    config = configparser.ConfigParser()
    config.sections()
    config.read("settings_test.ini")
    logging.info("Config loaded!")
    HOST = config['server']['url']
    PORT = config['server']['query_port']
    USER = config['server']['query_user']
    PASS = config['server']['query_pw']
    SID = config['server']['sid']
    NAME = config['bot']['name']
    logging.info("Connecting to query interface ...")
    URI = f"telnet://{USER}:{PASS}@{HOST}:{PORT}"
    try:
        with ts3.query.TS3ServerConnection(URI) as ts3conn:
            ts3conn.exec_("use", sid=SID)
            ts3conn.query("clientupdate", client_nickname="x123d")
            logging.info("Connected!")
            lock = threading.Lock()
            notify_thread = threading.Thread(target=notify_bot, args=(ts3conn, config, lock),
                                             daemon=True, name="notify")
            keep_alive_thread = threading.Thread(target=keep_alive, args=(ts3conn, lock),
                                                 daemon=True, name="keep_alive")
            notify_thread.start()
            keep_alive_thread.start()
            keep_alive_thread.join()
            notify_thread.join()
    except KeyboardInterrupt:
        logging.info(60 * "=")
        logging.info("TS Bot terminated by user!")
        logging.info(60 * "=")
After running, it works for the first person who joins the server and then does nothing: it doesn't send keep-alives and doesn't work at all.
You can use the built-in time library.
You can check it on the official Python website (https://docs.python.org/3/library/time.html).
Personally, for simple things, I find the _thread library easier. Here's a function that you can run in a thread, and an example of starting that thread:
import _thread
import time

def mythread(arg1):
    while True:
        time.sleep(arg1)
        do.whatever()  # placeholder for the work to run every arg1 seconds

_thread.start_new_thread(mythread, (5,))
The important thing to note is the second argument I passed to the _thread.start_new_thread function. It must be a tuple, which is why there is a comma after the 5. Even if your function doesn't require any arguments, you have to pass a tuple.
I am using the time module and threading.
I've made some changes and it seems to work:
#!/usr/bin/env python3
import configparser
import logging
import threading
import time
import ts3

logging.basicConfig(filename='ts3bot.log',
                    level=logging.INFO,
                    format="%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s",
                    )
logging.getLogger().addHandler(logging.StreamHandler())

def notify_bot(ts3conn):
    logging.info("Start Notify Bot ...")
    ts3conn.exec_("servernotifyregister", event="server")
    while True:
        event = ts3conn.wait_for_event()
        try:
            reasonid_ = event[0]["reasonid"]
        except KeyError:
            continue
        if reasonid_ == "0":
            logging.info("User joined Lobby:")
            logging.info(event[0])
            servergroups = event[0]['client_servergroups']
            guestname = event[0]['client_nickname']
            if not set(servergroups):
                print(f"s1 {guestname}")
            else:
                print(f"s2{guestname}")
    return None

def keep_alive(ts3conn, time):
    while True:
        logging.info("Send keep alive!")
        ts3conn.send_keepalive()
        time.sleep(20)

if __name__ == "__main__":
    logging.info("Start TS Bot ...")
    config = configparser.ConfigParser()
    config.sections()
    config.read("settings_test.ini")
    logging.info("Config loaded!")
    HOST = config['server']['url']
    PORT = config['server']['query_port']
    USER = config['server']['query_user']
    PASS = config['server']['query_pw']
    SID = config['server']['sid']
    NAME = config['bot']['name']
    logging.info("Connecting to query interface ...")
    URI = f"telnet://{USER}:{PASS}@{HOST}:{PORT}"
    try:
        with ts3.query.TS3ServerConnection(URI) as ts3conn:
            ts3conn.exec_("use", sid=SID)
            ts3conn.query("clientupdate", client_nickname="x123d")
            logging.info("Connected!")
            notify_thread = threading.Thread(target=notify_bot, args=(ts3conn,),
                                             daemon=True, name="notify")
            keep_alive_thread = threading.Thread(target=keep_alive, args=(ts3conn, time),
                                                 daemon=True, name="keep_alive")
            notify_thread.start()
            keep_alive_thread.start()
            keep_alive_thread.join()
            notify_thread.join()
    except KeyboardInterrupt:
        logging.info(60 * "=")
        logging.info("TS Bot terminated by user!")
        logging.info(60 * "=")
It looks like ts3conn.send_keepalive() is causing the error: when I delete it, the code works fine; when I add it, the code stops working after send_keepalive() has been sent once.
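For what it's worth, sharing one query connection between two threads can corrupt the command/response stream even with locks, since wait_for_event() can hold the connection while the keep-alive thread writes to it. A single-threaded pattern (a sketch, assuming the ts3 library's wait_for_event() accepts a timeout and raises ts3.query.TS3TimeoutError, as in the py-ts3 examples) avoids the second thread entirely:

while True:
    ts3conn.send_keepalive()
    try:
        # Block for up to 60 seconds waiting for an event.
        event = ts3conn.wait_for_event(timeout=60)
    except ts3.query.TS3TimeoutError:
        pass  # no event arrived; loop around and send the next keep-alive
    else:
        logging.info(event[0])  # handle the event here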

I/O intensive serial port application: Porting from Threading, Queue based design to Asynchronous (ala Twisted)

So, I've been working on an application for a client that communicates with wireless devices via a serial (RS-232) "Master". I've currently written the core of the app using threading (below). I've been noticing on #python that the consensus seems to be NOT to use threads and to use Twisted's asynchronous communication abilities instead.
I haven't been able to find any good examples of using Twisted for serial-port async I/O communication. However, I have found Dave Peticolas' 'Twisted Introduction' (thanks nosklo), which I'm currently working through, but it uses sockets instead of serial communication (though the async concept is very well explained).
How would I go about porting this app over to Twisted from threading and queues? Are there any advantages/disadvantages? (I have noticed that, on occasion, if a thread hangs it will BSOD the system.)
The Code (msg_poller.py)
from livedatafeed import LiveDataFeed
from msg_build import build_message_to_send
from utils import get_item_from_queue
from protocol_wrapper import ProtocolWrapper, ProtocolStatus
from crc16 import *
import time
import Queue
import threading
import serial
import gc

gc.enable()

PROTOCOL_HEADER = '\x01'
PROTOCOL_FOOTER = '\x0D\x0A'
PROTOCOL_DLE = '\x90'
INITIAL_MODBUS = 0xFFFF

class Poller:
    """
    Connects to the serial port and polls nodes for data.
    Reads the response from the node(s) and loads that data into a queue.
    Parses qdata and writes that data to the database.
    """
    def __init__(self,
                 port,
                 baudrate,
                 parity,
                 rtscts,
                 xonxoff,
                 echo=False):
        try:
            self.serial = serial.serial_for_url(port,
                                                baudrate,
                                                parity=parity,
                                                rtscts=rtscts,
                                                xonxoff=xonxoff,
                                                timeout=.01)
        except AttributeError:
            self.serial = serial.Serial(port,
                                        baudrate,
                                        parity=parity,
                                        rtscts=rtscts,
                                        xonxoff=xonxoff,
                                        timeout=.01)
        self.com_data_q = None
        self.com_error_q = None
        self.livefeed = LiveDataFeed()
        self.timer = time.time()
        self.dtr_state = True
        self.rts_state = True
        self.break_state = False

    def start(self):
        self.data_q = Queue.Queue()
        self.error_q = Queue.Queue()
        com_error = get_item_from_queue(self.error_q)
        if com_error is not None:
            print 'Error %s' % (com_error)
        self.timer = time.time()
        self.alive = True
        # Start the monitor thread.
        self.mon_thread = threading.Thread(target=self.reader)
        self.mon_thread.setDaemon(1)
        self.mon_thread.start()
        # Start the sending thread.
        self.trans_thread = threading.Thread(target=self.writer)
        self.trans_thread.setDaemon(1)
        self.trans_thread.start()

    def stop(self):
        try:
            self.alive = False
            self.serial.close()
        except (KeyboardInterrupt, SystemExit):
            self.alive = False

    def reader(self):
        """
        Reads data from the serial port using self.mon_thread.
        Displays that data on the screen.
        """
        from rmsg_format import message_crc, message_format
        while self.alive:
            try:
                while self.serial.inWaiting() != 0:
                    # Read node data from the serial port. Data should be 96B.
                    data = self.serial.read(96)
                    data += self.serial.read(self.serial.inWaiting())
                    if len(data) > 0:
                        # Put data into the data_q object.
                        self.data_q.put(data)
                        if len(data) == 96:
                            msg = self.data_q.get()
                            pw = ProtocolWrapper(
                                header=PROTOCOL_HEADER,
                                footer=PROTOCOL_FOOTER,
                                dle=PROTOCOL_DLE)
                            # Feed all the bytes of 'msg' sequentially into pw.input.
                            status = map(pw.input, msg)
                            if status[-1] == ProtocolStatus.IN_MSG:
                                # Parse the received CRC into a 16-bit integer.
                                rec_crc = message_crc.parse(msg[-4:]).crc
                                # Compute the CRC on the message.
                                calc_crc = calcString(msg[:-4], INITIAL_MODBUS)
                                from datetime import datetime
                                ts = datetime.now().strftime('%Y/%m/%d %H:%M:%S')
                                if rec_crc != calc_crc:
                                    print ts
                                    print 'ERROR: CRC Mismatch'
                                    print msg.encode('hex')
                                else:
                                    #msg = message_format.parse(msg[1:])
                                    #print msg.encode('hex') + "\r\n"
                                    msg = message_format.parse(msg[1:])
                                    print msg
                                    #return msg
                gc.collect()
                time.sleep(.2)
            except (KeyboardInterrupt, SystemExit, Exception, TypeError):
                self.alive = False
                self.serial.close()
                raise

    def writer(self):
        """
        Builds the packet to poll each node for data.
        Writes that data to the serial port using self.trans_thread.
        """
        import time
        try:
            while self.alive:
                try:
                    dest_module_code = ['DRILLRIG',
                                        'POWERPLANT',
                                        'GENSET',
                                        'MUDPUMP']
                    dest_ser_no = lambda x: x + 1
                    for code in dest_module_code:
                        if code != 'POWERPLANT':
                            msg = build_message_to_send(
                                data_len=0x10,
                                dest_module_code='%s' % (code),
                                dest_ser_no=dest_ser_no(0),
                                dest_customer_code='*****',
                                ret_ser_no=0x01,
                                ret_module_code='DOGHOUSE',
                                ret_customer_code='*****',
                                command='POLL_NODE',
                                data=[])
                            self.serial.write(msg)
                            time.sleep(.2)
                            gc.collect()
                        elif code == 'POWERPLANT':
                            msg = build_message_to_send(
                                data_len=0x10,
                                dest_module_code='POWERPLANT',
                                dest_ser_no=dest_ser_no(0),
                                dest_customer_code='*****',
                                ret_ser_no=0x01,
                                ret_module_code='DOGHOUSE',
                                ret_customer_code='*****',
                                command='POLL_NODE',
                                data=[])
                            self.serial.write(msg)
                            time.sleep(.2)
                            gc.collect()
                            msg = build_message_to_send(
                                data_len=0x10,
                                dest_module_code='POWERPLANT',
                                dest_ser_no=dest_ser_no(1),
                                dest_customer_code='*****',
                                ret_ser_no=0x01,
                                ret_module_code='DOGHOUSE',
                                ret_customer_code='*****',
                                command='POLL_NODE',
                                data=[])
                            self.serial.write(msg)
                            time.sleep(.2)
                            gc.collect()
                except (KeyboardInterrupt, SystemExit):
                    self.alive = False
                    self.serial.close()
                    raise
        except (KeyboardInterrupt, SystemExit):
            self.alive = False
            self.serial.close()
            raise

def main():
    poller = Poller(
        port='COM4',
        baudrate=115200,
        parity=serial.PARITY_NONE,
        rtscts=0,
        xonxoff=0,
    )
    poller.start()
    poller.reader()
    poller.writer()
    poller.stop()

if __name__ == '__main__':
    main()
It is very difficult (if not impossible) to write a direct one-to-one mapping between the threading/queue approach and one that uses Twisted.
I would suggest getting the hang of Twisted, its reactor, and its use of Protocol classes and their protocol-specific methods. Think of it this way: all the asynchronous things that you had been explicitly coding with threads and queues are given to you for free via Deferreds when you use Twisted.
Twisted does support serial ports over its reactor using the SerialPort transport class, and the basic structure looks like this:
from twisted.internet import reactor
from twisted.internet.serialport import SerialPort

SerialPort(YourProtocolClass(), port, reactor, baudrate=baudrate)
reactor.run()
In YourProtocolClass() you would handle the various events that are specific to your serial-port communication requirements. The doc/core/examples directory contains examples such as gpsfix.py and mouse.py.
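To make the structure concrete, here is a minimal sketch (the PollerProtocol name, the placeholder packet, and the 200ms interval are illustrative, not from the original post): the reactor replaces both the reader and writer threads.

from twisted.internet import reactor
from twisted.internet.protocol import Protocol
from twisted.internet.serialport import SerialPort

class PollerProtocol(Protocol):
    def connectionMade(self):
        # Kick off the periodic polling that writer() used to do.
        self.poll()

    def poll(self):
        self.transport.write(b'...')       # build and send the next poll packet here
        reactor.callLater(0.2, self.poll)  # reschedule instead of time.sleep(.2)

    def dataReceived(self, data):
        # Called by the reactor whenever bytes arrive; buffer and parse here,
        # replacing the reader() thread and its inWaiting() loop.
        print(repr(data))

SerialPort(PollerProtocol(), 'COM4', reactor, baudrate=115200)
reactor.run()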
