I'm trying to write a script that reads holding registers from Modbus TCP devices and inserts the decoded UINT32 values into MySQL:
Read 2 registers with pymodbus
Decode them into a uint32
Insert the decoded values into MySQL
Start again every x minutes
I'm not a programmer and I'm just discovering Python. I tried the code below, which seems to work, but I don't understand everything in it, and I don't know whether using "threading" is the right solution. Do you have something better to suggest?
Thank you
import time
import pymysql.cursors
from pymodbus.constants import Endian
from pymodbus.payload import BinaryPayloadDecoder
from threading import Thread, Lock
from pyModbusTCP.client import ModbusClient

# connect database
#sqlconnection = pymysql.connect(host='127.0.0.1',
#                                user='root',
#                                password='',
#                                database='database',
#                                charset='utf8mb4',
#                                cursorclass=pymysql.cursors.DictCursor)

# read device information (ID, IP, Port, Register address)
#with sqlconnection:
#    with sqlconnection.cursor() as cursor:
#        sql = "SELECT `dev_id`, H0.hw_address, H0.hw_port FROM `devicestatus` LEFT JOIN `hardware` H0 ON H0.hw_id = devicestatus.dev_hw_id WHERE H0.hw_enabled = 1 AND devicestatus.dev_used = 1 ORDER BY H0.hw_address, dev_name"
#        cursor.execute(sql,)
#        result = cursor.fetchall()

# sample of result
result = [{'dev_id': 7, 'hw_address': '127.0.0.1', 'hw_port': 502},
          {'dev_id': 8, 'hw_address': '127.0.0.1', 'hw_port': 502},
          {'dev_id': 9, 'hw_address': '127.0.0.1', 'hw_port': 502}]

# set global variables
regs = []
sf_value = 0

# init a thread lock
regs_lock = Lock()

# modbus polling thread
def polling_thread():
    global regs
    # polling loop
    while True:
        # First device is at address %MD100 (in the Schneider M221 simulator); I don't understand why I need to do -2
        REGISTER_ADDRESS = 98
        # For each dictionary in the list, extract the values needed to launch one Modbus request per device.
        for val in result:
            # set Modbus TCP variables
            DEV_ID = val['dev_id']
            SERVER_HOST = val['hw_address']
            SERVER_PORT = val['hw_port']
            SERVER_ID = 1
            REGISTER_ADDRESS = REGISTER_ADDRESS + 2  # to read %MD100, %MD102, %MD104 ...
            REGISTER_COUNT = 2
            print("Target :", SERVER_HOST, SERVER_PORT, REGISTER_ADDRESS)
            client = ModbusClient(host=SERVER_HOST, port=int(SERVER_PORT), unit_id=int(SERVER_ID),
                                  auto_open=True, auto_close=True, timeout=2)
            # keep TCP open
            if not client.is_open():
                client.open()
            # do modbus reading on socket
            reg_read = client.read_holding_registers(REGISTER_ADDRESS, REGISTER_COUNT)
            # if read is ok, store result in regs (with thread lock synchronization)
            if reg_read:
                # decode the 2 registers into a 32 bit integer
                decoder = BinaryPayloadDecoder.fromRegisters(reg_read, byteorder=Endian.Big, wordorder=Endian.Little)
                sf_value = decoder.decode_32bit_int()
                with regs_lock:
                    regs = sf_value
                    print(regs)
            # To do: insert each decoded value into the SQL table
            #with sqlconnection:
            #    with sqlconnection.cursor() as cursor:
            #        # Create a new record
            #        sql = "INSERT INTO ...
        # x sec before next polling
        time.sleep(2)

# start polling thread
tp = Thread(target=polling_thread)
# set daemon: polling thread will exit if main thread exits
tp.daemon = True
tp.start()

# display loop (in main thread)
while True:
    # print regs (with thread lock synchronization)
    with regs_lock:
        #print(regs)
        print("What is this part ? everything is done in the polling_thread")
    # x sec before next print
    time.sleep(2)
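For reference, the read/decode/insert cycle described above can also be done in a single loop without a thread. A minimal sketch, assuming a hypothetical readings(dev_id, value) table and the same byte/word order as the code above (table, column names and the poll interval are illustrative only):

import time
import pymysql
from pyModbusTCP.client import ModbusClient

POLL_SECONDS = 60
devices = [{'dev_id': 7, 'hw_address': '127.0.0.1', 'hw_port': 502}]

def to_uint32(words):
    # byteorder Big / wordorder Little: the low word comes first in the register list
    return (words[1] << 16) | words[0]

conn = pymysql.connect(host='127.0.0.1', user='root', password='', database='database')

while True:
    register_address = 98
    for dev in devices:
        register_address += 2          # %MD100, %MD102, %MD104 ...
        client = ModbusClient(host=dev['hw_address'], port=int(dev['hw_port']),
                              unit_id=1, auto_open=True, auto_close=True, timeout=2)
        words = client.read_holding_registers(register_address, 2)
        if words:
            value = to_uint32(words)
            with conn.cursor() as cursor:
                # 'readings' and its columns are made-up names; adapt to the real schema
                cursor.execute("INSERT INTO readings (dev_id, value) VALUES (%s, %s)",
                               (dev['dev_id'], value))
            conn.commit()
    time.sleep(POLL_SECONDS)

The manual shift-and-or does the same job as BinaryPayloadDecoder with byteorder=Big / wordorder=Little for unsigned values; for signed values the decoder (or struct) is the safer route.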
I'm new to Python so this could be a simple fix.
I am using Flask and sockets for this Python project. I am starting the socket on another thread so I can actively listen for new messages. I have an array variable called 'SocketConnections' inside my UdpComms class. The variable gets a new 'Connection' appended to it when a new socket connection is made; that works correctly. My issue is that when I try to read 'SocketConnections' from outside the thread, it is an empty array.
server.py
from flask import Flask, jsonify
import threading
import time
import UdpComms as U

app = Flask(__name__)

@app.route('/api/talk', methods=['POST'])
def talk():
    global global_server_socket
    apples = global_server_socket.SocketConnections
    return jsonify(message=apples)

global_server_socket = None

def start_server():
    global global_server_socket
    sock = U.UdpComms(udpIP="127.0.0.1", portTX=8000, portRX=8001, enableRX=True, suppressWarnings=True)
    i = 0
    global_server_socket = sock
    while True:
        i += 1
        data = sock.ReadReceivedData()  # read data
        if data != None:  # if NEW data has been received since last ReadReceivedData function call
            print(data)  # print new received data
        time.sleep(1)

if __name__ == '__main__':
    server_thread = threading.Thread(target=start_server)
    server_thread.start()
    app.run(debug=True, host='192.168.0.25')
UdpComms.py
import json
import uuid

class UdpComms():
    def __init__(self, udpIP, portTX, portRX, enableRX=False, suppressWarnings=True):
        self.SocketConnections = []

        import socket

        self.udpIP = udpIP
        self.udpSendPort = portTX
        self.udpRcvPort = portRX
        self.enableRX = enableRX
        self.suppressWarnings = suppressWarnings  # when true warnings are suppressed
        self.isDataReceived = False
        self.dataRX = None

        # Connect via UDP
        self.udpSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)  # internet protocol, udp (DGRAM) socket
        self.udpSock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)  # allows the address/port to be reused immediately instead of it being stuck in the TIME_WAIT state waiting for late packets to arrive.
        self.udpSock.bind((udpIP, portRX))

        # Create Receiving thread if required
        if enableRX:
            import threading
            self.rxThread = threading.Thread(target=self.ReadUdpThreadFunc, daemon=True)
            self.rxThread.start()

    def __del__(self):
        self.CloseSocket()

    def CloseSocket(self):
        # Function to close socket
        self.udpSock.close()

    def SendData(self, strToSend):
        # Use this function to send string to C#
        self.udpSock.sendto(bytes(strToSend, 'utf-8'), (self.udpIP, self.udpSendPort))

    def SendDataAddress(self, strToSend, guid):
        # Use this function to send string to C#
        print('finding connection: ' + guid)
        if self.SocketConnections:
            connection = self.GetConnectionByGUID(guid)
            print('found connection: ' + guid)
            if connection is not None:
                self.udpSock.sendto(bytes(strToSend, 'utf-8'), connection.Address)

    def ReceiveData(self):
        if not self.enableRX:  # if RX is not enabled, raise error
            raise ValueError("Attempting to receive data without enabling this setting. Ensure this is enabled from the constructor")
        data = None
        try:
            data, _ = self.udpSock.recvfrom(1024)
            print('Socket data received from: ', _)
            if self.IsNewConnection(_) == True:
                print('New socket')
                self.SendDataAddress("INIT:" + self.SocketConnections[-1].GUID, self.SocketConnections[-1].GUID)
            data = data.decode('utf-8')
        except WindowsError as e:
            if e.winerror == 10054:  # An error occurs if you try to receive before connecting to other application
                if not self.suppressWarnings:
                    print("Are You connected to the other application? Connect to it!")
                else:
                    pass
            else:
                raise ValueError("Unexpected Error. Are you sure that the received data can be converted to a string")
        return data

    def ReadUdpThreadFunc(self):  # Should be called from thread
        self.isDataReceived = False  # Initially nothing received
        while True:
            data = self.ReceiveData()  # Blocks (in thread) until data is returned (OR MAYBE UNTIL SOME TIMEOUT AS WELL)
            self.dataRX = data  # Populate AFTER new data is received
            self.isDataReceived = True
            # When it reaches here, data received is available

    def ReadReceivedData(self):
        data = None
        if self.isDataReceived:  # if data has been received
            self.isDataReceived = False
            data = self.dataRX
            self.dataRX = None  # Empty receive buffer
        if data != None and data.startswith('DIALOG:'):  # send it info
            split = data.split(':')[1]
        return data

    class Connection:
        def __init__(self, gUID, address) -> None:
            self.GUID = gUID
            self.Address = address

    def IsNewConnection(self, address):
        for connection in self.SocketConnections:
            if connection.Address == address:
                return False
        print('Appending new connection...')
        connection = self.Connection(str(uuid.uuid4()), address)
        self.SocketConnections.append(connection)
        return True

    def GetConnectionByGUID(self, guid):
        for connection in self.SocketConnections:
            if connection.GUID == guid:
                return connection
        return None
As mentioned above, when IsNewConnection() is called in UdpComms it does append a new object to SocketConnections. It is only when I try to view SocketConnections from the app.route that it is empty. My plan is to be able to send socket messages from the app.routes.
For interprocess communication you may try to use something like shared memory, documented here.
Instead of declaring your self.SocketConnections as a plain list [],
you'd use self.SocketConnections = Array('i', range(10)) (you are then limited to remembering only 10 connections, though).
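A minimal sketch of how multiprocessing shared memory behaves, assuming the per-connection bookkeeping can be reduced to integers (the listener function and the port value are made up for illustration):

from multiprocessing import Process, Array, Value

# shared between the parent and the child process
connection_count = Value('i', 0)          # how many slots are in use
connection_ports = Array('i', range(10))  # hypothetical: one int per connection, max 10

def listener(count, ports):
    # child process: record a "connection" in shared memory
    with count.get_lock():
        ports[count.value] = 8001
        count.value += 1

if __name__ == '__main__':
    p = Process(target=listener, args=(connection_count, connection_ports))
    p.start()
    p.join()
    # the parent process sees the update because the memory is shared
    print(connection_count.value, connection_ports[:connection_count.value])

A plain Python list, by contrast, is copied into the child process, so any appends made there are invisible to the parent.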
I'm getting threading errors when I try to use or create a DB cursor in my process_id function. Each thread has to use the database to process data for the id it is passed.
I can't use a cursor in the thread/process_id at all (I get threading errors and the DB never updates). I've coded it a lot of different ways; the code works when I don't use threads.
I have very specific requirements for how this code is to be written; slow and stable is fine. I also cut out a lot of error handling/logging before posting. The daemon/infinite loop is required.
How do I spin up a new cursor in each thread?
import threading
import time
from datetime import datetime
import os
import jaydebeapi, sys

# Enter the values for your database connection
database = "REMOVED"
hostname = "REMOVED"
port = "REMOVED"
uid = "REMOVED"
pwd = "REMOVED"

connection_string = 'jdbc:db2://' + hostname + ':' + port + '/' + database

if (sys.version_info >= (3, 0)):
    conn = jaydebeapi.connect("com.ibm.db2.jcc.DB2Driver", connection_string, [uid, pwd], jars="REMOVED")
else:
    conn = jaydebeapi.connect("com.ibm.db2.jcc.DB2Driver", [connection_string, uid, pwd])

# Thread Pool Variables
max_threads = 5
used_threads = 0

# define main cursor
cus = conn.cursor()

def process_id(id):
    # create a cursor for a thread
    cus_id = conn.cursor()
    cus_id.execute("SOME QUERY;")
    cus_id.close()
    global used_threads
    used_threads = used_threads - 1
    return 0

def daemon():
    global used_threads
    print("Daemon running...")
    while True:
        # ids to process are loaded into a list...
        for id in ids_to_process:
            if used_threads < max_threads:
                t = threading.Thread(target=process_id, args=(int(id),))
                t.start()
                used_threads += 1
    return 0

daemon()
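Since a single jaydebeapi connection is generally not safe to share across threads, one common fix is to open and close a dedicated connection inside each thread. A hedged sketch of that variant of process_id, reusing the connection parameters defined above and the Python 3 call form ("SOME QUERY;" stays a placeholder):

def process_id_threadsafe(id):
    global used_threads
    # open a dedicated connection inside the thread instead of sharing `conn`
    thread_conn = jaydebeapi.connect("com.ibm.db2.jcc.DB2Driver",
                                     connection_string, [uid, pwd], jars="REMOVED")
    try:
        cus_id = thread_conn.cursor()
        cus_id.execute("SOME QUERY;")
        cus_id.close()
        thread_conn.commit()   # commit from the thread that did the work
    finally:
        thread_conn.close()
        used_threads = used_threads - 1
    return 0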
Python code:
import serial
import pandas as pd
import time
import re
import xlrd
from msvcrt import getch
import numpy as np

i = 0
x = 0
y = 0

df = pd.read_excel(r'C:\Users\lynchfamily\Desktop\mlglovesdata.xls')

# Read COM9
# Read from COM10 as well
# Readline() only works with a timeout (IMPORTANT)
serHC = serial.Serial('COM9', 115200, timeout=.250, parity=serial.PARITY_NONE, rtscts=1)   # This is the JY
serRN = serial.Serial('COM10', 115200, timeout=.250, parity=serial.PARITY_NONE, rtscts=1)  # This is the silvermate

def serialin():
    # Sensor lists
    sensor_names = list()
    sensor_values = list()
    global i
    # Read a certain amount of bytes from serial and then continue
    # Regular expressions for finding the proper data
    while i < 6:
        # print(i) for debugging
        global serHC
        global serRN
        #searchObj = re.search(r'(A\d?\d*)?(\d*)?', serHC.read(4).decode(), re.I)
        #searchObjRN = re.search(r'(A\d?\d*)?(\d*)?', serRN.read(4).decode(), re.I)
        # Serial data stops while in loop
        # The if statements keep the false values out of the program
        #if searchObj.group(1):
        sensor_names.append(serHC.read(2))
        #if searchObj.group(2):
        sensor_values.append(serHC.read(2))
        #if searchObjRN.group(1):
        sensor_names.append(serRN.read(2))
        #if searchObjRN.group(2):
        sensor_values.append(serRN.read(2))
        i = i + 1

    while 1:
        # Get the key from the msvcrt module
        key = getch().decode('ASCII')
        # If key is pressed, do something
        if key:
            print(key)
            # Zip them together
            # Final 2D list
            final_2d_list = zip(sensor_names, sensor_values)
            print(list(sorted(final_2d_list)))
            #vals = df.Dataframe([
            #df.append(vals)
            #print(sorted_array_1stdim[r])
            #sensor_values = [0] * 10
            # Thread for reading definition
            break

    # Fancy recursion
    sensor_values.clear()
    sensor_names.clear()
    i = 0
    serialin()

serialin()
Arduino Code:
// The device with green colored wires
void setup() {
  Serial.begin(115200);
}

void loop() {
  // It won't work with the I2C while loop for some reason. Perhaps it is getting stuck up on it
  Serial.print("A4");
  Serial.print(analogRead(0)); // Read the local analog signal
  delay(5);
  Serial.print("A5");
  Serial.print(analogRead(1)); // Read the local analog signal
  delay(5);
  Serial.print("A6");
  Serial.print(analogRead(2)); // Read the local analog signal
  delay(5);
  Serial.print("A7");
  Serial.print(analogRead(3)); // Read the local analog signal
}
I'm trying to send analog sensor data to Python over Bluetooth, through a SparkFun Bluetooth Silver Mate and HC-06 modules.
I have to read the analog data with a delay of 5 seconds between each reading, so that the readings don't conflict.
The data comes in through serial ports COM9 and COM10. I know that reading serial in Python can block, which is why I attempted to read it first and then put it in a list.
I also know that once the serial data has been read through, it appears to be non-blocking. When I was using serHC.readline() and serRN.readline(), I was getting something like what I'd expect to see.
However, the data in the lists was not updating according to the changes in the sensors. I have to admit Python is not my main programming language, which is why I'm asking for help.
I thought using multiple threads might work, but I wasn't able to get at the serHC and serRN variables in the main thread.
Any help will be appreciated!
As you have discovered, it is not possible to read from the two serial ports sequentially: a blocking read on one port implies a loss of data sent simultaneously over the other port.
Use a thread-based approach.
The following sketch should be enough to get started:
import serial
import time
import re
import threading

BYTES_TO_READ = 6

# read from serial port
def read_from_serial(board, port):
    print("reading from {}: port {}".format(board, port))
    payload = b''
    ser = serial.Serial(port, 115200, timeout=.250, parity=serial.PARITY_NONE, rtscts=1)
    bytes_count = 0
    while bytes_count < BYTES_TO_READ:
        read_bytes = ser.read(2)
        # sum number of bytes returned (not 2), you have set the timeout on serial port
        # see https://pythonhosted.org/pyserial/pyserial_api.html#serial.Serial.read
        bytes_count = bytes_count + len(read_bytes)
        payload = payload + read_bytes
    # here you have the bytes, do your logic
    # ...
    print("READ from {}: [{}]".format(board, payload))
    return

def main():
    board = {
        'JY': 'COM9',
        'SILVER': 'COM10'
    }
    threads = []
    for b in board:
        t = threading.Thread(target=read_from_serial, args=(b, board[b],))
        threads.append(t)
        t.start()
    # wait for all threads termination
    for t in threads:
        t.join()

main()
For learning about threading: https://pymotw.com/3/threading/
Periodic read from the serial ports
Below is a sketch that reads every TIME_PERIOD seconds.
Apart from the infinite while loop around the read, there is a per-thread loop with a nested try/except block
for catching serial communication problems and retrying the connection after TIME_PERIOD seconds.
Take it just as a starting example!
import serial
import time
import re
import threading

BYTES_TO_READ = 6
TIME_PERIOD = 5

def read_message(board, port, handle):
    payload = b''
    bytes_count = 0
    while bytes_count < BYTES_TO_READ:
        read_bytes = handle.read(2)
        bytes_count = bytes_count + len(read_bytes)
        payload = payload + read_bytes
    # here you have the bytes, do your logic
    # ...
    print("READ from {}: [{}]".format(board, payload))

def serial_thread(board, port):
    print("reading from {}: port {}".format(board, port))
    while True:
        try:
            handle = serial.Serial(port, 115200, timeout=.250, parity=serial.PARITY_NONE, rtscts=1)
            while True:
                read_message(board, port, handle)
                time.sleep(TIME_PERIOD)
        except Exception as e:
            print("ERROR: {}".format(e))
            print("retrying in {} seconds".format(TIME_PERIOD))
            handle.close()
            time.sleep(TIME_PERIOD)

def main():
    board = {
        'JY': '/dev/ttyUSB0',
        'SILVER': '/dev/ttyACM0'
    }
    threads = []
    for b in board:
        t = threading.Thread(target=serial_thread, args=(b, board[b],))
        threads.append(t)
        t.start()
    # wait for all threads termination
    for t in threads:
        t.join()

main()
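To get the values back into the main thread, as the original question wanted, one common pattern is to have each reader thread push its result onto a queue.Queue and let the main thread drain it. A rough sketch under that assumption (the payload is a placeholder for the bytes actually read):

import queue
import threading

results = queue.Queue()

def reader(board, port):
    # ... open the port and read as in the sketches above ...
    payload = b'A4123'             # placeholder for the bytes actually read
    results.put((board, payload))  # hand the data to the main thread

boards = {'JY': 'COM9', 'SILVER': 'COM10'}
threads = [threading.Thread(target=reader, args=(b, p), daemon=True) for b, p in boards.items()]
for t in threads:
    t.start()

# main thread: collect whatever the readers produce
for _ in boards:
    board, payload = results.get()  # blocks until one reader delivers
    print(board, payload)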
I have a ROUTER whose purpose is to accumulate image data from multiple DEALER clients and perform OCR on the complete image. I found that the most efficient way of handling the OCR is through Python's multiprocessing library; the accumulated image bytes are put into a Queue for processing in a separate Process. However, I need to ensure that when a client experiences a timeout, the Process is properly terminated and doesn't linger pointlessly and hog resources.
In my current solution I insert each newly connected client into a dict whose value is my ClientHandler class, which holds all the image data and spawns a Thread that sets a boolean named "timeout" to True once 5 seconds have elapsed. Should a new message be received within that 5-second window, bump is called and the timer is reset to 0; otherwise I clean up prior to thread termination, and the reference is deleted from the dict in the main loop:
import threading
import time
import zmq

class ClientHandler(threading.Thread):
    def __init__(self, socket):
        self.elapsed = time.time()
        self.timeout = False
        self.socket = socket

        super(ClientHandler, self).__init__()

    def run(self):
        while time.time() - self.elapsed < 5.0:
            pass
        self.timeout = True
        # CLIENT TIMED OUT
        # HANDLE TERMINATION AND CLEAN UP HERE

    def bump(self):
        self.elapsed = time.time()

    def handle(self, id, header, data):
        # HANDLE CLIENT DATA HERE
        # ACCUMULATE IMAGE BYTES, ETC
        self.socket.send_multipart([id, str(0)])

def server_task():
    clients = dict()
    context = zmq.Context.instance()
    server = context.socket(zmq.ROUTER)
    server.setsockopt(zmq.RCVTIMEO, 0)
    server.bind("tcp://127.0.0.1:7777")

    while True:
        try:
            id, header, data = server.recv_multipart()

            client = clients.get(id)
            if client == None:
                client = clients[id] = ClientHandler(server)
                client.start()

            client.bump()
            client.handle(id, header, data)
        except zmq.Again:
            for id in clients.keys():
                if clients[id].timeout:
                    del clients[id]

    context.term()

if __name__ == "__main__":
    server_task()
But this entire method just doesn't feel right. Am I going about this improperly? If so, I would greatly appreciate it if someone could point me in the right direction.
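One lighter-weight pattern for the same bump/timeout bookkeeping is to record a last-seen timestamp per client and sweep the dict inside the receive loop, with no per-client thread at all. A minimal sketch of that idea (not the approach ultimately adopted in the answer below):

import time
import zmq

TIMEOUT = 5.0

def server_task():
    context = zmq.Context.instance()
    server = context.socket(zmq.ROUTER)
    server.setsockopt(zmq.RCVTIMEO, 1000)  # wake up at least once a second
    server.bind("tcp://127.0.0.1:7777")

    last_seen = {}                          # client id -> timestamp of last message
    while True:
        try:
            id, header, data = server.recv_multipart()
            last_seen[id] = time.time()     # "bump" without a dedicated thread
            # ... accumulate image bytes for this id here ...
            server.send_multipart([id, b"0"])
        except zmq.Again:
            pass
        # sweep expired clients; list() so the dict can be modified while iterating
        now = time.time()
        for id in list(last_seen):
            if now - last_seen[id] > TIMEOUT:
                # ... terminate the worker Process for this client here ...
                del last_seen[id]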
Figured it out myself, hoping it may be of assistance to others.
I instead have a ROUTER on an assigned port that distributes unique ports to each client, which thereafter connects to the newly-bound socket on said unique port. When a client disconnects, the port is recycled for reassignment.
import sys
import zmq
from multiprocessing import Process, Queue, Value

def server_task():
    context = zmq.Context.instance()
    server = context.socket(zmq.ROUTER)
    server.bind("tcp://127.0.0.1:7777")

    timeout_queue = Queue()
    port_list = [ 1 ]
    proc_list = [ ]

    while True:
        try:
            id = server.recv_multipart()[0]

            # Get an unused port from the list
            # Ports from clients that have timed out are recycled here
            while not timeout_queue.empty():
                port_list.append(timeout_queue.get())
            port = port_list.pop()
            if len(port_list) == 0:
                port_list.append(port + 1)

            # Spawn a new worker task, binding the port to a socket
            proc_running = Value("b", True)
            proc_list.append(proc_running)
            Process(target=worker_task, args=(proc_running, port, timeout_queue)).start()

            # Send the new port to the client
            server.send_multipart([id, str(7777 + port)])
        except KeyboardInterrupt:
            break

    # Safely allow our worker processes to terminate
    for proc_running in proc_list:
        proc_running.value = False
    context.term()

def worker_task(proc_running, port, timeout_queue):
    context = zmq.Context.instance()
    worker = context.socket(zmq.ROUTER)
    worker.setsockopt(zmq.RCVTIMEO, 5000)
    worker.bind("tcp://127.0.0.1:%d" % (7777 + port, ))

    while proc_running.value:
        try:
            id, data = worker.recv_multipart()
            worker.send_multipart([id, data])
        except zmq.Again:
            timeout_queue.put(port)
            context.term()
            break

    print("Client on port %d disconnected" % (7777 + port, ))
My script is a server that listens for client requests and sends responses. It handles requests with threads:
import socket
import select
import sys
import threading
import json
import MySQLdb

class Server:
    def __init__(self):
        self.host = ''
        self.port = 50000
        self.backlog = 5
        self.size = 1024
        self.server = None
        self.threads = []

    def open_socket(self):
        try:
            self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.server.bind((self.host, self.port))
            self.server.listen(5)
        except socket.error, (value, message):
            if self.server:
                self.server.close()
            print "Could not open socket: " + message
            sys.exit(1)

    def run(self):
        self.open_socket()
        input = [self.server, sys.stdin]
        running = 1
        while running:
            inputready, outputready, exceptready = select.select(input, [], [])
            for s in inputready:
                if s == self.server:
                    # handle the server socket
                    c = Client(self.server.accept())
                    c.start()
                    self.threads.append(c)
                elif s == sys.stdin:
                    # handle standard input
                    junk = sys.stdin.readline()
                    running = 0
        # close all threads
        self.server.close()
        for c in self.threads:
            c.join()

class Client(threading.Thread):
    def __init__(self, (client, address)):
        threading.Thread.__init__(self)
        self.client = client
        self.address = address
        self.size = 1024

    def run(self):
        running = 1
        while running:
            data = self.client.recv(self.size)
            if data:
                data2 = data.split()
                if data2[0] == 'Hello':
                    status = 'Hello'
                    # fetch from database users by location
                    reply = '6'
                if data2[0] == 'Index':
                    status = 'Index'
                    # fetch from database users by location
                    reply = 'I'
                    db = MySQLdb.connect(host="localhost",      # your host, usually localhost
                                         user="root",           # your username
                                         passwd="Rambo_9134",   # your password
                                         db="secure_login")     # name of the data base
                    # you must create a Cursor object. It will let
                    # you execute all the queries you need
                    cur = db.cursor()
                    # Use all the SQL you like
                    cur.execute("SELECT ml.member,m.username FROM locations l JOIN memberlocation ml ON(l.id = ml.location) JOIN members m ON(m.id = ml.member) where l.id = 1;")
                    # print all the first cell of all the rows
                    data = []
                    for row in cur.fetchall():
                        print row[1]
                        data.append({row[0]: row[1]})
                    print 'JSON', json.dumps(data)
                    reply = data
                self.client.send(json.dumps(reply))
            else:
                self.client.close()
                running = 0

if __name__ == "__main__":
    s = Server()
    s.run()
This script runs perfectly, but it stops when I press Enter. I have tried many alternatives: daemon, nohup, ... I couldn't make it run as a service in the background. I think this is a programming issue.
How can I make this script run in the background as a service?
For a quick and easy way in a test/dev environment you can use screen.
screen -S mySessionName
This starts a new screen session with the name mySessionName and attaches to that session. Inside this session you can now run your code.
Use Ctrl+A, D to detach from that session. Your code will continue to run.
To reattach to that session use:
screen -r mySessionName
To show all sessions use:
screen -ls
In a production environment however you should be looking at supervisor. This serverfault question might help.
Make a PHP or HTML script devoted solely to running that Python program. Then run that PHP/HTML script on the server and you're good :).