I wanted to develop an app with Python that would run on a Raspberry and be remote controlled.
This app should control (switch on and off) 2 leds based on the current day, time and the user's indication.
The app is to be executed when the Raspberry is booted (auto start) and should be run endlessly. For this purpose, a service is created under /usr/lib/systemd/system. The postgres (Heroku) databases were used for this purpose.
I have read several articles and used several suggestions, but unfortunately without much success. The problem with my app is that it crashes every 24 hours with the following error:
Light_system Error 1: SSL SYSCALL error: Connection timed out
Light_system Error 1: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
The raspberry is connected to the internet through a sim module (SIM7600E -H 4G HAT). But this is not supposed to be the problem, because I also connect the raspberry to the direct router, but get the same error again.
I am very grateful for any suggestion!
I have already tried the following steps:
keepalive_kwargs = {
"keepalives": 1,
"keepalives_idle": 30,
"keepalives_interval": 5,
"keepalives_count": 5,
}
conn = psycopg2.connect(conn_string, **keepalive_kwargs)
and
engine = sqlalchemy.create_engine(DATABASE_URL, pool_pre_ping=True)
Here is my code:
crud.py
keepalive_kwargs = {
"keepalives": 1,
"keepalives_idle": 30,
"keepalives_interval": 5,
"keepalives_count": 5,
}
conn_string = "host, dbname, user, password"
conn = psycopg2.connect(conn_string, **keepalive_kwargs)
class CRUDSettings:
#staticmethod
def read_from_db():
try:
cur = conn.cursor()
cur.execute("SELECT * FROM time_settings_tabel")
data = cur.fetchall()
return data
except Exception as e:
with open("read_from_db.log", "a") as f:
f.write(f"ERROR : {str(e)}")
database.py
DATABASE_URL = "URL for DB"
database = databases.Database(DATABASE_URL)
engine = sqlalchemy.create_engine(DATABASE_URL, pool_pre_ping=True)
SessionLocal = sessionmaker(autocommit=False, autoflush = False, bind = engine)
Base = declarative_base()
metadata = sqlalchemy.MetaData()
Base.metadata.create_all(bind=engine)
def get_db():
Base.metadata.create_all(bind=engine)
database = engine.connect()
db = SessionLocal()
try:
yield db
finally:
db.close()
app.py
class System:
async def run(self, app):
try:
while(app.state.isRunning):
for row in crud.settings.read_from_db():
now = datetime.now()
current_time = convert(now.hour, now.minute,now.second)
current_day= now.strftime("%A")
start_split = row[2].split(":")
end_split = row[3].split(":")
start = convert(int(start_split[0]), int(start_split[1]), int(start_split[2]))
end = convert(int(end_split[0]), int(end_split[1]), int(end_split[2]))
current_id = row[0]
GPIO.output(20,GPIO.LOW)
GPIO.output(21,GPIO.LOW)
while ((current_day == row[1].replace(" ","")) and (current_time >= start) and (current_time <= end) and pumpe and (current_id == row[0])):
print("START")
GPIO.output(20,GPIO.HIGH)
GPIO.output(21,GPIO.HIGH)
now = datetime.now()
current_time = convert(now.hour, now.minute,now.second)
if (current_day == row[1].replace(" ","") and current_time >= end):
print("END")
GPIO.output(20,GPIO.LOW)
GPIO.output(21,GPIO.LOW)
break
await asyncio.sleep(1)
except Exception as e:
with open("watersystem.log", "a") as f:
f.write("ERROR : %s", str(e))
system = System()
main.py
#app.on_event("startup")
async def startup_event():
db = SessionLocal()
app.state.isRunning = True
for row in get_db():
app.state.settings = row
asyncio.create_task(system.run(app))
Related
This code is a DNS resolver that check from a DB for an entry not older than 5 minutes.
#!/usr/bin/python3
from MySQLdb import _mysql as MySQL
from dnslib import RR, QTYPE, RCODE, A
from dnslib.label import DNSLabel
from dnslib.server import DNSServer, BaseResolver
from time import sleep, time
class MariaResolver(BaseResolver):
DELTA = 300
def __init__(self):
self.password = "********************"
def resolve(self, request, handler):
reply = request.reply()
qname = request.q.qname
fqdn = str(request.q.qname)
try:
if fqdn.find("iut-") == -1:
reply.header.rcode = RCODE.REFUSED
else:
hostname = fqdn.split(".")[0]
timestamp = int(time()) - self.DELTA
query = "SELECT ip FROM dns WHERE record='{}' AND timestamp>{}"
db = MySQL.connect("localhost", "dns", self.password, "salles")
db.query(query.format(hostname, timestamp))
result = db.store_result()
row = result.fetch_row(how=1)
if row:
ip = row[0]["ip"].decode("utf-8")
reply.add_answer(RR(qname, QTYPE.A, ttl=0,
rdata=A(ip)))
else:
reply.header.rcode = RCODE.REFUSED
db.close()
except Exception as e:
print(e)
reply.header.rcode = RCODE.REFUSED
return reply
if __name__ == '__main__':
resolver = MariaResolver()
udp_server = DNSServer(resolver, port=53)
udp_server.start_thread()
while udp_server.isAlive():
sleep(0.1)
This code leaks over time and I do not understand why.
In the Proxmox screenshot, you can see service restarted at the and.
I wrote a Flask app that is working fine, and I wanted that while it is running, a separate background thread should parallel to it doing some stuff. The problem is, doing this doesn't spawn the thread at all, but I know that my code is right because using the exact same portion of the thread code on a simple python script works as intended.
app.py
weatherCollectorThread = WeatherDataCollectorThread()
...
if __name__ == '__main__':
try:
print("Starting Weather Collector Thread...")
weatherCollectorThread.start()
print("Starting the WebApp...")
app.run(debug=True)
except KeyboardInterrupt:
try:
weatherCollectorThread.stop()
except:
pass
WeatherDataCollectorThread Class
class WeatherDataCollectorThread:
def __init__(self):
self.weatherStations = DBHelper.get_weather_stations()
self.weatherApiKey = "REDACTED"
self.baseURL = "SOME URL"
self.isThreadRunning = False
self.result_log = open('results.log','a+')
def storeWeatherData(self,weather):
conn = DBHelper.get_connection()
cur = conn.cursor()
cur.execute("INSERT INTO weather_data(city,country,now_unixtime,last_updated_unixtime,temperature,isDay,condition_text,condition_icon,windspeed,winddir,pressure,precipitation,cloud,humidity) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)",[weather['city'],weather['country'],weather['now_unixtime'],weather['last_updated_unixtime'],weather['temperature'],weather['isDay'],weather['condition_text'],weather['condition_icon'],weather['windspeed'],weather['winddir'],weather['pressure'],weather['precipitation'],weather['cloud'],weather['humidity']])
conn.commit()
conn.close()
def collectWeatherData(self):
self.isThreadRunning = True
while self.isThreadRunning:
for each_station in self.weatherStations:
if each_station['isWorking'] != 1:
continue
print("Sending request")
params = {'q':each_station['location'],'key':self.weatherApiKey}
resp = requests.get(url=self.baseURL,params=params)
print("Request received")
weatherData = json.loads(resp.text)
location = weatherData['location']
current = weatherData['current']
weather = {}
weather['city'] = location['name']
weather['country'] = location['country']
weather['now_unixtime'] = location['localtime_epoch']
weather['last_updated_unixtime'] = current['last_updated_epoch']
weather['temperature'] = current['temp_c']
weather['isDay'] = current['is_day']
weather['condition_text'] = current['condition']['text']
weather['condition_icon'] = current['condition']['icon']
weather['windspeed'] = current['wind_kph']
weather['winddir'] = current['wind_dir']
weather['pressure'] = current['pressure_mb']
weather['precipitation'] = current['precip_mm']
weather['cloud'] = current['cloud']
weather['humidity'] = current['humidity']
self.storeWeatherData(weather)
print("Data stored\n" + '-'*24)
self.result_log.write(resp.text + '\n')
sleep(60)
def start(self):
self.thread = Thread(target=self.collectWeatherData)
self.thread.start()
def join_instrument(self,session):
conn = DBHelper.get_connection()
cur = conn.cursor()
cur.execute("UPDATE weather_stations SET isWorking=1 WHERE weatherStationID=?",[session['weatherStationID']])
conn.commit()
conn.close()
def detach_instrument(self,session):
conn = DBHelper.get_connection()
cur = conn.cursor()
cur.execute("UPDATE weather_stations SET isWorking=0 WHERE weatherStationID=?",[session['weatherStationID']])
conn.commit()
conn.close()
def stop(self):
self.result_log.close()
self.isThreadRunning = False
So I figured out the solution.
You see, when you use flask run to run your web-app, it ignores every single function call in the script and parses through the decorators and starts the app on its own. So, if you do something like:
if __name__ == '__main__':
app.start()
someOtherFunction()
Neither the app.start() nor the someOtherFunction() would start.
So the solution?
Simply use python3 app.py to run the script.
... yes, it's that simple :|
I am using MariaDB Database Connector for Python and I have a singleton database class that is responsible for creating a pool and performing database operations on that pool. I have made every effort to close the pool after every access. But, still, after a while the pool becomes unusable and gets stuck, never to be freed. This might be a bug with the connector or a bug in my code. Once the pool is exhausted, I create and return a normal connection, which is not efficient for every database access.
Here's my database module code:
import mariadb
import configparser
import sys
from classes.logger import AppLogger
logger = AppLogger(__name__)
connections = 0
class Db:
"""
Main database for the application
"""
config = configparser.ConfigParser()
config.read('/app/config/conf.ini')
db_config = db_config = config['db']
try:
conn_pool = mariadb.ConnectionPool(
user = db_config['user'],
password = db_config['password'],
host = db_config['host'],
port = int(db_config['port']),
pool_name = db_config['pool_name'],
pool_size = int(db_config['pool_size']),
database = db_config['database'],
)
except mariadb.PoolError as e:
print(f'Error creating connection pool: {e}')
logger.error(f'Error creating connection pool: {e}')
sys.exit(1)
def get_pool(self):
return self.conn_pool if self.conn_pool != None else self.create_pool()
def __get_connection__(self):
"""
Returns a db connection
"""
global connections
try:
pconn = self.conn_pool.get_connection()
pconn.autocommit = True
print(f"Receiving connection. Auto commit: {pconn.autocommit}")
connections += 1
print(f"New Connection. Open Connections: {connections}")
logger.debug(f"New Connection. Open Connections: {connections}")
except mariadb.PoolError as e:
print(f"Error getting pool connection: {e}")
logger.error(f'Error getting pool connection: {e}')
# exit(1)
pconn = self.ــcreate_connectionــ()
pconn.autocommit = True
connections += 1
logger.debug(f'Created normal connection following failed pool access. Connections: {connections}')
return pconn
def ــcreate_connectionــ(self):
"""
Creates a new connection. Use this when getting a
pool connection fails
"""
db_config = self.db_config
return mariadb.connect(
user = db_config['user'],
password = db_config['password'],
host = db_config['host'],
port = int(db_config['port']),
database = db_config['database'],
)
def exec_sql(self, sql, values = None):
global connections
pconn = self.__get_connection__()
try:
cur = pconn.cursor()
print(f'Sql: {sql}')
print(f'values: {values}')
cur.execute(sql, values)
# pconn.commit()
# Is this a select operation?
if sql.startswith('SELECT') or sql.startswith('Select') or sql.startswith('select'):
result = cur.fetchall() #Return a result set for select operations
else:
result = True
pconn.close()
connections -= 1
print(f'connection closed: connections: {connections}')
logger.debug(f'connection closed: connections: {connections}')
# return True #Return true for insert, update, and delete operations
return result
except mariadb.Error as e:
print(f"Error performing database operations: {e}")
# pconn.rollback()
pconn.close()
connections -=1
print(f'connection closed: connections: {connections}')
return False
To use the class in a module, I import the class there and simply instantiate an object from the class and run sql queries on it:
db = Db()
users = db.exec_sql("SELECT * FROM users")
Any ideas why the pool gets exhausted after a while (maybe days) and never gets healed?
Maybe a different error from mariadb.Error is raised sometimes and the connection is never closed. I believe the best practice would be to use a finally section to guarantee that the connection is always closed, like this:
pconn = None
try:
pconn = self.__get_connection__()
# ...
except mariadb.Error as e:
# ...
finally:
if pconn:
try:
pconn.close()
except:
# Not really expected, but if this ever happens it should not alter
# whatever happened in the try or except sections above.
my script is a server that listens to clients requests and send responses. It handles requests by threading:
class Server:
def __init__(self):
self.host = ''
self.port = 50000
self.backlog = 5
self.size = 1024
self.server = None
self.threads = []
def open_socket(self):
try:
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.bind((self.host,self.port))
self.server.listen(5)
except socket.error, (value,message):
if self.server:
self.server.close()
print "Could not open socket: " + message
sys.exit(1)
def run(self):
self.open_socket()
input = [self.server,sys.stdin]
running = 1
while running:
inputready,outputready,exceptready = select.select(input,[],[])
for s in inputready:
if s == self.server:
# handle the server socket
c = Client(self.server.accept())
c.start()
self.threads.append(c)
elif s == sys.stdin:
# handle standard input
junk = sys.stdin.readline()
running = 0
# close all threads
self.server.close()
for c in self.threads:
c.join()
class Client(threading.Thread):
def __init__(self,(client,address)):
threading.Thread.__init__(self)
self.client = client
self.address = address
self.size = 1024
def run(self):
running = 1
while running:
data = self.client.recv(self.size)
if data:
data2 = data.split()
if data2[0] == 'Hello':
status = 'Hello'
#fetch from database users by location
reply= '6'
if data2[0] == 'Index':
status = 'Index'
#fetch from database users by location
reply='I'
db = MySQLdb.connect(host="localhost", # your host, usually localhost
user="root", # your username
passwd="Rambo_9134", # your password
db="secure_login") # name of the data base
# you must create a Cursor object. It will let
# you execute all the queries you need
cur = db.cursor()
# Use all the SQL you like
cur.execute("SELECT ml.member,m.username FROM locations l JOIN memberlocation ml ON(l.id = ml.location) JOIN members m ON(m.id = ml.member) where l.id = 1;")
# print all the first cell of all the rows
data = []
for row in cur.fetchall() :
print row[1]
data.append({row[0]:row[1]})
print 'JSON', json.dumps(data)
reply = data
self.client.send(json.dumps(reply))
else:
self.client.close()
running = 0
if __name__ == "__main__":
s = Server()
s.run()
this script runs perfectly but it stops when i press enter. I have tried many alternatives: deamon, nohup, ... i couldn't make it run as a service in the background. i think this is a programming issue
how can i make this script run in the background as a service ?
For a quick and easy way in a test/dev environment you can use screen.
screen -S mySessionName
This starts a new screen session with the name mySessionName and attaches to that session. Inside this session you can now run your code.
Use Ctrl+A, D to detach from that session. Your code will continue to run.
To reattach to that session use:
screen -r mySessionName
To show all sessions use:
screen -ls
In a production environment however you should be looking at supervisor. This serverfault question might help.
Make a PHP or HTML script devoted solely to running that python program. Then, run that PHP/HTML script on the server and you're good :).
I am newbie in python, so, it looks like my first project on that lang.
Everytime when I'm trying to run my script - I get different answers from mysql server.
The most frequent answer is OperationalError: (2006, 'MySQL server has gone away')
Sometimes I get output Thread: 11 commited (see code below).
And sometimes emergency stop (traslated, I have russian output in console).
Whatever if output full of commited - records in table still the same.
import MySQLdb
import pyping
import socket, struct
from threading import Thread
def ip2int(addr):
"""Convert ip to integer"""
return struct.unpack("!I", socket.inet_aton(addr))[0]
def int2ip(addr):
"""Convert integer to ip"""
return socket.inet_ntoa(struct.pack("!I", addr))
def ping(ip):
"""Pinging client"""
request = pyping.ping(ip, timeout=100, count=1)
return int(request.max_rtt)
class UpdateThread(Thread):
def __init__(self, records, name):
Thread.__init__(self)
self.database = MySQLdb.connect(host="***", port=3306, user="root", passwd="***", db="dns")
self.cursor = database.cursor()
self.name = name
self.records = records
def run(self):
print(self.name)
for r in self.records:
#latency = ping(int2ip(r[1])) what the hell :x
#ip = str(int2ip(r[1]))
id = str(r[0])
self.cursor.execute("""update clients set has_subn=%s where id=%s""" % (id, id))
self.database.commit()
print(self.name + " commited")
#start
database = MySQLdb.connect(host="***", port=3306, user="root", passwd="***", db="dns")
cursor = database.cursor()
cursor.execute("""select * from clients""")
data = cursor.fetchall() #All records from DataBase
count = len(data)
threads_counter = 10 #We are creating 10 threads for all records
th_count = count / threads_counter #Count of records for each thread
last_thread = count % threads_counter #Last records
threads = []
i = 0
while i < (count - last_thread):
temp_list = data[i:(i+th_count)]
#print(temp_list)
threads.append(UpdateThread(records = temp_list, name = "Thread: " + str((i/3) + 1)).start())
i += th_count
threads.append(UpdateThread(records = data[i: count], name = "Thread: 11").start())
P.S.
Another answers I found here is not helping me.
UPD:
I found that some(everytime another) thread print
OperationalError: (2013, 'Lost connection to MySQL server during query') and all next threads print OperationalError: (2013, 'Lost connection to MySQL server during query')
You need to close your DB connections when you're done with them or else the DB server will become overwhelmed and make your connections expire. For your program, I would change your code so that you have only one DB connection. You can pass a reference to it to your UpdateThread instances and close it when you're done.
database.close()