Python clean keyboard interrupt - python

I have a Python script in which I'm using threading to wait for inputs on my Raspberry Pi. This is the first time I've ever used threading and I'm having some difficulties handling the KeyboardInterrupt. It's not really a functional issue, but if the user presses Ctrl-C too early, right as the program starts, Python freaks out and spews a console full of import errors. I've tried wrapping the imports in try/except blocks, but that doesn't seem to work. I have a feeling it's something to do with the threading, as I've never had this issue before.
Besides wrapping everything in try/except blocks, I also tried:
import threading

cond = threading.Condition(threading.Lock())
cond.acquire()
cond.wait(None)
which I heard was supposed to keep the keyboard interrupt from taking effect until everything has loaded?
Is there any way to have Python simply do nothing on a KeyboardInterrupt?
Any help would be appreciated. Thanks!
My Code:
import RPi.GPIO as GPIO
import pymysql as mysql
import time
import urllib.request
from threading import Thread

GPIO.setmode(GPIO.BOARD)
GPIO.cleanup()

class table():
    __slots__ = ['pin', 'num', 'time']
    def __init__(self, pin, num, time=0):
        self.pin = pin
        self.num = num
        self.time = time

# Room ID
room = 6
# time (minutes)
thetime = 38
# table setup
tableList = [
    table(11, 6),
    table(13, 2),
    table(15, 4),
    table(19, 5),
    table(21, 3),
    table(23, 1),
]

def settable(table):
    with urllib.request.urlopen("http://time.zyphox.com") as url:
        curtime = int(url.read()) + (thetime * 60)
    con = mysql.connect(host="345", user="435", passwd="345435", db="34534543")
    cur = con.cursor()
    cur.execute("UPDATE tables SET time='" + str(curtime) + "' WHERE number='" + str(table) + "' AND id_room='" + str(room) + "'")
    cur.close()
    con.close()
    print("Setting table " + str(table) + " to " + str(thetime) + " minutes...")

def watchtable(table):
    GPIO.setup(table.pin, GPIO.IN)
    while True:
        if GPIO.input(table.pin) != 1 and table.time <= time.time():
            settable(table.num)
            table.time = time.time() + 10

def main():
    print("Loading kitchen System...")
    for table in tableList:
        t = Thread(target=watchtable, args=(table,))
        t.daemon = True
        t.start()
        time.sleep(1)
    print("Successful!")
    while True:
        time.sleep(300)
        print("- Pinging MYSQL database.")

main()
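One common way to get a quiet shutdown (a minimal sketch, not from the original post) is to catch KeyboardInterrupt around the call to main(); since the worker threads are daemons, they are killed automatically when the main thread exits:

import sys

try:
    main()
except KeyboardInterrupt:
    # Daemon threads die with the main thread, so exiting here
    # avoids the traceback spew when Ctrl-C arrives early.
    print("Shutting down...")
    sys.exit(0)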

Related

How do I use/create a db cursor in my python thread?

I'm getting threading errors when I try to use or create a db cursor in my process_id function. Each thread will have to use the database to process data for its passed id.
I can't utilize a cursor in the thread/process_id at all (I get threading errors and the DB never updates). I've coded it a lot of different ways. The code works when I don't use threads.
I have very specific requirements for how this code is to be written; slow and stable is fine. I also cut out a lot of error handling/logging before posting. The daemon/infinite loop is required.
How do I spin up a new cursor in each thread?
import threading
import time
from datetime import datetime
import os
import jaydebeapi, sys

# Enter the values for your database connection
database = "REMOVED"
hostname = "REMOVED"
port = "REMOVED"
uid = "REMOVED"
pwd = "REMOVED"
connection_string = 'jdbc:db2://' + hostname + ':' + port + '/' + database

if sys.version_info >= (3, 0):
    conn = jaydebeapi.connect("com.ibm.db2.jcc.DB2Driver", connection_string, [uid, pwd], jars="REMOVED")
else:
    conn = jaydebeapi.connect("com.ibm.db2.jcc.DB2Driver", [connection_string, uid, pwd])

# Thread pool variables
max_threads = 5
used_threads = 0

# Define main cursor
cus = conn.cursor()

def process_id(id):
    # Create a cursor for a thread
    cus_id = conn.cursor()
    cus_id.execute("SOME QUERY;")
    cus_id.close()
    global used_threads
    used_threads = used_threads - 1
    return 0

def daemon():
    global num_threads, used_threads
    print("Daemon running...")
    while True:
        # ids to process are loaded into a list...
        for id in ids_to_process:
            if used_threads < max_threads:
                t = threading.Thread(target=process_id, args=(int(id),))
                t.start()
                used_threads += 1
    return 0

daemon()
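One common pattern (an assumption on my part, not from the post) is to open a separate jaydebeapi connection inside each thread instead of sharing the module-level conn, since JDBC connections are generally not safe to share across threads. A minimal sketch, reusing the connection_string, uid, pwd, and jar path from above:

import threading
import jaydebeapi

def process_id(record_id):
    # A dedicated connection and cursor per thread, so nothing is shared.
    conn = jaydebeapi.connect("com.ibm.db2.jcc.DB2Driver",
                              connection_string, [uid, pwd], jars="REMOVED")
    try:
        cur = conn.cursor()
        cur.execute("SOME QUERY;")  # placeholder query from the post
        cur.close()
    finally:
        conn.close()

t = threading.Thread(target=process_id, args=(42,))  # 42 is a hypothetical id
t.start()
t.join()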

How to run code every x seconds inside while true - python

I need to execute code inside a while loop every x seconds without stopping the loop's work.
I have tried threading and lock combinations, but it is still not working. I am working with Python 3.7.4 and PyCharm 2019.2.
#!/usr/bin/env python3
import configparser
import logging
import threading
import time

import ts3

__all__ = ["notify_bot"]

logging.basicConfig(filename='ts3bot.log',
                    level=logging.INFO,
                    format="%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s",
                    )
logging.getLogger().addHandler(logging.StreamHandler())

def notify_bot(ts3conn, config, lock):
    logging.info("Start Notify Bot ...")
    lock.acquire()
    ts3conn.exec_("servernotifyregister", event="server")
    lock.release()
    while True:
        event = ts3conn.wait_for_event()
        try:
            reasonid_ = event[0]["reasonid"]
        except KeyError:
            continue
        if reasonid_ == "0":
            logging.info("User joined Lobby:")
            logging.info(event[0])
            servergroups = event[0]['client_servergroups']
            guestname = event[0]['client_nickname']
            lock.acquire()
            if not set(servergroups):
                print(f"s1 {guestname}")
            else:
                print(f"s2{guestname}")
            lock.release()
    return None

def keep_alive(ts3conn, lock):
    while True:
        logging.info("Send keep alive!")
        lock.acquire()
        ts3conn.send_keepalive()
        lock.release()
        time.sleep(5)

if __name__ == "__main__":
    logging.info("Start TS Bot ...")
    config = configparser.ConfigParser()
    config.sections()
    config.read("settings_test.ini")
    logging.info("Config loaded!")
    HOST = config['server']['url']
    PORT = config['server']['query_port']
    USER = config['server']['query_user']
    PASS = config['server']['query_pw']
    SID = config['server']['sid']
    NAME = config['bot']['name']
    logging.info("Connecting to query interface ...")
    URI = f"telnet://{USER}:{PASS}#{HOST}:{PORT}"
    try:
        with ts3.query.TS3ServerConnection(URI) as ts3conn:
            ts3conn.exec_("use", sid=SID)
            ts3conn.query("clientupdate", client_nickname="x123d")
            logging.info("Connected!")
            lock = threading.Lock()
            notify_thread = threading.Thread(target=notify_bot, args=(ts3conn, config, lock),
                                             daemon=True, name="notify")
            keep_alive_thread = threading.Thread(target=keep_alive, args=(ts3conn, lock),
                                                 daemon=True, name="keep_alive")
            notify_thread.start()
            keep_alive_thread.start()
            keep_alive_thread.join()
            notify_thread.join()
    except KeyboardInterrupt:
        logging.info(60 * "=")
        logging.info("TS Bot terminated by user!")
        logging.info(60 * "=")
After starting, it handles one person joining the server and then does nothing: it doesn't send keep-alives and stops working entirely.
You can use the built-in time library.
You can check it on the official Python website (https://docs.python.org/3/library/time.html).
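For example, a minimal sketch (my own, not from the answer) of running a task every x seconds inside a busy loop by checking elapsed time with time.monotonic():

import time

INTERVAL = 5  # hypothetical number of seconds between runs
next_run = time.monotonic()

while True:
    # ... the rest of the loop's work keeps running here ...
    if time.monotonic() >= next_run:
        print("periodic task fired")  # placeholder for the real work
        next_run += INTERVAL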
Personally, for simple things, I find the _thread library easier. Here's a function that you can run in a thread, and an example of starting that thread:
import _thread
import time

def mythread(arg1):
    while True:
        time.sleep(arg1)
        do.whatever()  # placeholder for the periodic work

_thread.start_new_thread(mythread, (5,))
The important thing to note is the second argument I passed to the _thread.start_new_thread function. It must be a tuple, which is why there is a comma after the 5. Even if your function doesn't require any arguments, you have to pass a tuple.
I am using the time module and threading.
I've made some changes and it seems to work:
#!/usr/bin/env python3
import configparser
import logging
import threading
import time

import ts3

logging.basicConfig(filename='ts3bot.log',
                    level=logging.INFO,
                    format="%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s",
                    )
logging.getLogger().addHandler(logging.StreamHandler())

def notify_bot(ts3conn):
    logging.info("Start Notify Bot ...")
    ts3conn.exec_("servernotifyregister", event="server")
    while True:
        event = ts3conn.wait_for_event()
        try:
            reasonid_ = event[0]["reasonid"]
        except KeyError:
            continue
        if reasonid_ == "0":
            logging.info("User joined Lobby:")
            logging.info(event[0])
            servergroups = event[0]['client_servergroups']
            guestname = event[0]['client_nickname']
            if not set(servergroups):
                print(f"s1 {guestname}")
            else:
                print(f"s2{guestname}")
    return None

def keep_alive(ts3conn, time):
    while True:
        logging.info("Send keep alive!")
        ts3conn.send_keepalive()
        time.sleep(20)

if __name__ == "__main__":
    logging.info("Start TS Bot ...")
    config = configparser.ConfigParser()
    config.sections()
    config.read("settings_test.ini")
    logging.info("Config loaded!")
    HOST = config['server']['url']
    PORT = config['server']['query_port']
    USER = config['server']['query_user']
    PASS = config['server']['query_pw']
    SID = config['server']['sid']
    NAME = config['bot']['name']
    logging.info("Connecting to query interface ...")
    URI = f"telnet://{USER}:{PASS}#{HOST}:{PORT}"
    try:
        with ts3.query.TS3ServerConnection(URI) as ts3conn:
            ts3conn.exec_("use", sid=SID)
            ts3conn.query("clientupdate", client_nickname="x123d")
            logging.info("Connected!")
            notify_thread = threading.Thread(target=notify_bot, args=(ts3conn,),
                                             daemon=True, name="notify")
            keep_alive_thread = threading.Thread(target=keep_alive, args=(ts3conn, time),
                                                 daemon=True, name="keep_alive")
            notify_thread.start()
            keep_alive_thread.start()
            keep_alive_thread.join()
            notify_thread.join()
    except KeyboardInterrupt:
        logging.info(60 * "=")
        logging.info("TS Bot terminated by user!")
        logging.info(60 * "=")
It looks like ts3conn.send_keepalive() is causing the error: when I delete it, the code works fine; when I add it back, the code stops working after ts3conn.send_keepalive() has been sent once.

python program stops after wifi connection discontinuity

I started learning Python two months ago and I have written a program for a Raspberry Pi project. My problem is that the program gets stuck after some hours of operation. I think that in all cases it stopped after the wifi connection dropped. But I don't understand why the whole program stops if there is something wrong with the wifi connection. It stops uploading values and refreshing the LCD messages it prints (I removed that and other stuff from the code to make it easier to read).
The code starts at the startup (sudo python /home/pi/test.py &) and contains two Threads:
The "Control" thread reads temperature and humidity by using an i2c bus and a sensor am2315 and according to a temperature threshold, controls a relay through GPIO.
The "Thingspeak" thread reads the temperature threshold from a 'Thingspeak' channel and then uploads the measurements from the previous thread to 'Thingspeak'.
I really don't know what to do or how to search for a solution.
Any help will be much appreciated.
#! /usr/bin/env python
from time import sleep
import datetime
import urllib2
import RPi.GPIO as GPIO
import threading
import smbus
from tentacle_pi.AM2315 import AM2315
import smtplib
import contextlib

sleep(120)

# Lock
tLock = threading.Lock()

# Global variables
tem_global = 0; hum_global = 0
tem_hi = 35; relay = 21

# GPIO setup
GPIO.setmode(GPIO.BCM)
GPIO.setup(relay, GPIO.OUT)
GPIO.output(relay, False)
sleep(1)

def Control():
    global temg, humg, tem_hi, relay
    # AM2315 setup
    am = AM2315(0x5c, "/dev/i2c-1")
    I2C_address = 0x70; I2C_bus_number = 1; i2c_channel_setup = 1
    bus = smbus.SMBus(I2C_bus_number)
    bus.write_byte(I2C_address, i2c_channel_setup)
    sleep(1)
    while True:
        try:
            tem_local, hum_local = am2315meas()
        except:
            tem_local = -1; hum_local = -1
        tLock.acquire()
        tem_global = tem_local; hum_global = hum_local
        if tem_local < tem_hi:
            GPIO.output(relay, True)
        else:
            GPIO.output(relay, False)
        tLock.release()
        sleep(150)

def Thingspeak():
    global tem_global, hum_global, tem_hi
    myAPI = "..."
    channelID = "..."
    baseURL = 'https://api.thingspeak.com/update?api_key=%s' % myAPI
    while True:
        sleep(30)
        try:
            # Reading value from thingspeak
            tLock.acquire()
            with contextlib.closing(urllib2.urlopen("https://api.thingspeak.com/channels/%s/fields/1/last?" % channelID)) as f_read:
                tem_hi = float(fread.read())
            t.Lock.release()
            sleep(30)
            # Uploading values to thingspeak
            tLock.acquire()
            with contextlib.closing(urllib2.urlopen(baseURL + "&field1=%s" % tem_global + "&field2=%s" % hum_global)) as f_upload:
                pass
            tLock.release()
        except:
            with open('/home/pi/errors.txt', mode='a') as file:
                file.write('Network error recorded at %s.\n' % datetime.datetime.now())
                file.close()
            sleep(60)
            continue

def Main():
    t1 = threading.Thread(target=Thingspeak)
    t2 = threading.Thread(target=Control)
    t1.start()
    t2.start()
    t1.join()
    t2.join()
    GPIO.cleanup()

if __name__ == '__main__':
    Main()
Problem solved. As James K Polk indicated, there was an error after the tLock.acquire() every time the internet connection went off, causing a deadlock in the program. Below is the corrected part of the code for anyone who is interested.
def Control():
    global tem_global, hum_global, tem_hi, relay
    # AM2315 setup
    am = AM2315(0x5c, "/dev/i2c-1")
    I2C_address = 0x70; I2C_bus_number = 1; i2c_channel_setup = 1
    bus = smbus.SMBus(I2C_bus_number)
    bus.write_byte(I2C_address, i2c_channel_setup)
    sleep(1)
    while True:
        try:
            tem_local, hum_local = am2315meas()
        except:
            tem_local = -1; hum_local = -1
        with tLock:
            tem_global = tem_local; hum_global = hum_local
            if tem_local < tem_hi:
                GPIO.output(relay, True)
            else:
                GPIO.output(relay, False)
        sleep(150)

def Thingspeak():
    global tem_global, hum_global, tem_hi
    myAPI = "..."
    channelID = "..."
    baseURL = 'https://api.thingspeak.com/update?api_key=%s' % myAPI
    while True:
        sleep(30)
        try:
            # Reading value from thingspeak
            with tLock:
                with contextlib.closing(urllib2.urlopen("https://api.thingspeak.com/channels/%s/fields/1/last?" % channelID)) as f_read:
                    tem_hi = float(f_read.read())
            sleep(30)
            # Uploading values to thingspeak
            with tLock:
                with contextlib.closing(urllib2.urlopen(baseURL + "&field1=%s" % tem_global + "&field2=%s" % hum_global)) as f_upload:
                    pass
        except:
            with open('/home/pi/errors.txt', mode='a') as file:
                file.write('Network error recorded at %s.\n' % datetime.datetime.now())
            sleep(60)
            continue
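For illustration (not part of the original answer), with tLock: behaves roughly like acquiring the lock inside a try/finally, which is why the lock gets released even when the network call raises:

import threading

tLock = threading.Lock()

tLock.acquire()
try:
    do_network_call()  # hypothetical stand-in for the urlopen call
finally:
    # Released even if do_network_call() raises, so no deadlock.
    tLock.release()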

Python script partially inactive after a few hours - Raspberry Pi

I wrote a Python script for the Raspberry Pi to monitor changes on 4 GPIO inputs, write them to a database, and send a message to my phone via a Telegram bot.
Everything works fine, except that after a couple of hours some parts of the script shut down. The script is still running (I know this from a log file), but the GPIO event detection and the Telegram bot API in particular stop working. This mostly occurs overnight.
Because it works perfectly fine for the first few hours after startup, I guess it shouldn't be a bug in the program, but you never know.
I started the script via rc.local, but switched to systemd for running on startup instead. That works fine.
So in a nutshell: the script runs fine, but after a couple of hours some functions in the script become inactive (GPIO event detect, log messages, Telegram API); it kinda feels like the Pi is falling asleep.
#!/usr/bin/env python
# Import libraries
import time
import RPi.GPIO as GPIO
import telepot
import MySQLdb
import os.path
import logging
import datetime

# Start logging
logfile = '/home/pi/Betrieb/logs/log_' + datetime.datetime.now().strftime("%Y-%m-%d_%H-%M")
logging.basicConfig(filename=logfile, level=logging.DEBUG, format='%(asctime)s %(message)s')
logging.debug('=========================================================')
logging.debug('Logging started!')

# Initialize connection to Telegram bot
bot = telepot.Bot('##########################################')

# Connect to MySQL database
con = MySQLdb.connect('localhost', "root", "###########", "################")
c = con.cursor()

###########################################################################################################

class User:
    def __init__(self, id, name, user_id, enable):
        self.id = id
        self.name = name
        self.chat_id = user_id
        self.enable = enable

class Machine:
    def __init__(self, id, number, name, pin, enable):
        self.id = id
        self.number = number
        self.name = name
        self.pin = pin
        self.enable = enable

###########################################################################################################

def my_callback(pin):
    c.execute("SELECT name FROM machines WHERE pin=%s" % pin)
    con.commit()
    data = c.fetchall()
    for set in data:
        machine_name = set[0]
        ### Attention: multiple sets with same ID can exist
    if GPIO.input(pin):
        print "Rising edge detected " + str(pin)
        string = "INSERT INTO malfunction(machine_name,machine_pin,status) VALUES('%s',%s,1)" % (machine_name, pin)
        c.execute(string)
        con.commit()
        for i in range(0, len(user)):
            if user[i].enable:
                bot.sendMessage(user[i].chat_id, "Stoerung " + machine_name)
                print "Sent message to", user[i].name
                logging.debug('Detected malfunction on Pin %s and sent message to %s', str(pin), user[i].name)
    else:
        print "Falling edge detected on " + str(pin)
        string = "INSERT INTO malfunction(machine_name,machine_pin,status) VALUES('%s',%s,0)" % (machine_name, pin)
        c.execute(string)
        con.commit()
        for i in range(0, len(user)):
            if user[i].enable:
                bot.sendMessage(user[i].chat_id, "Stoerung behoben " + machine_name)
                print "Sent message to", user[i].name
                logging.debug('Solved malfunction on Pin %s and sent message to %s', str(pin), user[i].name)

def updateData():
    global machine
    global user
    logging.debug('Update data.')
    # Update user data
    c.execute("SELECT * FROM telegram_users")
    con.commit()
    data = c.fetchall()
    user = []
    del user[:]
    for set in data:
        newUser = User(set[0], set[1], set[2], set[3])
        user.append(newUser)
    # Update machine data
    try:
        machine
    except NameError:
        machine = []
    else:
        for i in range(0, len(machine)):
            if machine[i].enable:
                GPIO.remove_event_detect(machine[i].pin)
        del machine[:]
    c.execute("SELECT * FROM machines")
    con.commit()
    data = c.fetchall()
    GPIO.setmode(GPIO.BCM)
    for set in data:
        # 0 = id / 1 = number / 2 = name / 3 = pin / 4 = enable #
        newMachine = Machine(set[0], set[1], set[2], set[3], set[4])
        machine.append(newMachine)
        if set[4]:
            GPIO.setup(set[3], GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
            GPIO.add_event_detect(set[3], GPIO.BOTH, callback=my_callback, bouncetime=300)
            logging.debug('Added Event Detect on Pin %s', set[3])

###########################################################################################################

updateData()
logging.debug('Initial Update finished. Start looping.')

Counter = 0
while True:
    Counter += 1
    if Counter == 600:
        logging.debug('Still running!')
        Counter = 0
    time.sleep(1)
    lockfile = "/home/pi/Betrieb/lockfiles/request_update"
    if os.path.isfile(lockfile):
        os.remove(lockfile)
        updateData()
        print "Data updated."
        logging.debug('Deleted lockfile and updated data.')
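One guard worth considering (an assumption on my part, not something from the original post): an unhandled exception inside the GPIO callback, e.g. a failed bot.sendMessage() during a network drop, can silently end event handling. A minimal sketch that keeps the callback alive:

def my_callback(pin):
    try:
        handle_edge(pin)  # hypothetical: the original callback body moved here
    except Exception:
        # One failed Telegram/MySQL call no longer kills the callback thread.
        logging.exception('Error while handling edge on pin %s', pin)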

Python Threading Error when executing a MSSQL statement

I have a producer-consumer application where the producer reads a database and puts the results into a website. On success, the consumer then gets the id of the transaction and updates the DB.
The program runs as required until it attempts to execute the update. It sometimes fails with this error: 'HY000', 'The driver did not supply an error!'
The code can comfortably write to a file without any issues.
What could I do to fix this? We need to update the DB.
Thanks
Notes: using Python 2.7 with pyodbc on MSSQL 2008.
Code below:
#!/usr/bin/env python
from urlparse import urlparse
from threading import Thread
import httplib, sys
import Queue
import urllib2
import urllib
from time import localtime, strftime
from ConfigParser import SafeConfigParser
from functions import Functions
from pyodbcclass import db_mssql

now = strftime("%y-%m-%d-%H%M%S", localtime())
k = db_mssql()
thread_list = []
thread_list2 = []
getFromDBQueue = Queue.Queue()
updateDBQueue = Queue.Queue()
number_of_consumer_threads = 3

def putURL():
    querySql = "select distinct top 3 id,url from tblURL where processed=0 order by id asc"
    re = k.query2(querySql)
    if re:
        for r in re:
            id = r.id
            params = urllib.urlencode({'user': user, 'password': password})
            ourl = urlini + "?%s" % params
            urlplusid = {'url': ourl.strip(), 'id': id}
            getFromDBQueue.put(urlplusid)

def getURL(thread_id):
    while 1:
        try:
            URL_toget = getFromDBQueue.get(block=False)
            url2 = URL_toget['url']
            msgid2 = URL_toget['id']
        except Queue.Empty:
            print "thread exiting, id: " + str(thread_id) + "++getFromDB++"
            sys.exit()
        status, url = getStatus(url2)
        if status == 200:
            updateDBQueue.put(msgid2)
        print(status)

def updateDB(thread_id):
    while 1:
        try:
            id2 = updateDBQueue.get(block=False)
            if id2:
                params = ['true', id2]
                sqlupdate = "UPDATE tblURL SET processed=? WHERE id=?"
                k.execute3(sqlupdate, params)
        except Queue.Empty:
            print "thread exiting, id: " + str(thread_id) + "**update**"
            sys.exit()

# Fill the queue with work and block until we are done filling the queue
producer_thread = Thread(target=putURL)
producer_thread.start()
producer_thread.join()

# We can now start consumers
for i in range(number_of_consumer_threads):
    getfromDB = Thread(target=getURL, args=(i,))
    getfromDB.start()
    thread_list.append(getfromDB)

for i in range(number_of_consumer_threads):
    update = Thread(target=updateDB, args=(i,))
    update.start()
    thread_list2.append(update)

for thread in thread_list:
    thread.join()

for thread2 in thread_list2:
    thread2.join()
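One common cause of errors like this (an assumption on my part, not confirmed by the post) is sharing the single k = db_mssql() connection across consumer threads. A minimal sketch of giving each consumer its own instance, assuming db_mssql() opens a fresh connection per instance:

import Queue
from pyodbcclass import db_mssql  # the poster's wrapper class

def updateDB(thread_id):
    db = db_mssql()  # per-thread connection, nothing shared between threads
    while True:
        try:
            id2 = updateDBQueue.get(block=False)
        except Queue.Empty:
            return
        db.execute3("UPDATE tblURL SET processed=? WHERE id=?", ['true', id2])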
