Could not Connect different host to retrieve value - python

def main():
    """Poll two MySQL hosts until both connections succeed, then fetch the
    newest halm_results row from the L1A host into the module globals.

    NOTE(review): indentation was lost in the original paste; this is the
    most plausible reconstruction. The retry loop requires BOTH hosts to be
    reachable in the same pass before it breaks, which is why one dead host
    blocks the whole program.
    """
    global L1Aresult
    global L5Aresult
    global total
    global limit
    global trigger
    global check
    x = 0  # unused in the visible code
    while True:
        try:
            # Both connects must succeed in one pass or the loop retries.
            L1A = pymysql.connect(host="10.8.22.59",port = 3306,user = "root",passwd="root",db="halm_tables")
            L5A = pymysql.connect(host="10.8.22.3",port = 3306,user = "root",passwd="root",db="halm_tables")
            break
        except Exception as ex:
            print(ex)
            continue
        finally:
            pass
    sqlstr = """SELECT UniqueID,TestDate,Testtime, EL2FingerDefaultCount FROM halm_tables.halm_results ORDER BY uniqueid Desc limit 1"""
    L1Acursor = L1A.cursor()
    L1Acursor.execute(sqlstr)
    previous = L1Aresult  # keep the prior fetch for change detection by the caller
    L1Aresult = L1Acursor.fetchall()
When one of the host connections is closed, the program does not skip the closed connection and continue execution. What is wrong with my code?

This code will loop till it finds a healthy server. Then it would fetch the results and bail out. I kept your global variables, but I think you should create a class like Host which has data members like cursor, result, previous, etc. This would allow refactoring this code and remove some redundancies.
# Failover fetch: try the L1A host first; if it fails, fall back to the L5A
# host. Loop until one of them yields a result, then bail out.
# NOTE(review): indentation was lost in the original paste; reconstructed.
sqlstr = """SELECT UniqueID,TestDate,Testtime, EL2FingerDefaultCount FROM halm_tables.halm_results ORDER BY uniqueid Desc limit 1"""
while True:
    try:
        L1A = pymysql.connect(host="10.8.22.59",port = 3306,user = "root",passwd="root",db="halm_tables")
        L1Acursor = L1A.cursor()
        L1Acursor.execute(sqlstr)
        previous = L1Aresult
        L1Aresult = L1Acursor.fetchall()
        break
    except Exception as ex:
        print(ex)
    try:
        L5A = pymysql.connect(host="10.8.22.3",port = 3306,user = "root",passwd="root",db="halm_tables")
        L5Acursor = L5A.cursor()
        L5Acursor.execute(sqlstr)
        previous = L5Aresult
        L5Aresult = L5Acursor.fetchall()
        break
    except Exception as ex:
        print(ex)

Related

MariaDB Connection Pool Gets Exhausted After A While

I am using MariaDB Database Connector for Python and I have a singleton database class that is responsible for creating a pool and performing database operations on that pool. I have made every effort to close the pool after every access. But, still, after a while the pool becomes unusable and gets stuck, never to be freed. This might be a bug with the connector or a bug in my code. Once the pool is exhausted, I create and return a normal connection, which is not efficient for every database access.
Here's my database module code:
import mariadb
import configparser
import sys
from classes.logger import AppLogger
# Module-level logger for the database module.
logger = AppLogger(__name__)
# Global counter of connections handed out and not yet closed (debug aid only;
# not thread-safe).
connections = 0
class Db:
    """
    Main database for the application.

    Holds a class-level mariadb connection pool (created once when the class
    is defined) and hands out pooled connections, falling back to a plain
    dedicated connection when the pool is exhausted.
    """
    config = configparser.ConfigParser()
    config.read('/app/config/conf.ini')
    db_config = config['db']  # fixed: was the duplicated `db_config = db_config = config['db']`
    try:
        conn_pool = mariadb.ConnectionPool(
            user=db_config['user'],
            password=db_config['password'],
            host=db_config['host'],
            port=int(db_config['port']),
            pool_name=db_config['pool_name'],
            pool_size=int(db_config['pool_size']),
            database=db_config['database'],
        )
    except mariadb.PoolError as e:
        print(f'Error creating connection pool: {e}')
        logger.error(f'Error creating connection pool: {e}')
        sys.exit(1)

    def get_pool(self):
        """Return the shared pool.

        NOTE(review): `create_pool` is not defined anywhere in this class —
        the fallback branch would raise AttributeError if conn_pool were None.
        """
        return self.conn_pool if self.conn_pool is not None else self.create_pool()

    def __get_connection__(self):
        """
        Returns a db connection: a pooled one when available, otherwise a
        plain connection created on the spot (less efficient, but keeps the
        request alive when the pool is exhausted).
        """
        global connections
        try:
            pconn = self.conn_pool.get_connection()
            pconn.autocommit = True
            print(f"Receiving connection. Auto commit: {pconn.autocommit}")
            connections += 1
            print(f"New Connection. Open Connections: {connections}")
            logger.debug(f"New Connection. Open Connections: {connections}")
        except mariadb.PoolError as e:
            print(f"Error getting pool connection: {e}")
            logger.error(f'Error getting pool connection: {e}')
            # Fall back to a dedicated connection so the caller still works.
            pconn = self._create_connection()
            pconn.autocommit = True
            connections += 1
            logger.debug(f'Created normal connection following failed pool access. Connections: {connections}')
        return pconn

    def _create_connection(self):
        """
        Creates a new (non-pooled) connection. Use this when getting a
        pool connection fails.

        NOTE(review): renamed — the original name spelled its "underscores"
        with Arabic tatweel characters (ـ), which is almost certainly a typo.
        """
        db_config = self.db_config
        return mariadb.connect(
            user=db_config['user'],
            password=db_config['password'],
            host=db_config['host'],
            port=int(db_config['port']),
            database=db_config['database'],
        )

    def exec_sql(self, sql, values=None):
        """Execute *sql* with optional bind *values*.

        Returns the fetched rows for SELECT statements, True for other
        statements, or False when a mariadb.Error occurs.

        The connection is released in a `finally` block: previously, any
        exception other than mariadb.Error escaped without closing the
        connection, which slowly exhausted the pool (the bug reported above).
        """
        global connections
        pconn = self.__get_connection__()
        try:
            cur = pconn.cursor()
            print(f'Sql: {sql}')
            print(f'values: {values}')
            cur.execute(sql, values)
            # SELECT in any case/leading-whitespace form returns a result set;
            # insert/update/delete report success with True.
            if sql.lstrip().upper().startswith('SELECT'):
                result = cur.fetchall()
            else:
                result = True
            return result
        except mariadb.Error as e:
            print(f"Error performing database operations: {e}")
            logger.error(f"Error performing database operations: {e}")
            return False
        finally:
            # Always hand the connection back, whatever happened above.
            try:
                pconn.close()
                connections -= 1
                print(f'connection closed: connections: {connections}')
                logger.debug(f'connection closed: connections: {connections}')
            except Exception:
                # Closing should not mask the original outcome.
                logger.error('Failed to close database connection')
To use the class in a module, I import the class there and simply instantiate an object from the class and run sql queries on it:
db = Db()
users = db.exec_sql("SELECT * FROM users")
Any ideas why the pool gets exhausted after a while (maybe days) and never gets healed?
Maybe a different error from mariadb.Error is raised sometimes and the connection is never closed. I believe the best practice would be to use a finally section to guarantee that the connection is always closed, like this:
# Recommended pattern: guarantee the connection is released no matter which
# branch runs (made syntactically valid; the `pass` lines stand in for the
# real work).
pconn = None
try:
    pconn = self.__get_connection__()
    # ... perform the database operations here ...
    pass
except mariadb.Error as e:
    # ... handle/log the expected database errors here ...
    pass
finally:
    if pconn:
        try:
            pconn.close()
        except Exception:
            # Not really expected, but if this ever happens it should not alter
            # whatever happened in the try or except sections above.
            pass

forloop in flask code arguments not looping for the second position

I am working on a use case to creating groups in AD through PyAD and create folder and groups for that folder through flask.
I am using for loop for passing arguments and returning responses. If the group exists code should not create if else it should create and then move on to create folder and set permissions.
But the logic works fine for the first group passed in the request; the second one never gets into the loop.
I am facing issues making it work through Flask and handling the responses. Is there a way to achieve this? Please help.
# NOTE(review): indentation was stripped from this paste, so the exact nesting
# is ambiguous; code is kept byte-identical and only comments are added.
app = Flask(__name__)
api = Api(app)
#Class to create fileshare
class Test(Resource):
def post(self):
pythoncom.CoInitialize()
# Get JSON arguments from Payload shared NAS path, directorname groupname with read access and right access
parentdir = request.json.get("shareUNCPath")
dirname = request.json.get("shareFolderName")
readGroup = request.json.get("readGroup")
writeGroup = request.json.get("writeGroup")
domainName = request.json.get("domain")
groupList = [readGroup,writeGroup]
#for gn in groupList:
try:
j=(len(groupList))+1
if readGroup == writeGroup:
j=(len(groupList))-1
#for gn in len(groupList):
# NOTE(review): range(4) but groupList has at most 2 entries — IndexError on
# i=2; and `j` computed above is never used. Loop should be over groupList.
for i in range(4):
groupName = groupList[i]
pyad.set_defaults(username="username", password="password", ldap_server="ldapServer")
# Raises if the group does not exist in AD — that exception is what sends
# control to the except-block below, skipping all remaining groups (the bug
# described in the answer).
rGroup = adgroup.ADGroup.from_cn(groupName)
logging.debug("read group {} available in AD ".format(groupName))
# NOTE(review): `__name__` is never "__main__" inside a request handler run
# under Flask/Api — this guard likely never fires in production.
if __name__ == "__main__":
os.makedirs(path)
# `path` is not defined anywhere in the visible code — presumably built from
# parentdir/dirname elsewhere; confirm.
igroup, domain, type = win32security.LookupAccountName (domainName, groupName)
sd = win32security.GetFileSecurity(path, win32security.DACL_SECURITY_INFORMATION)
dacl = sd.GetSecurityDescriptorDacl()
logging.debug("Domain1 {}, Group1 {}".format(domainName, groupName))
if groupName in readGroup:
dacl.AddAccessAllowedAce(win32security.ACL_REVISION,con.GENERIC_READ, igroup)
if groupName in writeGroup:
dacl.AddAccessAllowedAce(win32security.ACL_REVISION,con.GENERIC_WRITE, igroup)
isdir = os.path.isdir(path)
if isdir == True:
sd.SetSecurityDescriptorDacl(1, dacl, 0)
win32security.SetFileSecurity(path, win32security.DACL_SECURITY_INFORMATION, sd)
dacl = sd.GetSecurityDescriptorDacl()
cnt=dacl.GetAceCount()
# NOTE(review): reuses `i` as the loop variable, shadowing the outer loop's i.
for i in range(0, cnt):
rev, access, usersid = dacl.GetAce(i)
user, group, type = win32security.LookupAccountSid(domainName, usersid)
# NOTE(review): syntax error — two extra closing parentheses at end of line.
details = ('Group: {}/{}'.format(group, user), rev, access)))
resp = Response('Successfully created file share {}. Details {}'.format(dirname, details))
print (resp)
resp.status_code = 200
# Returning here ends the handler after the FIRST group — the second group
# is never processed on the success path either.
return resp
except Exception as e:
errormsg = str(e)
print (errormsg)
if "The server is not operational" in errormsg:
resp = Response('AD operation failed, unable to connect to Active Directory. Error - {}'.format(e))
print (resp)
# NOTE(review): 301 is "Moved Permanently" — a misuse for an error response.
resp.status_code = 301
return resp
else:
# Group lookup failed: create the group, then apply permissions.
try:
for i in range(4):
groupName = groupList[i]
pyad.set_defaults(username="username", password="pasword",ldap_server="ldapServer")
# `group_OU`, `groupScope`, `description`, and `a` (a.logon below) are not
# defined in the visible code — presumably module-level; confirm.
ou = pyad.adcontainer.ADContainer.from_dn(group_OU)
rGroup = adgroup.ADGroup.create(
name=groupName,
security_enabled = True,
scope=groupScope,
container_object=ou,
optional_attributes={"description": description}
)
if rGroup.Displayname == (groupName):
if __name__ == "__main__":
os.makedirs(path)
#groupr = win32security.LookupAccountName ("", readGroup)
a.logon()
time.sleep(5)
igroup, domain, type = win32security.LookupAccountName (domainName, groupName)
sd = win32security.GetFileSecurity(path, win32security.DACL_SECURITY_INFORMATION)
#dacl = win32security.ACL()
dacl = sd.GetSecurityDescriptorDacl()
#acl = pywintypes.ACL()
#set permessions for readGroup with GENERIC_READ level permessions
#dacl.AddAccessAllowedAce(win32security.ACL_REVISION,con.GENERIC_READ, groupr)
if groupName in readGroup:
dacl.AddAccessAllowedAceEx(win32security.ACL_REVISION,con.OBJECT_INHERIT_ACE|con.CONTAINER_INHERIT_ACE,con.GENERIC_READ|con.GENERIC_EXECUTE, igroup)
if groupName in writeGroup:
dacl.AddAccessAllowedAce(win32security.ACL_REVISION,con.GENERIC_WRITE, igroup)
isdir = os.path.isdir(path)
if isdir == True:
sd.SetSecurityDescriptorDacl(1, dacl, 0)
win32security.SetFileSecurity(path, win32security.DACL_SECURITY_INFORMATION, sd)
dacl = sd.GetSecurityDescriptorDacl()
cnt=dacl.GetAceCount()
for i in range(0, cnt):
rev, access, usersid = dacl.GetAce(i)
user, group, type = win32security.LookupAccountSid(domainName, usersid)
details = ('Group: {}/{}'.format(group, user), rev, access)
#return ("Success Fileshare created: {} ".format(dirname))
resp = Response('Successfully created file share {}. Details {}'.format(dirname, details))
print (resp)
resp.status_code = 200
return resp
except Exception as e:
print(e)
resp = Response('AD operation failed, unable to create to group {}. Error - {}'.format(groupName, e))
print (resp)
# NOTE(review): 302 is "Found" (redirect) — a misuse for an error response.
resp.status_code = 302
return resp
api.add_resource(Test, '/test')
if __name__ == "__main__":
#context = ('local.crt', 'local.key')#certificate and key files
app.run(port="7050", host="0.0.0.0", use_reloader=True)
I reviewed your code. There are two things that should changed.
You use i as the loop variable for the outer and inner loop
In the first loop, you use an exception to trigger the group creation. This exits the loop and no more groups are processed. You should move the exception block inside the range(4) loop.
Here is your code with comments.
class Test(Resource):
def post(self):
.......
try:
..........
for i in range(4): # using i as loop variable, loop will exit if exception
........
if __ name __ == "__ main __": # if group exists, update permissions, throws exception if group does not exist
........
if isdir == True:
........
for i in range(0, cnt): # using i as loop variable, again
.........
# here is the problem - if the first group does not exist, an exception is thrown and the other groups are not processed
except Exception as e: # group does not exist, must add # You should move this inside the for loop
............
try:
for i in range(4): # using i as loop variable
...........
if rGroup.Displayname == (groupName):
if __ name __ == "__main__":
.........
if isdir == True:
........
for i in range(0, cnt): # using i as loop variable, again
..........
To clarify, the overall logic should be like this:
for i in range(4): # each group
try:
# update permissions
except Exception as e:
# add new group
As a side note, try to check whether the group exists without using a try/except block. Exceptions should not be used for normal program flow.

Query mysql in multi thread/process

1. I know the error occurs when a connection is shared between threads. What about multiprocessing?
Without db.commit(), the error occurs when the workers are started as threads, but not as processes. Why?
Don't they share the same connection from db = sql.connect(ipAddress, db='mydb')?
With db.commit(), both the thread and the process versions raise the error when sharing the connection.
# Single module-level connection, shared by whichever thread/process runs
# query() and getdb() — sharing one connection this way is the bug under
# discussion in this question.
db = sql.connect(ipAddress,db='mydb')

def query():
    """Once per second: write a counter into test.count, commit, and print
    the current contents of the test table.

    NOTE(review): indentation was lost in the paste; `cur.close()` is assumed
    to run after the try/except on each iteration — confirm against the
    original.
    """
    ii = 0
    while 1:
        cur = db.cursor(sql.cursors.Cursor)
        try:
            ii += 1
            s = 'ii:{}'.format(ii)
            cur.execute('''update test set count='{}' where name = 'sean' ''' .format(s))
            db.commit()
            cur.execute('''select count from test ''')
            rcv = cur.fetchall()
            print(cur,rcv)
        except (sql.Error,sql.Warning) as e:
            print(e)
        cur.close()
        time.sleep(1)
def getdb():
    """Once per second: read and print sean's count from the test table,
    using the same shared module-level connection as query().

    NOTE(review): indentation was lost in the paste; `cur1.close()` is assumed
    to run after the try/except on each iteration — confirm against the
    original.
    """
    while 1:
        cur1 = db.cursor(sql.cursors.Cursor)
        try:
            cur1.execute('''select count from test where name ='sean' ''')
            rcv = cur1.fetchall()
            print(cur1,rcv)
        except (sql.Error,sql.Warning) as e:
            print(e)
        cur1.close()
        time.sleep(1)
# Spawn the writer, wait 3 s, then spawn the reader.
# Fixed: the original reused the name `event` for both Process objects, so the
# handle to the first process was lost and could never be joined/terminated.
query_proc = mp.Process(target = query)
query_proc.start()
time.sleep(3)
getdb_proc = mp.Process(target = getdb)
getdb_proc.start()
2.I create two connections for each thread or process.
I don't know why I get the latest value of ii only the first time, even though both connections access the same database. How can this happen?
# Variant 2: one dedicated connection per worker (bodies elided by the asker).
db = sql.connect(ipAddress,db='mydb')
db1 = sql.connect(ipAddress,db='mydb')

def query():
    ii = 0
    while 1:
        cur = db.cursor(sql.cursors.Cursor)
        # same code given above

def getdb():
    while 1:
        cur1 = db1.cursor(sql.cursors.Cursor)
        # same code given above
<MySQLdb.cursors.Cursor object at 0x75ff3ef0> (('ii:50',), ('!999!',), ('$5555555555$',))
<MySQLdb.cursors.Cursor object at 0x75ff3ef0> (('ii:3',),)
<MySQLdb.cursors.Cursor object at 0x75ff3ed0> (('ii:51',), ('!999!',), ('$5555555555$',))
<MySQLdb.cursors.Cursor object at 0x75ff3e50> (('ii:3',),)
<MySQLdb.cursors.Cursor object at 0x75ff3e90> (('ii:52',), ('!999!',), ('$5555555555$',))
<MySQLdb.cursors.Cursor object at 0x75ff3f70> (('ii:3',),)
<MySQLdb.cursors.Cursor object at 0x766cb0b0> (('ii:53',), ('!999!',), ('$5555555555$',))

Darwinex ZeroMQ & MT4 - How to iterate on ZeroQM returned values? (Python)

I am trying to create a bridge between MetaTrader 4 and Darwinex ZeroMQ (Python). I got the connection 100% working and returning values. The problem is that the returned value is of 'NoneType'; it looks like a dictionary but it is not iterable. Does anybody know how I can assign the information to a variable?
I am new in python and I am trying to create a small robot.
Follow the link for for the Darwinex docs: https://github.com/darwinex/dwx-zeromq-connector
See below my Python code and the returned values:
from DWX_ZeroMQ_Connector_v2_0_1_RC8 import DWX_ZeroMQ_Connector
_zmq = DWX_ZeroMQ_Connector(_verbose=True)
_zmq._generate_default_order_dict()
_zmq._DWX_MTX_GET_ALL_OPEN_TRADES_()
_zmq._DWX_MTX_GET_ALL_OPEN_TRADES_().get('_trades')
Follow below an screenshot on Jupiter notebook, easier to see the results:
i use my own Sockets for communication. Here some edited Code Examples:
import zmq
import threading
import time
import ast
def fetch_to_db(data):
    """Parse a GET_OPEN_TRADES reply string and return the orders mapping.

    `data` is expected to be the str() of a dict literal containing a
    '_trades' mapping of orderID -> order-field dict. Each order dict gets
    its own 'orderID' key injected and is printed.

    Returns the orders dict on success, or None when `data` is not an
    OPEN_TRADES message or cannot be parsed. (Fixed: the original only
    printed the orders even though the accompanying text says the function
    "return[s] a dict for every order"; returning is backward compatible.)
    """
    try:
        if 'OPEN_TRADES' in data:
            res = ast.literal_eval(data)
            orders = res['_trades']
            for key, value in orders.items():
                #print(key)
                value['orderID'] = key
                print(value)
                print()
            return orders
        else:
            print('FETCH ERROR')
    except Exception as e:
        print(e)
def get_open_trades(stop):
    """Push a GET_OPEN_TRADES request to the MT4 bridge once per second
    until the `stop` callable returns True."""
    try:
        # Create the context and socket once, outside the loop. The original
        # built a new zmq.Context and PUSH socket on every iteration and only
        # closed the last one — leaking OS resources every second.
        c = zmq.Context()
        s = c.socket(zmq.PUSH)
        s.connect('tcp://127.0.0.1:32768')
        while True:
            data = str('TRADE;GET_OPEN_TRADES')
            # Sent twice in the original — kept as-is (presumably deliberate
            # for the MT4 EA; confirm).
            s.send_string(data)
            s.send_string(data)
            time.sleep(1)
            if stop():
                s.close()
                break
    except Exception as e:
        print(e)
def receiver_sock(stop):
    """Pull replies from the MT4 bridge and hand each one to fetch_to_db,
    until the `stop` callable returns True."""
    try:
        c = zmq.Context()
        s = c.socket(zmq.PULL)
        s.setsockopt(zmq.RCVHWM, 1)  # buffer at most one unread reply
        s.connect('tcp://127.0.0.1:32769')
        while True:
            data = s.recv_string()
            fetch_to_db(data)
            time.sleep(0.00001)
            if stop():
                s.close()
                break
    except Exception as e:
        print(e)
def loop_s():
    """Start the receiver and requester worker threads as daemons.

    Each worker gets a lambda reading the local `stop_threads` flag; note
    the closure captures the variable as it is at definition time of this
    function's scope.
    """
    try:
        stop_threads = False
        receiver_socket = threading.Thread(target = receiver_sock, args =(lambda : stop_threads, ))
        receiver_socket.daemon = True  # setDaemon() is deprecated since Python 3.10
        receiver_socket.start()
        open_trades = threading.Thread(target = get_open_trades, args =(lambda : stop_threads, ))
        open_trades.daemon = True
        open_trades.start()
    except Exception as e:
        print(e)
try:
    loop_s()
    # Park the main thread; the daemon workers do the actual work.
    while True:
        time.sleep(100)
except KeyboardInterrupt:
    print('CLOSING')
    # NOTE(review): this spawns NEW worker threads with stop() already True
    # (so each closes its socket immediately and exits). The original workers
    # never see this module-level `stop_threads`; they only exit because they
    # are daemons and the process is ending. Confirm this is the intent.
    processes = [get_open_trades,receiver_sock]
    for i in processes:
        stop_threads = True
        t1 = threading.Thread(target = i, args =(lambda : stop_threads, ))
the fetch_to_db function return a dict for every order including orderID
regards

Why are certificates expiration time different everytime I check for them?

I want to get the expiration time of some certificates,beacause I want to write a program to notify when the certificate about to expire.
But when I run the code, I found that sometimes the results (expiration times) are different. It is not that the current time differs; there is a large difference in the numbers, like 1 versus 100. You can see the picture. I guess the process of getting the certificate is wrong, but I can't find where. Besides, it is not a timezone problem.
# NOTE(review): fragment of a larger function — the enclosing `def` is not
# visible here; `client`, `all_domains`, `null_domain`, `remain_time_list`
# and `reqs` come from that outer scope. Indentation was stripped; code is
# kept byte-identical and only comments are added.
for length_list_domain in range(0,len(all_domains['HostedZones'])):
HostedZoneId = all_domains['HostedZones'][length_list_domain]['Id']
sub_domains = client.list_resource_record_sets(HostedZoneId = HostedZoneId)
for length_list_subdomain in range(0,len(sub_domains['ResourceRecordSets'])):
try:
if all_domains['HostedZones'][length_list_domain]['Name'] in null_domain:
break
else:
hostname = sub_domains['ResourceRecordSets'][length_list_subdomain]['Name']
port = 443
# NOTE(review): the stdlib has socket.create_connection, not
# ssl.create_connection — confirm what `ssl` is bound to here. The
# connection is also never closed (socket leak per record checked).
conn = ssl.create_connection((hostname,port))
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sock = context.wrap_socket(conn,server_hostname = hostname)
certificate = ssl.DER_cert_to_PEM_cert(sock.getpeercert(True))
x509 = reqs.OpenSSL.crypto.load_certificate(reqs.OpenSSL.crypto.FILETYPE_PEM,certificate)
# [:-1] strips the trailing 'Z' from the ASN.1 notAfter timestamp
# (e.g. b'20240101120000Z') before strptime below.
expire_time_first = x509.get_notAfter()[:-1].decode()
now = datetime.datetime.utcnow()
expire_time = datetime.datetime.strptime(expire_time_first,'%Y%m%d%H%M%S')
remain_time = expire_time - now
print(remain_time.days)
remain_time_list[all_domains['HostedZones'][length_list_domain]['Name']] = remain_time.days
# Records the FIRST subdomain in the zone that succeeds, keyed by the
# zone name, then stops.
break
# NOTE(review): this bare except + pass silently skips any record that
# fails; combined with the break above, WHICH subdomain's certificate gets
# recorded for a zone can differ from run to run — different subdomains can
# serve different certificates, which is the likely cause of the
# inconsistent day counts reported above.
except:
pass
return remain_time_list
Then I tried threading lock to solve this problem,but it didn't work.
# NOTE(review): same fragment as above with a lock added. The lock cannot fix
# the inconsistency (it is not a race — see the break/except note below), and
# it introduces a new hazard: if any line between acquire() and release()
# raises, release() never runs, the bare except hides it, and the next
# acquire() deadlocks forever. Code kept byte-identical; comments only.
mutex = threading.Lock()
for length_list_domain in range(0,len(all_domains['HostedZones'])):
HostedZoneId = all_domains['HostedZones'][length_list_domain]['Id']
sub_domains = client.list_resource_record_sets(HostedZoneId = HostedZoneId)
for length_list_subdomain in range(0,len(sub_domains['ResourceRecordSets'])):
try:
if all_domains['HostedZones'][length_list_domain]['Name'] in null_domain:
break
else:
mutex.acquire()
hostname = sub_domains['ResourceRecordSets'][length_list_subdomain]['Name']
port = 443
conn = ssl.create_connection((hostname,port))
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sock = context.wrap_socket(conn,server_hostname = hostname)
certificate = ssl.DER_cert_to_PEM_cert(sock.getpeercert(True))
x509 = reqs.OpenSSL.crypto.load_certificate(reqs.OpenSSL.crypto.FILETYPE_PEM,certificate)
expire_time_first = x509.get_notAfter()[:-1].decode()
mutex.release()
now = datetime.datetime.utcnow()
expire_time = datetime.datetime.strptime(expire_time_first,'%Y%m%d%H%M%S')
remain_time = expire_time - now
print(remain_time.days)
remain_time_list[all_domains['HostedZones'][length_list_domain]['Name']] = remain_time.days
# As in the unlocked version: the first subdomain that succeeds wins,
# and failures are silently skipped — which subdomain's certificate is
# measured can change between runs.
break
except:
pass
return remain_time_list
The results are as this picture,the number is the day of remain time(expired time - nowtime)
I use x509.get_subject and reqs.get_alt_subject to get the information, and even when I query only the first domain, another domain seems to appear in the results — why?
the results of the first domain

Categories

Resources