1. I know an error occurs when a connection is shared between threads. What about multiprocessing?
Without db.commit(), the error occurs when the workers are started as threads, but not when they are started as processes. Why?
Don't they share the same connection from db = sql.connect(ipAddress, db='mydb')?
With db.commit(), both the thread and the process versions raise the error when the connection is shared.
import time
import multiprocessing as mp
import MySQLdb as sql

db = sql.connect(ipAddress, db='mydb')

def query():
    ii = 0
    while 1:
        cur = db.cursor(sql.cursors.Cursor)
        try:
            ii += 1
            s = 'ii:{}'.format(ii)
            cur.execute('''update test set count='{}' where name = 'sean' '''.format(s))
            db.commit()
            cur.execute('''select count from test''')
            rcv = cur.fetchall()
            print(cur, rcv)
        except (sql.Error, sql.Warning) as e:
            print(e)
        cur.close()
        time.sleep(1)

def getdb():
    while 1:
        cur1 = db.cursor(sql.cursors.Cursor)
        try:
            cur1.execute('''select count from test where name = 'sean' ''')
            rcv = cur1.fetchall()
            print(cur1, rcv)
        except (sql.Error, sql.Warning) as e:
            print(e)
        cur1.close()
        time.sleep(1)

event = mp.Process(target=query)
event.start()
time.sleep(3)
event = mp.Process(target=getdb)
event.start()
2. I create two connections, one for each thread or process.
I don't understand why I get the latest value of ii only on the first read, even though both connections access the same database. How can this happen?
db = sql.connect(ipAddress, db='mydb')
db1 = sql.connect(ipAddress, db='mydb')

def query():
    ii = 0
    while 1:
        cur = db.cursor(sql.cursors.Cursor)
        # same code given above

def getdb():
    while 1:
        cur1 = db1.cursor(sql.cursors.Cursor)
        # same code given above
<MySQLdb.cursors.Cursor object at 0x75ff3ef0> (('ii:50',), ('!999!',), ('$5555555555$',))
<MySQLdb.cursors.Cursor object at 0x75ff3ef0> (('ii:3',),)
<MySQLdb.cursors.Cursor object at 0x75ff3ed0> (('ii:51',), ('!999!',), ('$5555555555$',))
<MySQLdb.cursors.Cursor object at 0x75ff3e50> (('ii:3',),)
<MySQLdb.cursors.Cursor object at 0x75ff3e90> (('ii:52',), ('!999!',), ('$5555555555$',))
<MySQLdb.cursors.Cursor object at 0x75ff3f70> (('ii:3',),)
<MySQLdb.cursors.Cursor object at 0x766cb0b0> (('ii:53',), ('!999!',), ('$5555555555$',))
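For reference, a minimal sketch of the variant with no sharing at all, where each worker opens its own connection after it starts (same MySQLdb setup and ipAddress as above assumed):

import time
import multiprocessing as mp
import MySQLdb as sql

def getdb():
    # One connection per process, created after the fork, so nothing
    # is inherited from or shared with the parent.
    db = sql.connect(ipAddress, db='mydb')
    while 1:
        cur = db.cursor()
        cur.execute("select count from test where name = 'sean'")
        print(cur.fetchall())
        cur.close()
        time.sleep(1)

mp.Process(target=getdb).start()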
import pymysql

def main():
    global L1Aresult
    global L5Aresult
    global total
    global limit
    global trigger
    global check
    x = 0
    while True:
        try:
            L1A = pymysql.connect(host="10.8.22.59", port=3306, user="root", passwd="root", db="halm_tables")
            L5A = pymysql.connect(host="10.8.22.3", port=3306, user="root", passwd="root", db="halm_tables")
            break
        except Exception as ex:
            print(ex)
            continue
        finally:
            pass
    sqlstr = """SELECT UniqueID, TestDate, Testtime, EL2FingerDefaultCount FROM halm_tables.halm_results ORDER BY uniqueid DESC LIMIT 1"""
    L1Acursor = L1A.cursor()
    L1Acursor.execute(sqlstr)
    previous = L1Aresult
    L1Aresult = L1Acursor.fetchall()
When one of the hosts is down, the program does not skip the failed connection and continue executing. What is wrong with my code?
This code loops until it finds a healthy server, then fetches the results and bails out. I kept your global variables, but I think you should create a class such as Host with data members like cursor, result, previous, and so on. That would allow refactoring this code and removing some redundancy; a sketch of that refactor follows the code below.
sqlstr = """SELECT UniqueID,TestDate,Testtime, EL2FingerDefaultCount FROM halm_tables.halm_results ORDER BY uniqueid Desc limit 1"""
while True:
try:
L1A = pymysql.connect(host="10.8.22.59",port = 3306,user = "root",passwd="root",db="halm_tables")
L1Acursor = L1A.cursor()
L1Acursor.execute(sqlstr)
previous = L1Aresult
L1Aresult = L1Acursor.fetchall()
break
except Exception as ex:
print(ex)
try:
L5A = pymysql.connect(host="10.8.22.3",port = 3306,user = "root",passwd="root",db="halm_tables")
L5Acursor = L5A.cursor()
L5Acursor.execute(sqlstr)
previous = L5Aresult
L5Aresult = L5Acursor.fetchall()
break
except Exception as ex:
print(ex)
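And here is a minimal sketch of the Host refactor suggested above (the class and its names are hypothetical; it assumes pymysql and the same sqlstr):

import pymysql

class Host:
    """One database host plus its current and previous results (hypothetical helper)."""
    def __init__(self, host):
        self.host = host
        self.previous = None
        self.result = None

    def fetch(self, sqlstr):
        # Let connection errors propagate so the caller can try the next host.
        conn = pymysql.connect(host=self.host, port=3306, user="root",
                               passwd="root", db="halm_tables")
        try:
            cursor = conn.cursor()
            cursor.execute(sqlstr)
            self.previous = self.result
            self.result = cursor.fetchall()
        finally:
            conn.close()

hosts = [Host("10.8.22.59"), Host("10.8.22.3")]
while True:
    for h in hosts:
        try:
            h.fetch(sqlstr)  # sqlstr as defined above
            break
        except Exception as ex:
            print(ex)
    else:
        continue  # no host answered; keep looping
    break  # one host answered; bail out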
I am using the MariaDB Connector for Python, and I have a singleton database class that is responsible for creating a pool and performing database operations on it. I have made every effort to close each connection after every access. Still, after a while the pool becomes unusable and gets stuck, never to be freed. This might be a bug in the connector or a bug in my code. Once the pool is exhausted, I create and return a plain connection, which is inefficient for every database access.
Here's my database module code:
import mariadb
import configparser
import sys
from classes.logger import AppLogger

logger = AppLogger(__name__)
connections = 0

class Db:
    """
    Main database for the application
    """
    config = configparser.ConfigParser()
    config.read('/app/config/conf.ini')
    db_config = config['db']
    try:
        conn_pool = mariadb.ConnectionPool(
            user = db_config['user'],
            password = db_config['password'],
            host = db_config['host'],
            port = int(db_config['port']),
            pool_name = db_config['pool_name'],
            pool_size = int(db_config['pool_size']),
            database = db_config['database'],
        )
    except mariadb.PoolError as e:
        print(f'Error creating connection pool: {e}')
        logger.error(f'Error creating connection pool: {e}')
        sys.exit(1)

    def get_pool(self):
        return self.conn_pool if self.conn_pool != None else self.create_pool()

    def __get_connection__(self):
        """
        Returns a db connection
        """
        global connections
        try:
            pconn = self.conn_pool.get_connection()
            pconn.autocommit = True
            print(f"Receiving connection. Auto commit: {pconn.autocommit}")
            connections += 1
            print(f"New Connection. Open Connections: {connections}")
            logger.debug(f"New Connection. Open Connections: {connections}")
        except mariadb.PoolError as e:
            print(f"Error getting pool connection: {e}")
            logger.error(f'Error getting pool connection: {e}')
            # exit(1)
            pconn = self.__create_connection__()
            pconn.autocommit = True
            connections += 1
            logger.debug(f'Created normal connection following failed pool access. Connections: {connections}')
        return pconn

    def __create_connection__(self):
        """
        Creates a new connection. Use this when getting a
        pool connection fails
        """
        db_config = self.db_config
        return mariadb.connect(
            user = db_config['user'],
            password = db_config['password'],
            host = db_config['host'],
            port = int(db_config['port']),
            database = db_config['database'],
        )

    def exec_sql(self, sql, values = None):
        global connections
        pconn = self.__get_connection__()
        try:
            cur = pconn.cursor()
            print(f'Sql: {sql}')
            print(f'values: {values}')
            cur.execute(sql, values)
            # pconn.commit()
            # Is this a select operation?
            if sql.startswith('SELECT') or sql.startswith('Select') or sql.startswith('select'):
                result = cur.fetchall()  # Return a result set for select operations
            else:
                result = True
            pconn.close()
            connections -= 1
            print(f'connection closed: connections: {connections}')
            logger.debug(f'connection closed: connections: {connections}')
            # return True  # Return true for insert, update, and delete operations
            return result
        except mariadb.Error as e:
            print(f"Error performing database operations: {e}")
            # pconn.rollback()
            pconn.close()
            connections -= 1
            print(f'connection closed: connections: {connections}')
            return False
To use the class in a module, I import it, instantiate an object, and run SQL queries on it:
db = Db()
users = db.exec_sql("SELECT * FROM users")
Any ideas why the pool gets exhausted after a while (maybe days) and never recovers?
Maybe an exception other than mariadb.Error is raised sometimes, so the connection is never closed. I believe the best practice is to use a finally section to guarantee that the connection is always closed, like this:
pconn = None
try:
    pconn = self.__get_connection__()
    # ...
except mariadb.Error as e:
    # ...
    pass
finally:
    if pconn:
        try:
            pconn.close()
        except Exception:
            # Not really expected, but if this ever happens it should not alter
            # whatever happened in the try or except sections above.
            pass
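Applied to the exec_sql method above, a sketch might look like the following (same Db class assumed; Exception is caught broadly so that errors other than mariadb.Error also release the connection):

def exec_sql(self, sql, values=None):
    global connections
    pconn = None
    try:
        pconn = self.__get_connection__()
        cur = pconn.cursor()
        cur.execute(sql, values)
        if sql.lstrip().lower().startswith('select'):
            return cur.fetchall()  # result set for select operations
        return True                # success flag for insert/update/delete
    except Exception as e:
        logger.error(f'Error performing database operations: {e}')
        return False
    finally:
        if pconn:
            try:
                pconn.close()  # for a pool connection this should release it back
            except Exception:
                pass
            connections -= 1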
In a classical threading/queue application, I need to do further calculations in my consumer function, but once the queue is empty no code after urls.task_done() is executed.
I am importing market data from a JSON API into my MariaDB database.
On the API, every item I want to fetch has its own URL, so one function builds a queue of all available URLs.
A consumer function processes the queue, inserting a new set of data or updating an existing entry depending on what is already in my database. I already tried wrapping the actual while True loop in its own function, but that didn't work for me.
def create_url():
    try:
        mariadb_connection = mariadb.connect(host='host',
                                             database='db',
                                             user='user',
                                             password='pw')
        cursor = mariadb_connection.cursor()
        cursor.execute('SELECT type_id from tbl_items')
        item_list = cursor.fetchall()
        print("Create URL - Record retrieved successfully")
        for row in item_list:
            url = 'https://someinternet.com/type_id=' + str(row[0])
            urls.put(url)
        return urls
    except mariadb.Error as error:
        mariadb_connection.rollback()
        print("Failed retrieving itemtypes from tbl_items table {}".format(error))
    finally:
        if mariadb_connection.is_connected():
            cursor.close()
            mariadb_connection.close()
def import_mo(urls):
    list_mo_esi = []
    try:
        mariadb_connection = mariadb.connect(host='host',
                                             database='db',
                                             user='user',
                                             password='pw')
        cursor = mariadb_connection.cursor()
        while True:
            s = requests.Session()
            retries = Retry(total=5, backoff_factor=1, status_forcelist=[502, 503, 504])
            s.mount('https://', HTTPAdapter(max_retries=retries))
            jsonraw = s.get(urls.get())
            jsondata = ujson.loads(jsonraw.text)
            for row in jsondata:
                cursor.execute('SELECT order_id from tbl_mo WHERE order_id = %s',
                               (row['order_id'], ))
                exists_mo = cursor.fetchall()
                list_mo_esi.append(row['order_id'])
                if len(exists_mo) != 0:
                    print("updating order#", row['order_id'])
                    cursor.execute('UPDATE tbl_mo SET volume = %s, price = %s WHERE order_id = %s',
                                   (row['volume_remain'], row['price'], row['order_id'], ))
                    mariadb_connection.commit()
                else:
                    cursor.execute('INSERT INTO tbl_mo (type_id, order_id, ordertype, volume, price) VALUES (%s,%s,%s,%s,%s)',
                                   (row['type_id'], row['order_id'], row['is_buy_order'], row['volume_remain'], row['price'], ))
                    mariadb_connection.commit()
            urls.task_done()
    except mariadb.Error as error:
        mariadb_connection.rollback()
        print("Failed retrieving itemtypes from tbl_items table {}".format(error))
The following finally part of my function is never executed, but it should be:
    finally:
        list_mo_purge = list(set(list_mo_sql) - set(list_mo_esi))
        cursor.execute('SELECT order_id FROM tbl_mo')
        list_mo_sql = cursor.fetchall()
        print(len(list_mo_esi))
        print(len(list_mo_sql))
        if mariadb_connection.is_connected():
            cursor.close()
            mariadb_connection.close()
Main thread:
for i in range(num_threads):
    worker = Thread(target=import_mo, args=(urls,))
    worker.setDaemon(True)
    worker.start()
create_url()
urls.join()
After all tasks are completed, my workers stop executing right after urls.task_done().
However, I have more code after urls.task_done() that needs to run to close the database connection and clean old entries out of my database. How can I make this finally part run?
You are not breaking out of the while loop. You should do the following:
if urls.empty():
    break
Most likely your import thread gets blocked at urls.get(), since Queue.get() blocks by default when the queue is empty.
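Alternatively, a timeout on get() lets the worker fall through to its cleanup instead of blocking forever; a minimal sketch (process_row is a hypothetical stand-in for the request/insert/update body above):

from queue import Empty

def import_mo(urls):
    mariadb_connection = mariadb.connect(host='host', database='db',
                                         user='user', password='pw')
    cursor = mariadb_connection.cursor()
    try:
        while True:
            try:
                url = urls.get(timeout=5)  # raises Empty instead of blocking forever
            except Empty:
                break
            process_row(cursor, mariadb_connection, url)  # hypothetical helper: fetch + upsert
            urls.task_done()
    finally:
        # reachable once the queue has stayed empty for 5 seconds
        cursor.close()
        mariadb_connection.close()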
I would like to produce the following setup in Python 3.4 with SQLite3 v3.8.11:
(1) Create an in-memory shared-cache SQLite3 database
(2) Create one connection that only writes to this DB, from one thread
(3) Create multiple connections that concurrently read from this DB, from various other threads
This is what I have created to test this:
import time
import zmq
import random
from threading import Thread
import sqlite3

def producer(context):
    zmq_socket = context.socket(zmq.PUB)
    zmq_socket.bind("inproc://test_pub")
    while True:
        msg = random.random()
        zmq_socket.send(str(msg).encode())
        wait_time = random.uniform(0, 0.05)
        time.sleep(wait_time)

def subscriber_writer(context):
    # Create database connection for writing to memory
    write_con = sqlite3.connect('file::memory:?cache=shared', uri=True)
    cursor = write_con.cursor()
    zmq_socket = context.socket(zmq.SUB)
    zmq_socket.connect("inproc://test_pub")
    zmq_socket.setsockopt(zmq.SUBSCRIBE, b'')
    while True:
        msg = float(zmq_socket.recv().decode())
        cursor.execute('UPDATE TEST SET Value=? WHERE Key="Val"', [msg])
        write_con.commit()

def consumer(context):
    # Create database connection for reading from memory in read-only mode
    read_con = sqlite3.connect('file::memory:?cache=shared&mode=ro', uri=True)
    cursor = read_con.cursor()
    while True:
        cursor.execute('SELECT Value FROM TEST WHERE Key="Val"')
        row = cursor.fetchone()
        result = row[0]
        print(str(result))
        wait_time = random.uniform(0, 0.05)
        time.sleep(wait_time)

def main():
    # Create context
    context = zmq.Context()
    # Create database
    con = sqlite3.connect('file::memory:?cache=shared', uri=True)
    # Create db table
    cursor = con.cursor()
    cursor.execute('CREATE TABLE TEST(Key TEXT, Value NUMERIC)')
    cursor.execute('INSERT INTO TEST VALUES (?,?)', ["Val", 0.00])
    con.commit()
    Thread(target=subscriber_writer, args=(context,)).start()
    Thread(target=producer, args=(context,)).start()
    Thread(target=consumer, args=(context,)).start()

if __name__ == '__main__':
    main()
This works for a while... but then I get the following error:
...
0.2504188310554989
0.2504188310554989
0.8038719720740617
0.42408896748682956
0.21361498908206744
0.3404497358396832
0.010459475861968603
0.5070540941748318
0.5070540941748318
0.23151535812095037
0.636881359928549
0.4184038089576615
0.9920311052908629
Exception in thread Thread-3:
Traceback (most recent call last):
File "E:\Python34-64\lib\threading.py", line 911, in _bootstrap_inner
self.run()
File "E:\Python34-64\lib\threading.py", line 859, in run
self._target(*self._args, **self._kwargs)
File "test.py", line 43, in consumer
cursor.execute('SELECT Value FROM TEST WHERE Key="Val"')
sqlite3.OperationalError: database table is locked: TEST
How can I make this work?
As a side note, creating only ONE connection with check_same_thread=False and sharing it across the whole process works even when the wait times are eliminated. Is this advisable to do instead? See below:
import time
import zmq
import random
from threading import Thread
import sqlite3

def producer(context):
    zmq_socket = context.socket(zmq.PUB)
    zmq_socket.bind("inproc://test_pub")
    while True:
        msg = random.random()
        zmq_socket.send(str(msg).encode())
        # wait_time = random.uniform(0, 0.05)
        # time.sleep(wait_time)

def subscriber_writer(context, con):
    zmq_socket = context.socket(zmq.SUB)
    zmq_socket.connect("inproc://test_pub")
    zmq_socket.setsockopt(zmq.SUBSCRIBE, b'')
    cursor = con.cursor()
    while True:
        msg = float(zmq_socket.recv().decode())
        cursor.execute('UPDATE TEST SET Value=? WHERE Key="Val"', [msg])

def consumer(context, con):
    cursor = con.cursor()
    while True:
        cursor.execute('SELECT Value FROM TEST WHERE Key="Val"')
        row = cursor.fetchone()
        result = row[0]
        print(str(result))
        # wait_time = random.uniform(0, 0.05)
        # time.sleep(wait_time)

def main():
    # Create context
    context = zmq.Context()
    # Create database
    con = sqlite3.connect('file::memory:?cache=shared', uri=True, isolation_level=None, check_same_thread=False)
    # Create db table
    cursor = con.cursor()
    cursor.execute('CREATE TABLE TEST(Key TEXT, Value NUMERIC)')
    cursor.execute('INSERT INTO TEST VALUES (?,?)', ["Val", 0.00])
    Thread(target=subscriber_writer, args=(context, con)).start()
    Thread(target=producer, args=(context,)).start()
    Thread(target=consumer, args=(context, con)).start()

if __name__ == '__main__':
    main()
I am a newbie in Python, so this is my first project in the language.
Every time I run my script, I get a different response from the MySQL server.
The most frequent is OperationalError: (2006, 'MySQL server has gone away').
Sometimes I get the output Thread: 11 commited (see the code below).
And sometimes an emergency stop (translated; my console output is in Russian).
Even when the output is full of commited messages, the records in the table stay the same.
import MySQLdb
import pyping
import socket, struct
from threading import Thread

def ip2int(addr):
    """Convert ip to integer"""
    return struct.unpack("!I", socket.inet_aton(addr))[0]

def int2ip(addr):
    """Convert integer to ip"""
    return socket.inet_ntoa(struct.pack("!I", addr))

def ping(ip):
    """Pinging client"""
    request = pyping.ping(ip, timeout=100, count=1)
    return int(request.max_rtt)

class UpdateThread(Thread):
    def __init__(self, records, name):
        Thread.__init__(self)
        self.database = MySQLdb.connect(host="***", port=3306, user="root", passwd="***", db="dns")
        self.cursor = database.cursor()
        self.name = name
        self.records = records

    def run(self):
        print(self.name)
        for r in self.records:
            #latency = ping(int2ip(r[1])) what the hell :x
            #ip = str(int2ip(r[1]))
            id = str(r[0])
            self.cursor.execute("""update clients set has_subn=%s where id=%s""" % (id, id))
            self.database.commit()
            print(self.name + " commited")

#start
database = MySQLdb.connect(host="***", port=3306, user="root", passwd="***", db="dns")
cursor = database.cursor()
cursor.execute("""select * from clients""")
data = cursor.fetchall()  # All records from DataBase
count = len(data)
threads_counter = 10  # We are creating 10 threads for all records
th_count = count / threads_counter  # Count of records for each thread
last_thread = count % threads_counter  # Last records
threads = []
i = 0
while i < (count - last_thread):
    temp_list = data[i:(i + th_count)]
    #print(temp_list)
    threads.append(UpdateThread(records=temp_list, name="Thread: " + str((i / 3) + 1)).start())
    i += th_count
threads.append(UpdateThread(records=data[i:count], name="Thread: 11").start())
P.S.
The other answers I found here did not help me.
UPD:
I found that some thread (a different one every time) prints OperationalError: (2013, 'Lost connection to MySQL server during query'), and all subsequent threads print the same error.
You need to close your DB connections when you're done with them, or else the DB server will become overwhelmed and expire your connections. For your program, I would change your code so that you have only one DB connection. You can pass a reference to it to your UpdateThread instances and close it when you're done:
database.close()
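A minimal sketch of that refactor, with the one shared connection passed into each thread (the Lock is an addition of mine, since a single MySQLdb connection is not safe to use from several threads without serializing access):

from threading import Thread, Lock

db_lock = Lock()

class UpdateThread(Thread):
    def __init__(self, database, records, name):
        Thread.__init__(self)
        self.database = database  # one shared connection, created in the main thread
        self.records = records
        self.name = name

    def run(self):
        for r in self.records:
            with db_lock:  # serialize access to the shared connection
                cursor = self.database.cursor()
                cursor.execute("update clients set has_subn=%s where id=%s",
                               (str(r[0]), str(r[0])))
                self.database.commit()
                cursor.close()
        print(self.name + " commited")

# Main thread: start the workers, wait for them, then close the connection.
# chunks and names stand for the record slices and thread names built above.
workers = [UpdateThread(database, chunk, name) for chunk, name in zip(chunks, names)]
for w in workers:
    w.start()
for w in workers:
    w.join()
database.close()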