import pyodbc
import time

connection = pyodbc.connect(..............)
cursor = connection.cursor()

while True:
    time.sleep(1)
    cursor.execute(INSERT_QUERY)
    cursor.commit()
That works, but suddenly I got this exception:
pyodbc.Error: ('08S01', '[08S01] [Microsoft][ODBC SQL Server Driver]Communication link failure (0) (SQLExecDirectW)')
Why is that? Why does the link suddenly disconnect? How can I handle that exception and reconnect? How can I fix this?
Googling the error code suggests the connection failed for one reason or another.
You might want to add retry/reconnection logic for that case; crudely, something like this:
import pyodbc
import time

connection = None

while True:
    time.sleep(1)
    if not connection:  # No connection yet? Connect.
        connection = pyodbc.connect("..............")
        cursor = connection.cursor()
    try:
        cursor.execute(INSERT_QUERY)
        cursor.commit()
    except pyodbc.Error as pe:
        print("Error:", pe)
        if pe.args[0] == "08S01":  # Communication error.
            # Nuke the connection and retry.
            try:
                connection.close()
            except:
                pass
            connection = None
            continue
        raise  # Re-raise any other exception
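If you want something a bit less crude, the same idea can be factored into a helper with a bounded number of retries and a short pause before reconnecting. This is only a sketch under the same assumptions as above: INSERT_QUERY and the connection string are placeholders, and execute_with_retry is an illustrative name, not pyodbc API.

import time

import pyodbc

CONN_STR = ".............."  # placeholder connection string, as above

def execute_with_retry(connection, query, retries=3, delay=2):
    """Run query, reconnecting and retrying on communication failures.

    Returns the (possibly new) connection so the caller can keep reusing it.
    """
    for attempt in range(1, retries + 1):
        try:
            if connection is None:
                connection = pyodbc.connect(CONN_STR)
            cursor = connection.cursor()
            cursor.execute(query)
            connection.commit()
            return connection
        except pyodbc.Error as pe:
            # Give up on anything that is not a communication error,
            # or once we are out of attempts.
            if pe.args[0] != "08S01" or attempt == retries:
                raise
            if connection is not None:
                try:
                    connection.close()
                except pyodbc.Error:
                    pass
            connection = None
            time.sleep(delay)  # brief pause before reconnecting

# Usage in the original loop:
# connection = None
# while True:
#     time.sleep(1)
#     connection = execute_with_retry(connection, INSERT_QUERY)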
I am trying to achieve the same thing as in the earlier question psycopg2: How to execute vacuum postgresql query in python script; however, the recommendation there to open an autocommit connection includes a link which is broken.
The code below runs without error, but the table is not vacuumed.
How does this need to be written to call VACUUM FULL correctly?
#!/usr/bin/python
import psycopg2
from config import config

def connect():
    """ Connect to the PostgreSQL database server """
    conn = None
    try:
        # read connection parameters
        params = config()
        # connect to the PostgreSQL server
        conn = psycopg2.connect(**params)
        conn.autocommit = 1
        # create a cursor
        cur = conn.cursor()
        # execute Vacuum Full
        cur.execute('Vacuum Full netsuite_display')
        # close the communication with the PostgreSQL
        cur.close()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        if conn is not None:
            conn.close()
            print('Database connection closed.')

if __name__ == '__main__':
    connect()
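For reference, VACUUM cannot run inside a transaction block, so autocommit has to be enabled on the connection before the cursor executes it. A minimal standalone sketch of that pattern (connection parameters are placeholders; the table name is taken from the question):

import psycopg2

# Placeholder connection parameters; substitute your own.
conn = psycopg2.connect(host='localhost', dbname='mydb', user='me', password='secret')
conn.autocommit = True  # VACUUM cannot run inside a transaction block

with conn.cursor() as cur:
    cur.execute('VACUUM FULL netsuite_display')

conn.close()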
I'm using Python with asyncpg to interact with my PostgreSQL database.
After some time, if I don't interact with it and then try to do so, I get a connection is closed error. Is this a server-side or a client-side config issue?
How do I solve it?
The database automatically closes the connection for security reasons.
So I suggest opening the connection to the db just before running queries with asyncpg, and then closing the connection right afterwards.
Furthermore, you can handle the errors you get when the connection is closed by properly raising exceptions.
Take a look at this example:
import asyncpg

# (This snippet is assumed to run inside an async function;
#  `connection` is either None or a previously opened asyncpg connection.)

print("I'm going to run a query with asyncpg")

# if the connection to db is not opened, then open it
if not connection:
    # try to open the connection to db
    try:
        connection = await asyncpg.connect(
            host=YOUR_DATABASE_HOST,
            user=YOUR_DATABASE_USER,
            password=YOUR_DATABASE_PASS,
            database=YOUR_DATABASE_DB_NAME
        )
    except (Exception, asyncpg.ConnectionFailureError) as error:
        print("Error while connecting to db: {}".format(error))
else:
    # connection already up and running
    pass

QUERY_STRING = """
INSERT INTO my_table(field_1, field_2)
VALUES ($1, $2);
"""

try:
    await connection.execute(QUERY_STRING, value_to_assign_to_field_1, value_to_assign_to_field_2)
    return None
# except (Exception, asyncpg.UniqueViolationError) as integrError:
#     print("You are violating unique constraint.")
except (Exception, asyncpg.ConnectionFailureError) as error:
    print("Connection to db has failed. Cannot add data.")
    return "{}".format(error)
finally:
    if connection:
        await Utils.close_connection(connection)  # Utils.close_connection is the answerer's own helper
        print("data has been added. closing the connection.")
I am writing a Python script that will read data from a SQL Server database. For this I have used pyodbc to connect to SQL Server on Windows (my driver is ODBC Driver 17 for SQL Server).
My script works fine, but I need to use a connection pool instead of a single connection to manage resources more effectively. However, the pyodbc documentation only mentions pooling without providing examples of how connection pooling can be implemented. Any ideas on how this can be done in Python while connecting to SQL Server? I have only found solutions for PostgreSQL that use psycopg2, which obviously does not work for me.
At the moment my code looks like this:
import sys
import time

import pyodbc

def get_limited_rows(size):
    try:
        server = 'here-is-IP-address-of-server'
        database = 'here-is-my-db-name'
        username = 'here-is-my-username'
        password = 'here-is-my-password'
        conn = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server};SERVER=' + server +
                              ';DATABASE=' + database + ';UID=' + username + ';PWD=' + password)
        cursor = conn.cursor()
        print('Connected to database')
        select_query = 'select APPN, APPD from MAIN'
        cursor.execute(select_query)
        while True:
            records = cursor.fetchmany(size)
            if not records:
                cursor.close()
                sys.exit("Completed")
            else:
                for record in records:
                    print(record)
                time.sleep(10)
    except pyodbc.Error as error:
        print('Error reading data from table', error)
    finally:
        if (conn):
            conn.close()
            print('Data base connection closed')
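pyodbc itself only exposes a module-level switch (pyodbc.pooling, set before the first connection) that turns the ODBC driver manager's connection pooling on or off; it does not ship an application-level pool object. One way to get an explicit pool in Python is a small hand-rolled wrapper around queue.Queue. This is just a sketch, with the class name, pool size, and connection string as placeholders:

import queue

import pyodbc

class SimpleConnectionPool:
    """A very small, thread-safe pool of pyodbc connections (illustrative only)."""

    def __init__(self, conn_str, size=5):
        self._pool = queue.Queue(maxsize=size)
        for _ in range(size):
            self._pool.put(pyodbc.connect(conn_str))

    def get(self, timeout=None):
        # Blocks until a connection is free.
        return self._pool.get(timeout=timeout)

    def put(self, conn):
        # Return a connection to the pool for reuse.
        self._pool.put(conn)

    def close_all(self):
        while not self._pool.empty():
            self._pool.get_nowait().close()

# Usage sketch:
# pool = SimpleConnectionPool('DRIVER={ODBC Driver 17 for SQL Server};SERVER=...;UID=...;PWD=...', size=5)
# conn = pool.get()
# try:
#     cursor = conn.cursor()
#     cursor.execute('select APPN, APPD from MAIN')
# finally:
#     pool.put(conn)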
I am trying to delete records from a PostgreSQL table using psycopg2 from PySpark code, but I am getting an error. Not sure what is wrong. Thanks in advance.
import psycopg2

def delete_records(table, city_list, key):
    connection = None
    try:
        connection = psycopg2.connect(host=host,
                                      database=db,
                                      user=user,
                                      password=password)
        cursor = connection.cursor()
        delete_query = "Delete from " + table + " where " + key + " in " + str(tuple(city_list))
        cursor.execute(delete_query)
        connection.commit()
        logger.debug("Record deleted successfully")
    except (Exception, psycopg2.DatabaseError) as error:
        logger.error("%s transaction error. Reverting all other operations of a transaction", error)
        connection.rollback()
    finally:
        if connection is not None:
            cursor.close()
            connection.close()
            logger.debug("PostgreSQL connection is closed")

delete_records(table_name, city_list, "id")
Error:
'NoneType' object has no attribute 'rollback'
Please help. Thanks in advance.
It looks like an error is probably happening in the first line of your try, so the connection is still None by the time you get to the except.
Like you mention in the comments, adding if connection is not None: to the except block sounds like a good idea.
You probably want to figure out what the logger is saying about the error so that you can troubleshoot, so you may want something like this:
except (Exception, psycopg2.DatabaseError) as error:
    logger.error("%s transaction error. Reverting all other operations of a transaction", error)
    if connection is not None:
        connection.rollback()
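For completeness, here is a sketch of the whole function with that guard in place, and with the values passed as query parameters instead of concatenated into the SQL string. host, db, user, password, and logger are assumed to be defined elsewhere, as in the question; the psycopg2.sql usage is my addition, not part of the original answer:

import psycopg2
from psycopg2 import sql

def delete_records(table, city_list, key):
    connection = None
    cursor = None
    try:
        connection = psycopg2.connect(host=host,
                                      database=db,
                                      user=user,
                                      password=password)
        cursor = connection.cursor()
        # Identifiers go through psycopg2.sql, values through query parameters.
        delete_query = sql.SQL("DELETE FROM {} WHERE {} IN %s").format(
            sql.Identifier(table), sql.Identifier(key))
        cursor.execute(delete_query, (tuple(city_list),))
        connection.commit()
        logger.debug("Record deleted successfully")
    except (Exception, psycopg2.DatabaseError) as error:
        logger.error("%s transaction error. Reverting all other operations of a transaction", error)
        if connection is not None:
            connection.rollback()
    finally:
        if cursor is not None:
            cursor.close()
        if connection is not None:
            connection.close()
            logger.debug("PostgreSQL connection is closed")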
According to http://docs.sqlalchemy.org/en/rel_0_9/core/pooling.html#disconnect-handling-pessimistic, SQLAlchemy can be instrumented to reconnect if an entry in the connection pool is no longer valid. I created the following test case to test this:
import subprocess

from sqlalchemy import create_engine, event
from sqlalchemy import exc
from sqlalchemy.pool import Pool

@event.listens_for(Pool, "checkout")
def ping_connection(dbapi_connection, connection_record, connection_proxy):
    cursor = dbapi_connection.cursor()
    try:
        print "pinging server"
        cursor.execute("SELECT 1")
    except:
        print "raising disconnect error"
        raise exc.DisconnectionError()
    cursor.close()

engine = create_engine('postgresql://postgres@localhost/test')
connection = engine.connect()

subprocess.check_call(['psql', str(engine.url), '-c',
                       "select pg_terminate_backend(pid) from pg_stat_activity " +
                       "where pid <> pg_backend_pid() " +
                       "and datname='%s';" % engine.url.database],
                      stdout=subprocess.PIPE)

result = connection.execute("select 'OK'")
for row in result:
    print "Success!", " ".join(row)
But instead of recovering I receive this exception:
sqlalchemy.exc.OperationalError: (OperationalError) terminating connection due to administrator command
server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
Since "pinging server" is printed on the terminal it seems safe to conclude that the event listener is attached. How can SQLAlchemy be taught to recover from a disconnect?
It looks like the checkout listener is only called when you first get a connection from the pool (e.g. your connection = engine.connect() line).
If you subsequently lose your connection, you will have to explicitly replace it, so you could just grab a new one and retry your SQL:
try:
    result = connection.execute("select 'OK'")
except sqlalchemy.exc.OperationalError:  # may need more exceptions here
    connection = engine.connect()  # grab a new connection
    result = connection.execute("select 'OK'")  # and retry
This would be a pain to do around every bit of SQL, so you could wrap database queries using something like:
def db_execute(conn, query):
    try:
        result = conn.execute(query)
    except sqlalchemy.exc.OperationalError:  # may need more exceptions here (or trap all)
        conn = engine.connect()  # replace your connection
        result = conn.execute(query)  # and retry
    return result
The following should now succeed:
result = db_execute(connection, "select 'OK'")
Another option would be to also listen for the invalidate method, and take some action at that time to replace your connection.
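For what it's worth, newer SQLAlchemy releases (1.2 and later) have a built-in pessimistic check that replaces the hand-written checkout listener. A minimal sketch, assuming the same Postgres URL as in the question:

from sqlalchemy import create_engine, text

# pool_pre_ping issues a lightweight ping when a connection is checked out
# and transparently replaces connections that the server has dropped.
engine = create_engine('postgresql://postgres@localhost/test', pool_pre_ping=True)

with engine.connect() as connection:
    result = connection.execute(text("select 'OK'"))
    for row in result:
        print("Success!", " ".join(row))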