Let's have a look at the following snippet:
@event.listens_for(Pool, "checkout")
def check_connection(dbapi_con, con_record, con_proxy):
    cursor = dbapi_con.cursor()
    try:
        cursor.execute("SELECT 1")  # could also be dbapi_con.ping(),
                                    # not sure what is better
    except exc.OperationalError as ex:
        if ex.args[0] in (2006,   # MySQL server has gone away
                          2013,   # Lost connection to MySQL server during query
                          2055):  # Lost connection to MySQL server at '%s', system error: %d
            # caught by pool, which will retry with a new connection
            raise exc.DisconnectionError()
        else:
            raise
engine = create_engine('mysql://user:puss123@10.0.51.5/dbname', pool_recycle=3600, pool_size=10, listeners=[check_connection])
session_factory = sessionmaker(bind=engine, autoflush=True, autocommit=False)
db_session = session_factory()
...
some code that may take several hours to run
...
db_session.execute('SELECT * FROM ' + P_TABLE + " WHERE id = '%s'" % id)
I thought that registering the check_connection function on the checkout event would solve it, but it didn't.
Now the question is: how am I supposed to tell SQLAlchemy to handle connection dropouts, so that every time I call execute() it checks whether the connection is available and, if it isn't, initiates it once again?
----UPDATE----
The version of SQLAlchemy is 0.7.4
----UPDATE----
def checkout_listener(dbapi_con, con_record, con_proxy):
    try:
        try:
            dbapi_con.ping(False)
        except TypeError:
            dbapi_con.ping()
    except dbapi_con.OperationalError as exc:
        if exc.args[0] in (2006, 2013, 2014, 2045, 2055):
            raise DisconnectionError()
        else:
            raise
engine = create_engine(CONNECTION_URI, pool_recycle=3600, pool_size=10)
event.listen(engine, 'checkout', checkout_listener)
session_factory = sessionmaker(bind=engine, autoflush=True, autocommit=False)
db_session = session_factory()
The session_factory is sent to every newly created thread:
class IncidentProcessor(threading.Thread):
    def __init__(self, queue, session_factory):
        if not isinstance(queue, Queue.Queue):
            raise TypeError("first argument should be of %s" % type(Queue.Queue))
        self.queue = queue
        self.db_session = scoped_session(session_factory)
        threading.Thread.__init__(self)

    def run(self):
        self.db_session().execute('SELECT * FROM ...')
        ...
        some code that takes a lot of time
        ...
        self.db_session().execute('SELECT * FROM ...')
Now, when execute runs after a long period of time, I get the "MySQL server has gone away" error.
There was a talk about this, and this doc describes the problem pretty nicely, so I used their recommended approach to handle such errors: http://discorporate.us/jek/talks/SQLAlchemy-EuroPython2010.pdf
It looks something like this:
from sqlalchemy import create_engine, event
from sqlalchemy.exc import DisconnectionError
def checkout_listener(dbapi_con, con_record, con_proxy):
    try:
        try:
            dbapi_con.ping(False)
        except TypeError:
            dbapi_con.ping()
    except dbapi_con.OperationalError as exc:
        if exc.args[0] in (2006, 2013, 2014, 2045, 2055):
            raise DisconnectionError()
        else:
            raise

db_engine = create_engine(DATABASE_CONNECTION_INFO,
                          pool_size=100,
                          pool_recycle=3600)
event.listen(db_engine, 'checkout', checkout_listener)
Try the pool_recycle argument to create_engine.
From the documentation:
Connection Timeouts
MySQL features an automatic connection close behavior, for connections
that have been idle for eight hours or more. To circumvent having this
issue, use the pool_recycle option which controls the maximum age of
any connection:
engine = create_engine('mysql+mysqldb://...', pool_recycle=3600)
You can try something like this:
while True:
    try:
        db_session.execute('SELECT * FROM ' + PONY_TABLE + " WHERE id = '%s'" % incident_id)
        break
    except SQLAlchemyError:
        db_session.rollback()
If the connection has gone away, execute() will raise an exception, the session will be rolled back, and the retry is likely to succeed.
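If you are worried that a persistent outage would make this spin forever, a bounded variant is easy (a minimal sketch; the retry limit and the final re-raise are my additions, not part of the original answer):

MAX_RETRIES = 3  # assumed limit, tune to taste

for attempt in range(MAX_RETRIES):
    try:
        db_session.execute('SELECT * FROM ' + PONY_TABLE + " WHERE id = '%s'" % incident_id)
        break
    except SQLAlchemyError:
        db_session.rollback()
        if attempt == MAX_RETRIES - 1:
            raise  # still failing after the last retry: propagate the error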
Related
I'm writing some Python code to connect to our Snowflake database and I'm getting an error (I'm new to Python, so I've likely done something wrong here).
I'm getting <class 'NameError'> TestConnection.py 98 inside my script, which has the relevant parts below:
def connection(obj):
    try:
        global cur, engine
        cur = obj.cursor_connection().cursor()
        engine = obj.engine_connection().engine.connect()
    except Exception as e:
        print(e)

    sql = "select current_warehouse(), current_database(), current_schema();"

    try:
        print("Cursor connection successful")
    except Exception as e:
        print(e)

    try:
        print("Engine connection successful")
    except Exception as e:
        print(e)

    return cur, engine
try:
    # Setup Connection with both cursor and the engine
    db, schema = login.db, login.schema
    obj = connect(db, schema)
    cur, engine = connection(obj)
The line I'm getting the error on is the cur, engine = connection(obj) part.
I had a previous error (UnboundLocalError), but putting global cur, engine inside the connection function fixed that; now I'm getting NameError.
What am I doing wrong here?
Thanks
Try this:
import snowflake.connector
ctx = snowflake.connector.connect(
    user='<user_name>',
    password='<password>',
    account='<account_identifier>'
)
cs = ctx.cursor()
Ok, I've fixed this now, it was indentation on the functions and class.
I ended up with
class connect:
    def __init__
    def cursor_connection
    def engine_connection

def connection
and that has it working, whereas having def connection tabbed in (inside the class) was breaking it.
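A minimal skeleton of that working layout (method bodies elided; the names come from the post itself):

class connect:
    def __init__(self, db, schema):
        ...

    def cursor_connection(self):
        ...

    def engine_connection(self):
        ...

# connection stays at module level, NOT indented inside the class
def connection(obj):
    ...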
I'm using Python 3 and I have this loop, which iterates over this list:
self.settings["db"]["host"] = ["db-0", "db-1"]
My problem is that it seems to always return the first option, db-0, in return self.conn, instead of trying db-1.
I have 2 DB container servers, and when I stop one of them, for example db-0, it should try psycopg2.connect with db-1:
def db_conn(self):
    try:
        for server in self.settings["db"]["host"]:
            self.conn = psycopg2.connect(
                host=server,
                user=self.settings["db"]["user"],
                password=self.settings["db"]["password"],
                database=self.settings["db"]["database"],
                connect_timeout=5,
            )
            return self.conn
    except Exception:
        pass
If the loop has not succeeded, I don't want it to return self.conn; it should only return if the try worked.
I also tried:
def db_conn(self):
    try:
        for server in self.settings["db"]["host"]:
            self.conn = psycopg2.connect(
                host=server,
                user=self.settings["db"]["user"],
                password=self.settings["db"]["password"],
                database=self.settings["db"]["database"],
                connect_timeout=5,
            )
    except Exception:
        pass
    return self.conn
You are looping within a try. Do it the other way around: push the try down within the loop.
The DbC contract the current code is attempting to offer is to return a valid connection. Let's make that more explicit by writing a very simple helper. We will spell out its contract in a docstring.
def first_true(iterable, default=False, pred=None):
    # from https://docs.python.org/3/library/itertools.html
    return next(filter(pred, iterable), default)

def get_conn_or_none(self, server):
    """Returns a DB connection to server, or None if that's not possible."""
    try:
        return psycopg2.connect(
            host=server,
            user=self.settings["db"]["user"],
            password=self.settings["db"]["password"],
            database=self.settings["db"]["database"],
            connect_timeout=5,
        )
    except Exception:
        return None
Now db_conn is simply:
def db_conn(self):
    return first_true(map(self.get_conn_or_none, self.settings["db"]["host"]))
That uses the same logic as in your question.
You may want to have db_conn additionally check whether all connection attempts failed, and raise a fatal error in that case, as sketched below.
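A minimal sketch of that stricter contract (the RuntimeError and its message are my additions, not code from the question):

def db_conn(self):
    conn = first_true(map(self.get_conn_or_none, self.settings["db"]["host"]), default=None)
    if conn is None:
        # every server refused or timed out: fail loudly rather than return None
        raise RuntimeError("could not connect to any DB server")
    return conn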
BTW, it's very odd that you're apparently storing a list of server hostnames
in self.settings["db"]["host"], given that an individual user / pw / db is
stored in the other fields.
Consider renaming that list to self.servers.
You can try to check the connection status before returning it:
def db_conn(self):
    try:
        for server in self.settings["db"]["host"]:
            self.conn = psycopg2.connect(
                host=server,
                user=self.settings["db"]["user"],
                password=self.settings["db"]["password"],
                database=self.settings["db"]["database"],
                connect_timeout=5,
            )
            if not self.conn.closed:  # psycopg2 sets closed to 0 while the connection is usable
                return self.conn
    except Exception:
        pass
This code worked for me:
def db_conn(self):
    for server in self.settings["db"]["host"]:
        try:
            self.print_info("TRYING", server)
            self.conn = psycopg2.connect(
                host=server,
                user=self.settings["db"]["user"],
                password=self.settings["db"]["password"],
                database=self.settings["db"]["database"],
            )
        except:
            self.print_info("SERVER DOWN", server)
            continue
        return self.conn
continue skips the rest of the loop body when I get an exception (a failed connection), so the loop goes back to the for with the next item in the list.
According to http://docs.sqlalchemy.org/en/rel_0_9/core/pooling.html#disconnect-handling-pessimistic, SQLAlchemy can be instrumented to reconnect if an entry in the connection pool is no longer valid. I created the following test case to test this:
import subprocess
from sqlalchemy import create_engine, event
from sqlalchemy import exc
from sqlalchemy.pool import Pool
@event.listens_for(Pool, "checkout")
def ping_connection(dbapi_connection, connection_record, connection_proxy):
    cursor = dbapi_connection.cursor()
    try:
        print "pinging server"
        cursor.execute("SELECT 1")
    except:
        print "raising disconnect error"
        raise exc.DisconnectionError()
    cursor.close()

engine = create_engine('postgresql://postgres@localhost/test')
connection = engine.connect()

subprocess.check_call(['psql', str(engine.url), '-c',
                       "select pg_terminate_backend(pid) from pg_stat_activity " +
                       "where pid <> pg_backend_pid() " +
                       "and datname='%s';" % engine.url.database],
                      stdout=subprocess.PIPE)

result = connection.execute("select 'OK'")
for row in result:
    print "Success!", " ".join(row)
But instead of recovering I receive this exception:
sqlalchemy.exc.OperationalError: (OperationalError) terminating connection due to administrator command
server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
Since "pinging server" is printed on the terminal it seems safe to conclude that the event listener is attached. How can SQLAlchemy be taught to recover from a disconnect?
It looks like the checkout method is only called when you first get a connection from the pool (e.g. your connection = engine.connect() line).
If you subsequently lose your connection, you will have to explicitly replace it, so you could just grab a new one and retry your SQL:
try:
    result = connection.execute("select 'OK'")
except sqlalchemy.exc.OperationalError:  # may need more exceptions here
    connection = engine.connect()  # grab a new connection
    result = connection.execute("select 'OK'")  # and retry
This would be a pain to do around every bit of sql, so you could wrap database queries using something like:
def db_execute(conn, query):
    try:
        result = conn.execute(query)
    except sqlalchemy.exc.OperationalError:  # may need more exceptions here (or trap all)
        conn = engine.connect()  # replace your connection
        result = conn.execute(query)  # and retry
    return result
The following:
result = db_execute(connection, "select 'OK'")
should now succeed.
Another option would be to also listen for the invalidate event, and take some action at that time to replace your connection; for example:
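A minimal sketch of that idea, assuming a SQLAlchemy version that exposes the pool-level "invalidate" event (0.9.2 and later); the handler body is illustrative only:

from sqlalchemy import event

@event.listens_for(engine, "invalidate")
def receive_invalidate(dbapi_connection, connection_record, exception):
    # the pool has marked this DBAPI connection as dead; a fresh one will be
    # opened on the next checkout, so here we only log what happened
    print("Connection invalidated: %r" % exception)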
Error: OperationalError: (OperationalError) (2006, 'MySQL server has gone away'). I already received this error when I coded a project on Flask, but I can't understand why I am getting it.
I have code like this (yes, if the code is small and executes fast, then there are no errors):
db_engine = create_engine('mysql://root@127.0.0.1/mind?charset=utf8', pool_size=10, pool_recycle=7200)
Base.metadata.create_all(db_engine)

Session = sessionmaker(bind=db_engine, autoflush=True)
Session = scoped_session(Session)
session = Session()

# there are many classes and functions

session.close()
And this code returns the 'MySQL server has gone away' error, but only after some time, when I use pauses in my script.
The MySQL I use is from openserver.ru (a web server like WAMP).
Thanks.
Looking at the MySQL docs, we can see that there are a bunch of reasons why this error can occur. However, the two main reasons I've seen are:
1) The most common reason is that the connection has been dropped because it hasn't been used in more than 8 hours (default setting)
By default, the server closes the connection after eight hours if nothing has happened. You can change the time limit by setting the wait_timeout variable when you start mysqld
I'll just mention for completeness the two ways to deal with that, but they've already been mentioned in other answers:
A: I have a very long running job and so my connection is stale. To fix this, I refresh my connection:
create_engine(conn_str, pool_recycle=3600) # recycle every hour
B: I have a long running service and long periods of inactivity. To fix this I ping mysql before every call:
create_engine(conn_str, pool_pre_ping=True)
2) My packet size is too large, which should throw this error:
_mysql_exceptions.OperationalError: (1153, "Got a packet bigger than 'max_allowed_packet' bytes")
I've only seen this buried in the middle of the trace, though often you'll only see the generic _mysql_exceptions.OperationalError (2006, 'MySQL server has gone away'), so it's hard to catch, especially if logs are in multiple places.
The above doc says the max packet size is 64MB by default, but it's actually 16MB, which can be verified with SELECT @@max_allowed_packet
To fix this, decrease the packet size for INSERT or UPDATE calls, for example by chunking bulk operations.
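A minimal sketch of such chunking with SQLAlchemy Core (the table, the rows list of dicts, and the chunk size are hypothetical):

# split a bulk INSERT into smaller executemany batches so that no single
# statement exceeds max_allowed_packet
def insert_in_chunks(connection, table, rows, chunk_size=1000):
    for i in range(0, len(rows), chunk_size):
        connection.execute(table.insert(), rows[i:i + chunk_size])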
SQLAlchemy now has a great write-up on how you can use pinging to be pessimistic about your connection's freshness:
http://docs.sqlalchemy.org/en/latest/core/pooling.html#disconnect-handling-pessimistic
From there,
from sqlalchemy import exc
from sqlalchemy import event
from sqlalchemy.pool import Pool
@event.listens_for(Pool, "checkout")
def ping_connection(dbapi_connection, connection_record, connection_proxy):
    cursor = dbapi_connection.cursor()
    try:
        cursor.execute("SELECT 1")
    except:
        # optional - dispose the whole pool
        # instead of invalidating one at a time
        # connection_proxy._pool.dispose()

        # raise DisconnectionError - pool will try
        # connecting again up to three times before raising.
        raise exc.DisconnectionError()
    cursor.close()
And a test to make sure the above works:
from sqlalchemy import create_engine
e = create_engine("mysql://scott:tiger#localhost/test", echo_pool=True)
c1 = e.connect()
c2 = e.connect()
c3 = e.connect()
c1.close()
c2.close()
c3.close()
# pool size is now three.
print "Restart the server"
raw_input()
for i in xrange(10):
    c = e.connect()
    print c.execute("select 1").fetchall()
    c.close()
From the documentation, you can use the pool_recycle parameter:
from sqlalchemy import create_engine
e = create_engine("mysql://scott:tiger#localhost/test", pool_recycle=3600)
I just faced the same problem, which I solved with some effort. I wish my experience may be helpful to others.
Following some suggestions, I used a connection pool and set pool_recycle less than wait_timeout, but it still didn't work.
Then I realized that the global session was probably just reusing the same connection, so the connection pool didn't help. To avoid a global session, generate a new session for each request, and remove it with Session.remove() after processing.
Finally, all is well.
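A minimal sketch of that per-request pattern (handle_request is a hypothetical stand-in for your own request handler; db_engine is whatever engine you created elsewhere):

from sqlalchemy.orm import scoped_session, sessionmaker

Session = scoped_session(sessionmaker(bind=db_engine))

def handle_request():
    session = Session()  # a fresh thread-local session for this request
    try:
        session.execute("SELECT 1")  # on SQLAlchemy 1.4+, wrap raw SQL in sqlalchemy.text()
        session.commit()
    finally:
        Session.remove()  # close the session and return its connection to the pool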
One more point to keep in mind is to manually push the Flask application context when initializing the database. This should resolve the issue.
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
app = Flask(__name__)
with app.app_context():
    db.init_app(app)
https://docs.sqlalchemy.org/en/latest/core/pooling.html#disconnect-handling-optimistic
# OperationalError here is sqlalchemy.exc.OperationalError; LOG and
# SqlExecuteError are the author's own logging helper and custom exception.
def sql_read(cls, sql, connection):
    """SQL for read actions like SELECT."""
    LOG.debug(sql)
    try:
        result = connection.engine.execute(sql)
        header = result.keys()
        for row in result:
            yield dict(zip(header, row))
    except OperationalError as e:
        LOG.info("recreate pool due to %s" % e)
        connection.engine.pool.recreate()
        result = connection.engine.execute(sql)
        header = result.keys()
        for row in result:
            yield dict(zip(header, row))
    except Exception as ee:
        LOG.error(ee)
        raise SqlExecuteError()
I came across PHP way of doing the trick:
my_bool reconnect = 1;
mysql_options(&mysql, MYSQL_OPT_RECONNECT, &reconnect);
but no luck with MySQLdb (python-mysql).
Can anybody please give a clue? Thanks.
I solved this problem by creating a function that wraps the cursor.execute() method, since that's what was throwing the MySQLdb.OperationalError exception. The other example (below) implies that it is the conn.cursor() method that throws this exception.
import MySQLdb

class DB:
    conn = None

    def connect(self):
        self.conn = MySQLdb.connect()

    def query(self, sql):
        try:
            cursor = self.conn.cursor()
            cursor.execute(sql)
        except (AttributeError, MySQLdb.OperationalError):
            self.connect()
            cursor = self.conn.cursor()
            cursor.execute(sql)
        return cursor

db = DB()
sql = "SELECT * FROM foo"
cur = db.query(sql)
# wait a long time for the MySQL connection to time out
cur = db.query(sql)
# still works
I had problems with the proposed solution because it didn't catch the exception. I am not sure why.
I have solved the problem with the ping(True) statement, which I think is neater:
import MySQLdb
con = MySQLdb.Connect()
con.ping(True)
cur = con.cursor()
Got it from here: http://www.neotitans.com/resources/python/mysql-python-connection-error-2006.html
If you are using Ubuntu Linux, there was a patch added to the python-mysql package that added the ability to set that same MYSQL_OPT_RECONNECT option (see here). I have not tried it though.
Unfortunately, the patch was later removed due to a conflict with autoconnect and transactions (described here).
The comments from that page say:
1.2.2-7 Published in intrepid-release on 2008-06-19
python-mysqldb (1.2.2-7) unstable; urgency=low
[ Sandro Tosi ]
* debian/control
- list items lines in description starts with 2 space, to avoid reformat
on webpages (Closes: #480341)
[ Bernd Zeimetz ]
* debian/patches/02_reconnect.dpatch:
- Dropping patch:
Comment in Storm which explains the problem:
# Here is another sad story about bad transactional behavior. MySQL
# offers a feature to automatically reconnect dropped connections.
# What sounds like a dream, is actually a nightmare for anyone who
# is dealing with transactions. When a reconnection happens, the
# currently running transaction is transparently rolled back, and
# everything that was being done is lost, without notice. Not only
# that, but the connection may be put back in AUTOCOMMIT mode, even
# when that's not the default MySQLdb behavior. The MySQL developers
# quickly understood that this is a terrible idea, and removed the
# behavior in MySQL 5.0.3. Unfortunately, Debian and Ubuntu still
# have a patch right now which *reenables* that behavior by default
# even past version 5.0.3.
I needed a solution that works similarly to Garret's, but for cursor.execute(), as I want to let MySQLdb handle all escaping duties for me. The wrapper module ended up looking like this (usage below):
#!/usr/bin/env python
import MySQLdb
class DisconnectSafeCursor(object):
    db = None
    cursor = None

    def __init__(self, db, cursor):
        self.db = db
        self.cursor = cursor

    def close(self):
        self.cursor.close()

    def execute(self, *args, **kwargs):
        try:
            return self.cursor.execute(*args, **kwargs)
        except MySQLdb.OperationalError:
            self.db.reconnect()
            self.cursor = self.db.cursor()
            return self.cursor.execute(*args, **kwargs)

    def fetchone(self):
        return self.cursor.fetchone()

    def fetchall(self):
        return self.cursor.fetchall()

class DisconnectSafeConnection(object):
    connect_args = None
    connect_kwargs = None
    conn = None

    def __init__(self, *args, **kwargs):
        self.connect_args = args
        self.connect_kwargs = kwargs
        self.reconnect()

    def reconnect(self):
        self.conn = MySQLdb.connect(*self.connect_args, **self.connect_kwargs)

    def cursor(self, *args, **kwargs):
        cur = self.conn.cursor(*args, **kwargs)
        return DisconnectSafeCursor(self, cur)

    def commit(self):
        self.conn.commit()

    def rollback(self):
        self.conn.rollback()

disconnectSafeConnect = DisconnectSafeConnection
Using it is trivial, only the initial connect differs. Extend the classes with wrapper methods as per your MySQLdb needs.
import mydb
db = mydb.disconnectSafeConnect()
# ... use as a regular MySQLdb.connections.Connection object
cursor = db.cursor()
# no more "2006: MySQL server has gone away" exceptions now
cursor.execute("SELECT * FROM foo WHERE bar=%s", ("baz",))
You can separate the commit and the close for the connection... it's not pretty, but it does the job.
# "Sql" is assumed to be the DB-API module, e.g.: import MySQLdb as Sql
class SqlManager(object):
    """
    Class that handles the database operations
    """
    def __init__(self, server, database, username, pswd):
        self.server = server
        self.dataBase = database
        self.userID = username
        self.password = pswd

    def Close_Transaction(self):
        """
        Commit the SQL query
        """
        try:
            self.conn.commit()
        except Sql.Error as e:
            print "-- reading SQL Error %d: %s" % (e.args[0], e.args[1])

    def Close_db(self):
        try:
            self.conn.close()
        except Sql.Error as e:
            print "-- reading SQL Error %d: %s" % (e.args[0], e.args[1])

    def __del__(self):
        print "close connection with database.."
        self.conn.close()
I had a similar problem with MySQL and Python, and the solution that worked for me was to upgrade MySQL to 5.0.27 (on Fedora Core 6; your system may work fine with a different version).
I tried a lot of other things, including patching the Python libraries, but upgrading the database was a lot easier and (I think) a better decision.
In addition to Liviu Chircu's solution, add the following method to DisconnectSafeCursor:
def __getattr__(self, name):
    return getattr(self.cursor, name)
and the original cursor properties like "lastrowid" will keep working.
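For instance (a hypothetical usage of the combined wrappers):

cursor = db.cursor()
cursor.execute("INSERT INTO foo (bar) VALUES (%s)", ("baz",))
print(cursor.lastrowid)  # delegated to the underlying MySQLdb cursor via __getattr__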
Your other bet is to work around dropped connections yourself with code.
One way to do it would be the following:
import MySQLdb

class DB:
    conn = None

    def connect(self):
        self.conn = MySQLdb.connect()

    def cursor(self):
        try:
            return self.conn.cursor()
        except (AttributeError, MySQLdb.OperationalError):
            self.connect()
            return self.conn.cursor()

db = DB()
cur = db.cursor()
# wait a long time for the MySQL connection to time out
cur = db.cursor()
# still works