Connection problems with SQLAlchemy and multiple processes - python

I'm using PostgreSQL and SQLAlchemy in a project that consists of a main process which launches child processes. All of these processes access the database via SQLAlchemy.
I'm experiencing repeatable connection failures: the first few child processes work correctly, but after a while a connection error is raised. Here's a minimal working example:
import multiprocessing

from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, create_engine
from sqlalchemy.orm import sessionmaker

DB_URL = 'postgresql://user:password@localhost/database'

Base = declarative_base()

class Dummy(Base):
    __tablename__ = 'dummies'
    id = Column(Integer, primary_key=True)
    value = Column(Integer)

engine = None
Session = None
session = None

def init():
    global engine, Session, session
    engine = create_engine(DB_URL)
    Base.metadata.create_all(engine)
    Session = sessionmaker(bind=engine)
    session = Session()

def cleanup():
    session.close()
    engine.dispose()

def target(id):
    init()
    try:
        dummy = session.query(Dummy).get(id)
        dummy.value += 1
        session.add(dummy)
        session.commit()
    finally:
        cleanup()

def main():
    init()
    try:
        dummy = Dummy(value=1)
        session.add(dummy)
        session.commit()
        p = multiprocessing.Process(target=target, args=(dummy.id,))
        p.start()
        p.join()
        session.refresh(dummy)
        assert dummy.value == 2
    finally:
        cleanup()

if __name__ == '__main__':
    i = 1
    while True:
        print(i)
        main()
        i += 1
On my system (PostgreSQL 9.6, SQLAlchemy 1.1.4, psycopg2 2.6.2, Python 2.7, Ubuntu 14.04) this yields
1
2
3
4
5
6
7
8
9
10
11
Traceback (most recent call last):
File "./fork_test.py", line 64, in <module>
main()
File "./fork_test.py", line 55, in main
session.refresh(dummy)
File "/home/vagrant/latest-sqlalchemy/local/lib/python2.7/site-packages/sqlalchemy/orm/session.py", line 1422, in refresh
only_load_props=attribute_names) is None:
File "/home/vagrant/latest-sqlalchemy/local/lib/python2.7/site-packages/sqlalchemy/orm/loading.py", line 223, in load_on_ident
return q.one()
File "/home/vagrant/latest-sqlalchemy/local/lib/python2.7/site-packages/sqlalchemy/orm/query.py", line 2756, in one
ret = self.one_or_none()
File "/home/vagrant/latest-sqlalchemy/local/lib/python2.7/site-packages/sqlalchemy/orm/query.py", line 2726, in one_or_none
ret = list(self)
File "/home/vagrant/latest-sqlalchemy/local/lib/python2.7/site-packages/sqlalchemy/orm/query.py", line 2797, in __iter__
return self._execute_and_instances(context)
File "/home/vagrant/latest-sqlalchemy/local/lib/python2.7/site-packages/sqlalchemy/orm/query.py", line 2820, in _execute_and_instances
result = conn.execute(querycontext.statement, self._params)
File "/home/vagrant/latest-sqlalchemy/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py", line 945, in execute
return meth(self, multiparams, params)
File "/home/vagrant/latest-sqlalchemy/local/lib/python2.7/site-packages/sqlalchemy/sql/elements.py", line 263, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/home/vagrant/latest-sqlalchemy/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py", line 1053, in _execute_clauseelement
compiled_sql, distilled_params
File "/home/vagrant/latest-sqlalchemy/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py", line 1189, in _execute_context
context)
File "/home/vagrant/latest-sqlalchemy/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py", line 1393, in _handle_dbapi_exception
exc_info
File "/home/vagrant/latest-sqlalchemy/local/lib/python2.7/site-packages/sqlalchemy/util/compat.py", line 202, in raise_from_cause
reraise(type(exception), exception, tb=exc_tb, cause=cause)
File "/home/vagrant/latest-sqlalchemy/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py", line 1182, in _execute_context
context)
File "/home/vagrant/latest-sqlalchemy/local/lib/python2.7/site-packages/sqlalchemy/engine/default.py", line 469, in do_execute
cursor.execute(statement, parameters)
sqlalchemy.exc.OperationalError: (psycopg2.OperationalError) server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
[SQL: 'SELECT dummies.id AS dummies_id, dummies.value AS dummies_value \nFROM dummies \nWHERE dummies.id = %(param_1)s'] [parameters: {'param_1': 11074}]
This is repeatable and always crashes at the same iteration.
I'm creating a new engine and session after the fork as recommended by the SQLAlchemy documentation and elsewhere. Interestingly, the following slightly different approach does not crash:
import contextlib
import multiprocessing

import sqlalchemy
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, create_engine
from sqlalchemy.orm import sessionmaker

DB_URL = 'postgresql://user:password@localhost/database'

Base = declarative_base()

class Dummy(Base):
    __tablename__ = 'dummies'
    id = Column(Integer, primary_key=True)
    value = Column(Integer)

@contextlib.contextmanager
def get_session():
    engine = sqlalchemy.create_engine(DB_URL)
    Base.metadata.create_all(engine)
    Session = sessionmaker(bind=engine)
    session = Session()
    try:
        yield session
    finally:
        session.close()
        engine.dispose()

def target(id):
    with get_session() as session:
        dummy = session.query(Dummy).get(id)
        dummy.value += 1
        session.add(dummy)
        session.commit()

def main():
    with get_session() as session:
        dummy = Dummy(value=1)
        session.add(dummy)
        session.commit()
        p = multiprocessing.Process(target=target, args=(dummy.id,))
        p.start()
        p.join()
        session.refresh(dummy)
        assert dummy.value == 2

if __name__ == '__main__':
    i = 1
    while True:
        print(i)
        main()
        i += 1
Since the original code is more complex and cannot simply be switched over to the latter version, I'd like to understand why one of these works and the other doesn't.
The only obvious difference is that the crashing code uses global variables for the engine and the session; these are shared via copy-on-write with the child processes. However, since I reset them directly after the fork, I don't understand how that could be a problem.
Update
I re-ran the two code pieces with the latest SQLAlchemy (1.1.5) using both Python 2.7 and Python 3.4. On both the results are basically as described above. However, on Python 2.7 the crash of the first code piece now happens in the 13th iteration (reproducibly) while on 3.4 it already happens in the third iteration (also reproducibly). The second code piece runs without problems on both versions. Here's the traceback from 3.4:
1
2
3
Traceback (most recent call last):
File "/home/vagrant/latest-sqlalchemy-3.4/lib/python3.4/site-packages/sqlalchemy/engine/base.py", line 1182, in _execute_context
context)
File "/home/vagrant/latest-sqlalchemy-3.4/lib/python3.4/site-packages/sqlalchemy/engine/default.py", line 470, in do_execute
cursor.execute(statement, parameters)
psycopg2.OperationalError: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "fork_test.py", line 64, in <module>
main()
File "fork_test.py", line 55, in main
session.refresh(dummy)
File "/home/vagrant/latest-sqlalchemy-3.4/lib/python3.4/site-packages/sqlalchemy/orm/session.py", line 1424, in refresh
only_load_props=attribute_names) is None:
File "/home/vagrant/latest-sqlalchemy-3.4/lib/python3.4/site-packages/sqlalchemy/orm/loading.py", line 223, in load_on_ident
return q.one()
File "/home/vagrant/latest-sqlalchemy-3.4/lib/python3.4/site-packages/sqlalchemy/orm/query.py", line 2749, in one
ret = self.one_or_none()
File "/home/vagrant/latest-sqlalchemy-3.4/lib/python3.4/site-packages/sqlalchemy/orm/query.py", line 2719, in one_or_none
ret = list(self)
File "/home/vagrant/latest-sqlalchemy-3.4/lib/python3.4/site-packages/sqlalchemy/orm/query.py", line 2790, in __iter__
return self._execute_and_instances(context)
File "/home/vagrant/latest-sqlalchemy-3.4/lib/python3.4/site-packages/sqlalchemy/orm/query.py", line 2813, in _execute_and_instances
result = conn.execute(querycontext.statement, self._params)
File "/home/vagrant/latest-sqlalchemy-3.4/lib/python3.4/site-packages/sqlalchemy/engine/base.py", line 945, in execute
return meth(self, multiparams, params)
File "/home/vagrant/latest-sqlalchemy-3.4/lib/python3.4/site-packages/sqlalchemy/sql/elements.py", line 263, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/home/vagrant/latest-sqlalchemy-3.4/lib/python3.4/site-packages/sqlalchemy/engine/base.py", line 1053, in _execute_clauseelement
compiled_sql, distilled_params
File "/home/vagrant/latest-sqlalchemy-3.4/lib/python3.4/site-packages/sqlalchemy/engine/base.py", line 1189, in _execute_context
context)
File "/home/vagrant/latest-sqlalchemy-3.4/lib/python3.4/site-packages/sqlalchemy/engine/base.py", line 1393, in _handle_dbapi_exception
exc_info
File "/home/vagrant/latest-sqlalchemy-3.4/lib/python3.4/site-packages/sqlalchemy/util/compat.py", line 203, in raise_from_cause
reraise(type(exception), exception, tb=exc_tb, cause=cause)
File "/home/vagrant/latest-sqlalchemy-3.4/lib/python3.4/site-packages/sqlalchemy/util/compat.py", line 186, in reraise
raise value.with_traceback(tb)
File "/home/vagrant/latest-sqlalchemy-3.4/lib/python3.4/site-packages/sqlalchemy/engine/base.py", line 1182, in _execute_context
context)
File "/home/vagrant/latest-sqlalchemy-3.4/lib/python3.4/site-packages/sqlalchemy/engine/default.py", line 470, in do_execute
cursor.execute(statement, parameters)
sqlalchemy.exc.OperationalError: (psycopg2.OperationalError) server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
[SQL: 'SELECT dummies.id AS dummies_id, dummies.value AS dummies_value \nFROM dummies \nWHERE dummies.id = %(param_1)s'] [parameters: {'param_1': 3397}]
Here's the PostgreSQL log (it's the same for 2.7 and 3.4):
2017-01-18 10:59:36 UTC [22429-1] LOG: database system was shut down at 2017-01-18 10:59:35 UTC
2017-01-18 10:59:36 UTC [22429-2] LOG: MultiXact member wraparound protections are now enabled
2017-01-18 10:59:36 UTC [22428-1] LOG: database system is ready to accept connections
2017-01-18 10:59:36 UTC [22433-1] LOG: autovacuum launcher started
2017-01-18 10:59:36 UTC [22435-1] [unknown]@[unknown] LOG: incomplete startup packet
2017-01-18 11:00:10 UTC [22466-1] user@db LOG: SSL error: decryption failed or bad record mac
2017-01-18 11:00:10 UTC [22466-2] user@db LOG: could not receive data from client: Connection reset by peer
(Note that the message about the incomplete startup packet is harmless)

Quoting "How do I use engines / connections / sessions with Python multiprocessing, or os.fork()?" with added emphasis:
The SQLAlchemy Engine object refers to a connection pool of existing database connections. So when this object is replicated to a child process, the goal is to ensure that no database connections are carried over.
and
However, for the case of a transaction-active Session or Connection being shared, there’s no automatic fix for this; an application needs to ensure a new child process only initiate new Connection objects and transactions, as well as ORM Session objects.
The issue stems from the forked child process inheriting the live global session, which is holding on to a Connection. When target calls init, it overwrites the global references to engine and session, dropping their refcounts to zero in the child and forcing them to be finalized. Because the child shares the parent's socket, that finalization talks to the server over the very connection the parent is still using, which would be consistent with the "SSL error: decryption failed or bad record mac" entry in the PostgreSQL log. (If you were, one way or another, to create another reference to the inherited session in the child, you would prevent it from being cleaned up, but don't do that.) After main has joined and returns to business as usual, it tries to use a connection that is now potentially finalized, or otherwise out of sync. As to why this causes an error only after some number of iterations, I'm not sure.
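To make the finalization mechanism concrete, here is a minimal, self-contained sketch (illustration only, POSIX, no database involved) showing how rebinding a global in the child finalizes the inherited object while the parent's copy lives on:

import os
import sys

class Resource(object):
    def __del__(self):
        print('finalized in pid %s' % os.getpid())
        sys.stdout.flush()

res = Resource()

pid = os.fork()
if pid == 0:
    # Child: rebinding the global drops the last reference to the
    # inherited Resource, so its __del__ runs here, in the child.
    # A DBAPI connection finalized this way sends its close over a
    # socket that is still shared with the parent.
    res = Resource()
    os._exit(0)  # exit without running further finalizers
else:
    os.wait()
    # The parent's copy of the original Resource is still alive here.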
The only way to handle this situation, using globals the way you do, is to

1. close all sessions, and
2. call engine.dispose()

before forking. This will prevent connections from leaking to the child. For example:
def main():
    global session
    init()
    try:
        dummy = Dummy(value=1)
        session.add(dummy)
        session.commit()
        dummy_id = dummy.id
        # Return the Connection to the pool
        session.close()
        # Dispose of it!
        engine.dispose()
        # ...or call your cleanup() function, which does the same
        p = multiprocessing.Process(target=target, args=(dummy_id,))
        p.start()
        p.join()
        # Start a new session
        session = Session()
        dummy = session.query(Dummy).get(dummy_id)
        assert dummy.value == 2
    finally:
        cleanup()
Your second example does not trigger finalization in the child, so it only seems to work; it may be just as broken as the first, since it still inherits a copy of the session and its connection defined locally in main.
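For completeness, the FAQ quoted above also offers a pool-level guard that makes accidental cross-process connection sharing fail loudly instead of corrupting state. A minimal sketch, adapted from the SQLAlchemy documentation; in the question's code it would be called right after create_engine(DB_URL) in init():

import os

from sqlalchemy import event, exc

def add_engine_pidguard(engine):
    """Attach multiprocessing guards to an Engine (adapted from the
    SQLAlchemy FAQ)."""

    @event.listens_for(engine, "connect")
    def connect(dbapi_connection, connection_record):
        # Stamp each new DBAPI connection with its creator's pid.
        connection_record.info['pid'] = os.getpid()

    @event.listens_for(engine, "checkout")
    def checkout(dbapi_connection, connection_record, connection_proxy):
        # Refuse to hand out a connection created in another process.
        pid = os.getpid()
        if connection_record.info['pid'] != pid:
            connection_record.connection = connection_proxy.connection = None
            raise exc.DisconnectionError(
                "Connection record belongs to pid %s, "
                "attempting to check out in pid %s" %
                (connection_record.info['pid'], pid))

With this guard in place, a connection record carried across a fork is invalidated at checkout and the pool opens a fresh connection instead.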

Related

Abort connection when database is read-only (Flask/SQLAlchemy)

I am facing the following issue:
We have configured failover DB nodes for our staging environment. When testing, sometimes the failover happens and Flask keeps open connections to some nodes which are now read-only -- any write operation then fails:
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
cursor, statement, parameters, context
File "/usr/local/lib/python3.7/site-packages/sqlalchemy/engine/default.py", line 608, in do_execute
cursor.execute(statement, parameters)
File "/usr/local/lib/python3.7/site-packages/elasticapm/instrumentation/packages/dbapi2.py", line 210, in execute
return self.trace_sql(self.wrapped_.execute, sql, params)
File "/usr/local/lib/python3.7/site-packages/elasticapm/instrumentation/packages/dbapi2.py", line 244, in _trace_sql
result = method(sql, params)
psycopg2.errors.ReadOnlySqlTransaction: cannot execute DELETE in a read-only transaction
I'd like to detect this somehow and close the connection to these nodes, so that any write operation succeeds. Is this possible?
You can import that error class into your module, and then use it in a try-except block:
from psycopg2.errors import ReadOnlySqlTransaction

try:
    ...  # your main stuff here
except ReadOnlySqlTransaction:
    ...  # terminate the connection
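A caveat worth hedging: when the write goes through SQLAlchemy rather than raw psycopg2, the driver error usually arrives wrapped in a sqlalchemy.exc.DBAPIError, with the original psycopg2 exception available as e.orig, so the handler may need to unwrap it. A sketch under that assumption (session, engine, and obj are illustrative names, not from the question):

from psycopg2.errors import ReadOnlySqlTransaction
from sqlalchemy.exc import DBAPIError

try:
    session.delete(obj)  # any write operation (obj is illustrative)
    session.commit()
except DBAPIError as e:
    if isinstance(e.orig, ReadOnlySqlTransaction):
        session.rollback()
        # Drop all pooled connections so subsequent checkouts
        # reconnect, ideally to the new primary node.
        engine.dispose()
    else:
        raise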

Cassandra Celery python timeout happens on raw query execution using django db connection execute

My Celery worker configures the Cassandra session like this:
from celery.signals import worker_process_init
from cassandra.cqlengine import connection
from cassandra.cqlengine.connection import (
    cluster as cql_cluster, session as cql_session)
from django.conf import settings

def cassandra_init(*args, **kwargs):
    """ Initialize a clean Cassandra connection. """
    if cql_cluster is not None:
        cql_cluster.shutdown()
    if cql_session is not None:
        cql_session.shutdown()
    connection.setup([settings.DATABASES["default"]["HOST"],],
                     settings.DATABASES["default"]["NAME"])

# Initialize worker context (only standard tasks)
worker_process_init.connect(cassandra_init)
When I execute a raw Cassandra query, a timeout occurs:
from django.db import connection

cursor = connection.cursor()
total_ap = cursor.execute("SELECT cpu_info FROM ap_live_stats;")
This works fine everywhere else in my Django project, just not inside the Celery tasks.
Error:
[2018-05-09 18:50:21,576: ERROR/ForkPoolWorker-5] Task apps.statistic.tasks.ap_hourly_data_migrator[77a596d4-61a2-43f4-8580-6abc6e9b5866] raised unexpected: OperationTimedOut("errors={'192.168.98.65': 'Client request timeout. See Session.execute[_async](timeout)'}, last_host=192.168.98.65",)
Traceback (most recent call last):
File "/home/vkchlt0079/virtuals/wlc-env/lib/python3.5/site-packages/celery/app/trace.py", line 374, in trace_task
R = retval = fun(*args, **kwargs)
File "/home/vkchlt0079/virtuals/wlc-env/lib/python3.5/site-packages/celery/app/trace.py", line 629, in __protected_call__
return self.run(*args, **kwargs)
File "/home/vkchlt0079/projects/wlcd/src/web_gui/backend/django/wlcd/apps/statistic/tasks.py", line 59, in ap_hourly_data_migrator
"SELECT cpu_info FROM ap_live_stats;")
File "/home/vkchlt0079/virtuals/wlc-env/lib/python3.5/site-packages/django_cassandra_engine/utils.py", line 47, in execute
return self.cursor.execute(sql)
File "/home/vkchlt0079/virtuals/wlc-env/lib/python3.5/site-packages/django_cassandra_engine/connection.py", line 12, in execute
return self.connection.execute(*args, **kwargs)
File "/home/vkchlt0079/virtuals/wlc-env/lib/python3.5/site-packages/django_cassandra_engine/connection.py", line 86, in execute
self.session.set_keyspace(self.keyspace)
File "cassandra/cluster.py", line 2448, in cassandra.cluster.Session.set_keyspace (cassandra/cluster.c:48048)
File "cassandra/cluster.py", line 2030, in cassandra.cluster.Session.execute (cassandra/cluster.c:38536)
File "cassandra/cluster.py", line 3844, in cassandra.cluster.ResponseFuture.result (cassandra/cluster.c:80834)
cassandra.OperationTimedOut: errors={'192.168.98.65': 'Client request timeout. See Session.execute[_async](timeout)'}, last_host=192.168.98.65
I tried to increase the timeout, but it didn't help, and I'm not sure where it should be set.
# project/tasks.py
from celery.signals import worker_process_init
from django.db import connection

@worker_process_init.connect
def connect_db(**kwargs):
    connection.reconnect()

This will initiate the database connection required by the Django Cassandra engine.
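As for the timeout itself, a hedged pointer: the DataStax driver exposes the request timeout on the raw session, both as a session default and per call. A sketch, assuming the cqlengine connection set up above:

from cassandra.cqlengine import connection

# Raise the default request timeout (in seconds) for every statement
# executed on this session...
raw_session = connection.get_session()
raw_session.default_timeout = 30

# ...or override it for a single statement:
rows = raw_session.execute("SELECT cpu_info FROM ap_live_stats;", timeout=30)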

"This session is in 'prepared' state; no further" error with SQLAlchemy using scoped_session in threaded mod_wsgi app

I recently updated to SQLAlchemy 1.1, which I'm using under Django 1.10 (also recently updated, from 1.6), and I keep getting SQLAlchemy/MySQL errors saying This session is in 'prepared' state; no further SQL can be emitted within this transaction.
How do I debug this?
It's running in a single-process, multi-threaded environment under mod_wsgi, and I'm not sure whether I've properly configured SQLAlchemy's scoped_session.
I use a request container that is assigned to each incoming request and that sets up the session and cleans it up. (I'm assuming each request in Django runs on its own thread.)
# scoped_session as a global variable
# I get constant errors if pool_size = 20 for some reason
Engine = create_engine(host, pool_recycle=600, pool_size=10, connect_args=options)
Session = scoped_session(sessionmaker(autoflush=True, bind=Engine))
RUNNING_DEVSERVER = (len(sys.argv) > 1 and sys.argv[1] == 'runserver')  # Session.remove() fails in dev

# Created in my API, once per request (per thread)
class RequestContainer(object):
    def __init__(self, request, *args, **kwargs):
        self.s = Session()

    def safe_commit(self):
        try:
            self.s.commit()
        except:
            self.s.rollback()
            raise

    def __del__(self):
        if self.s:
            try:
                self.s.commit()
            except:
                self.s.rollback()
                raise
            if not RUNNING_DEVSERVER:
                Session.remove()
            self.s = None
The prepared-state error pops up in the code, usually in the same place, though not every time, and sometimes in other places:
...
rs = request_container.s.query(MyTable)
...
if rs.count():
# Error log:
File "/usr/local/lib/python2.7/dist-packages/SQLAlchemy-1.1.0b3-py2.7-linux-x86_64.egg/sqlalchemy/orm/query.py", line 3011, in count
return self.from_self(col).scalar()
File "/usr/local/lib/python2.7/dist-packages/SQLAlchemy-1.1.0b3-py2.7-linux-x86_64.egg/sqlalchemy/orm/query.py", line 2765, in scalar
ret = self.one()
File "/usr/local/lib/python2.7/dist-packages/SQLAlchemy-1.1.0b3-py2.7-linux-x86_64.egg/sqlalchemy/orm/query.py", line 2736, in one
ret = self.one_or_none()
File "/usr/local/lib/python2.7/dist-packages/SQLAlchemy-1.1.0b3-py2.7-linux-x86_64.egg/sqlalchemy/orm/query.py", line 2706, in one_or_none
ret = list(self)
File "/usr/local/lib/python2.7/dist-packages/SQLAlchemy-1.1.0b3-py2.7-linux-x86_64.egg/sqlalchemy/orm/query.py", line 2777, in __iter__
return self._execute_and_instances(context)
File "/usr/local/lib/python2.7/dist-packages/SQLAlchemy-1.1.0b3-py2.7-linux-x86_64.egg/sqlalchemy/orm/query.py", line 2798, in _execute_and_instances
close_with_result=True)
File "/usr/local/lib/python2.7/dist-packages/SQLAlchemy-1.1.0b3-py2.7-linux-x86_64.egg/sqlalchemy/orm/query.py", line 2807, in _get_bind_args
**kw
File "/usr/local/lib/python2.7/dist-packages/SQLAlchemy-1.1.0b3-py2.7-linux-x86_64.egg/sqlalchemy/orm/query.py", line 2789, in _connection_from_session
conn = self.session.connection(**kw)
File "/usr/local/lib/python2.7/dist-packages/SQLAlchemy-1.1.0b3-py2.7-linux-x86_64.egg/sqlalchemy/orm/session.py", line 903, in connection
execution_options=execution_options)
File "/usr/local/lib/python2.7/dist-packages/SQLAlchemy-1.1.0b3-py2.7-linux-x86_64.egg/sqlalchemy/orm/session.py", line 908, in _connection_for_bind
engine, execution_options)
File "/usr/local/lib/python2.7/dist-packages/SQLAlchemy-1.1.0b3-py2.7-linux-x86_64.egg/sqlalchemy/orm/session.py", line 319, in _connection_for_bind
self._assert_active()
File "/usr/local/lib/python2.7/dist-packages/SQLAlchemy-1.1.0b3-py2.7-linux-x86_64.egg/sqlalchemy/orm/session.py", line 201, in _assert_active
"This session is in 'prepared' state; no further "
InvalidRequestError: This session is in 'prepared' state; no further SQL can be emitted within this transaction.
The RequestContainer was being accidentally assigned to a global API interface handler, causing one session to be mis-used among multiple threads, when it was intended to be created per thread.
Updated to show how I assign a session to each thread, including tear-down to prevent database commit errors from hanging the session state:
class ThreadSessionRequest(object):
    def __init__(self, request, *args, **kwargs):
        self.s = Session()

    def __del__(self):
        if self.s:
            self.remove_session()

    def remove_session(self):
        if self.s:
            try:
                self.safe_commit()
            finally:
                Session.remove()
                del self.s
                self.s = None

    def safe_commit(self):
        if self.s:
            try:
                self.s.commit()
            except:
                self.s.rollback()
                raise
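As a hedged aside beyond the original answer: __del__ runs at an unspecified time, so a more deterministic variant is to call Session.remove() at a well-defined end-of-request point, for example in a Django 1.10-style middleware. The class name below is illustrative; Session is the scoped_session from above:

class SQLAlchemySessionMiddleware(object):
    """Illustrative sketch: deterministic per-request session cleanup."""

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        try:
            return self.get_response(request)
        finally:
            # Close this thread's session and discard it from the
            # scoped_session registry, regardless of how the view exited.
            Session.remove()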

query non-deterministically hanging on self._clslevel[target] = collections.deque()

I have a program running the following query via the SQLAlchemy ORM. At this particular point in the code, it is running serially.
# MS SQL database setup
engine = create_engine(conn_string, pool_size=pool_size, pool_recycle=3600, echo=False)
engine.execute('set transaction isolation level read uncommitted')
session = scoped_session(sessionmaker(bind=engine, autocommit=autocommit))

# hangs in here
max_id = session.query(func.max(entity.entity_id)).all()[0][0]
Occasionally, the program hangs as if it were deadlocked; execution does not proceed for hours. I have ruled out any issues with the database connection or blocked queries, as a periodic stack trace of my main calling thread shows execution hanging at the following spot (see the stack trace below).
It seems like a dictionary is being updated. Even if SQLAlchemy were doing anything fancy with threads under the hood, weakref.py uses a WeakValueDictionary (https://docs.python.org/3/library/weakref.html#weakref.WeakValueDictionary), in which self.data is a native Python dictionary, which should be safe under concurrent access.
What could be causing this intermittent hanging? I am on SQLAlchemy 1.0.12 and Python 3.5.2.
File "/home/me/py35/lib/python3.5/site-packages/sqlalchemy/orm/query.py", line 2588, in all
return list(self)
File "/home/me/py35/lib/python3.5/site-packages/sqlalchemy/orm/query.py", line 2732, in __iter__
context = self._compile_context()
File "/home/me/py35/lib/python3.5/site-packages/sqlalchemy/orm/query.py", line 3180, in _compile_context
if self.dispatch.before_compile:
File "/home/me/py35/lib/python3.5/site-packages/sqlalchemy/event/base.py", line 288, in __get__
obj.__dict__['dispatch'] = disp = self.dispatch_cls._for_instance(obj)
File "/home/me/py35/lib/python3.5/site-packages/sqlalchemy/event/base.py", line 110, in _for_instance
return self._for_class(instance_cls)
File "/home/me/py35/lib/python3.5/site-packages/sqlalchemy/event/base.py", line 106, in _for_class
return self.__class__(self, instance_cls)
File "/home/me/py35/lib/python3.5/site-packages/sqlalchemy/event/base.py", line 84, in __init__
for ls in parent._event_descriptors
File "/home/me/py35/lib/python3.5/site-packages/sqlalchemy/event/base.py", line 84, in <genexpr>
for ls in parent._event_descriptors
File "/home/me/py35/lib/python3.5/site-packages/sqlalchemy/event/attr.py", line 188, in __init__
parent.update_subclass(target_cls)
File "/home/me/py35/lib/python3.5/site-packages/sqlalchemy/event/attr.py", line 125, in update_subclass
self._clslevel[target] = collections.deque()
File "/home/me/py35/lib/python3.5/weakref.py", line 378, in __setitem__
self.data[ref(key, self._remove)] = value

SqlAlchemy+Tornado: can't reconnect until invalid transaction is rolled back

I'm building a webapp with Tornado + SQLAlchemy and, seemingly at random, I get this error:
File "/usr/lib/python3/dist-packages/sqlalchemy/engine/base.py", line 1024, in _handle_dbapi_exception
exc_info
File "/usr/lib/python3/dist-packages/sqlalchemy/util/compat.py", line 187, in raise_from_cause
reraise(type(exception), exception, tb=exc_tb, cause=exc_value)
File "/usr/lib/python3/dist-packages/sqlalchemy/util/compat.py", line 182, in reraise
raise value.with_traceback(tb)
File "/usr/lib/python3/dist-packages/sqlalchemy/engine/base.py", line 822, in _execute_context
conn = self._revalidate_connection()
File "/usr/lib/python3/dist-packages/sqlalchemy/engine/base.py", line 239, in _revalidate_connection
"Can't reconnect until invalid "
sqlalchemy.exc.StatementError: Can't reconnect until invalid transaction is rolled back
I can't figure out how to solve this. I've wrapped every db.commit in a
try:
    self.db.commit()
except Exception:
    self.db.rollback()
That's my Application class:
class Application(tornado.web.Application):
    [...]
    engine = create_engine(options.db_path, convert_unicode=True, echo=options.debug)
    models.init_db(engine)
    self.db = scoped_session(sessionmaker(bind=engine))
    tornado.web.Application.__init__(self, handlers, **settings)
but it changed nothing.
What is the best way to configure SQLAlchemy and Tornado for a web app, the way one would with MySQL + PHP?
My way is to do a rollback on finish. Add this to your BaseHandler:
def on_finish(self):
    if self.get_status() == 500:
        self.db_session.rollback()
I remember having the same issue a while ago. It seemed to be caused by something strange in connection pooling, and disabling pooling fixed it.
Not the best idea in general, but it worked.
Try passing poolclass=NullPool to create_engine:
...
from sqlalchemy.pool import NullPool
...
engine = create_engine(options.db_path, convert_unicode=True, echo=options.debug, poolclass=NullPool)
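A hedged addition, not from the answers above: on SQLAlchemy 1.2 and later, an alternative to disabling pooling entirely is to keep the pool but have it validate connections at checkout:

# Assumes SQLAlchemy >= 1.2: pool_pre_ping issues a lightweight ping
# on checkout and transparently replaces stale connections.
engine = create_engine(options.db_path, pool_pre_ping=True)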
