Does teardown_appcontext ignore HTTPExceptions? - python

I'm trying to roll back the database session when an HTTP error such as bad_request, unauthorized, forbidden, or not_found happens.
It is a serverless application using WSGI and Flask.
The scenario: I create an entry to be saved in the database, but if something goes wrong, I want the session to be rolled back.
If I raise a plain exception, the rollback happens, but if I use abort(make_response(jsonify(message=message, **kwargs), 400)), an HTTPException is raised and teardown_appcontext seems to ignore it.
I also tried application.config['PRESERVE_CONTEXT_ON_EXCEPTION'] = True (and False too), but it didn't solve my problem.
In my app:
def database(application, engine=None):
    sqlalchemy_url = os.environ.get('SQLALCHEMY_URL')
    set_session(sqlalchemy_url, engine=engine)

    @application.teardown_appcontext
    def finish_session(exception=None):
        commit_session(exception)

def commit_session(exception=None):
    if exception:
        _dbsession.rollback()
    else:
        _dbsession.commit()
    _dbsession.remove()
    if hasattr(_engine, 'dispose'):
        _engine.dispose()
And here is the function that is called when I want to return a bad_request response. The abort function raises an HTTPException that is ignored by the teardown function:

def badrequest(message='bad request.', **kwargs):
    abort(make_response(jsonify(message=message, **kwargs), 400))
I want teardown_appcontext to recognize the HTTPException as well, not only a plain Exception, so that when abort is called the rollback still happens.

I think this is because teardown_appcontext runs when the app context is popped, and by then the HTTPException raised by abort() has already been handled inside the request and turned into a response, so the teardown function receives exception=None. You can roll back the session using errorhandler() or register_error_handler(). Here is an example:
from flask import Flask, abort, jsonify
from flask_sqlalchemy import SQLAlchemy
from werkzeug.exceptions import BadRequest

app = Flask(__name__)
app.config.update(dict(SQLALCHEMY_DATABASE_URI='...'))
db = SQLAlchemy(app)

class Node(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(100), nullable=False)

@app.errorhandler(BadRequest)
def handle_bad_request(e):
    db.session.rollback()
    return 'session has been rolled back!', 400

@app.teardown_appcontext
def finish_session(exception=None):
    if not exception:
        db.session.commit()

@app.route('/bad-node')
def bad():
    # add into session without commit and abort (see: handle_bad_request)
    db.session.add(Node(name='bad node'))
    abort(400)

@app.route('/good-node')
def good():
    # without exceptions - see: finish_session
    db.session.add(Node(name='good node'))
    return '<good node> was saved'

@app.route('/nodes')
def all_nodes():
    # just a list of items from the db
    return jsonify([i.name for i in Node.query.all()])

if __name__ == '__main__':
    db.create_all()
    db.session.commit()
    app.run(debug=True)
Open /good-node and /bad-node a few times. After that, open /nodes and you will see that the 'bad nodes' were not saved (they were rolled back).
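The answer also mentions register_error_handler(); to cover every HTTP error code in one place rather than only 400, a handler for the HTTPException base class could be registered. A minimal sketch, not part of the example above:

from werkzeug.exceptions import HTTPException

def rollback_on_http_error(e):
    db.session.rollback()
    # return the original error response unchanged
    return e.get_response()

app.register_error_handler(HTTPException, rollback_on_http_error)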
Hope this helps.

Related

RuntimeError: working outside of request context Flask Session

I am getting a RuntimeError: working outside of request context error while running a test in Flask. I've tried multiple suggestions from other threads, but none have worked for me.
Part of my views.py:
@user_app.route('/login', methods=('GET', 'POST'))
def login():
    form = LoginForm()
    error = None
    if form.validate_on_submit():
        user = User.objects.filter(username=form.username.data).first()
        if user:
            if bc.hashpw(form.password.data, user.password) == user.password:
                session['username'] = form.username.data
                return 'User Logged In'
            else:
                user = None
        if not user:
            error = 'Incorrect credentials'
    return render_template('user/login.html', form=form, error=error)
Relevant part of my tests.py:
from application import create_app as create_app_base

def create_app(self):
    self.db_name = 'flaskbook_test'
    return create_app_base(
        MONGODB_SETTINGS={'DB': self.db_name},
        TESTING=True,
        WTF_CSRF_ENABLED=False,
        SECRET_KEY='SecretKey'
    )

def setUp(self):
    self.app_factory = self.create_app()
    self.app = self.app_factory.test_client()
    # self.app.application.app_context().push() <-- this did not help

def tearDown(self):
    db = _get_db()
    db.client.drop_database(db)

def test_login_user(self):
    # create user
    self.app.post('/register', data=self.user_dict())
    # login user
    rv = self.app.post('/login', data=dict(
        username='username',
        password='password'
    ))
    # check session is set
    with self.app as c:
        rv = c.get('/')
        assert session.get('username') == self.user_dict()['username']
I have already tried pushing an app context with self.app.application.app_context().push() as mentioned above:

with self.app.application.app_context():
    assert session.get('username') == self.user_dict()['username']
But it didn't work. Whenever I call session['username'] I get RuntimeError: working outside of request context.
My requirements.txt: Flask 0.10.1, Flask-Script 2.0.5, flask-mongoengine 0.7.4
Please help.
What you want is the request context, not the app context.
Flask includes some handy functions to push a request context for you - check out the Flask testing docs and you'll see a lot of relevant info, including the test_request_context method on the app object.
Combine that with app.test_client to push a request context and then simulate client behaviour such as POSTing to your endpoint. Try this:
# self.app_factory is the Flask app created in setUp; self.app is only the test client
with self.app_factory.test_request_context('/'), self.app_factory.test_client() as c:
    rv = c.post('/')
    assert session.get('username') == self.user_dict()['username']
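An alternative (not from the original answer) is the test client's session_transaction() helper, which exposes the session the client stored after a request. A minimal sketch using the names from the question:

with self.app_factory.test_client() as c:
    c.post('/login', data=dict(username='username', password='password'))
    # open the session that the client's cookie points to
    with c.session_transaction() as sess:
        assert sess.get('username') == 'username'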

FastApi Sqlalchemy how to manage transaction (session and multiple commits)

I have a CRUD class with insert and update functions, each with a commit at the end, as follows:
@staticmethod
def insert(db: Session, item: Item) -> None:
    db.add(item)
    db.commit()

@staticmethod
def update(db: Session, item: Item) -> None:
    ...
    db.commit()
I have an endpoint which receives a sqlalchemy session from a FastAPI dependency and needs to insert and update atomically (DB transaction).
What's the best practice for working with transactions? I can't use the CRUD as it is, since it performs more than one commit.
How should I handle the transactions? Where do you commit your session: in the CRUD, or only once per request in the FastAPI dependency function?
I had the same problem while using FastAPI. I couldn't find a way to use commit in separate methods and have them behave transactionally.
What I ended up doing was a flush instead of the commit, which sends the changes to the db, but doesn't commit the transaction.
One thing to note is that in FastAPI every request opens a new session and closes it once it's done. This is a rough example of what is happening, based on the example in the SQLAlchemy docs:
def run_my_program():
    # This happens in the `database = SessionLocal()` of the `get_db` method below
    session = Session()
    try:
        ThingOne().go(session)
        ThingTwo().go(session)
        session.commit()
    except:
        session.rollback()
        raise
    finally:
        # This is the same as the `get_db` method below
        session.close()
The session that is generated for the request is already a transaction. When you commit that session, what it actually does is this:
When using the Session in its default mode of autocommit=False, a new transaction will be begun immediately after the commit, but note that the newly begun transaction does not use any connection resources until the first SQL is actually emitted.
In my opinion, after reading that, it makes sense to handle the commit and rollback at the endpoint scope.
I created a dummy example of how this would work. I use everything from the FastAPI guide.
def create_user(db: Session, user: UserCreate):
    """
    Create user record
    """
    fake_hashed_password = user.password + "notreallyhashed"
    db_user = models.User(email=user.email, hashed_password=fake_hashed_password)
    db.add(db_user)
    db.flush()  # Changed this to a flush
    return db_user
And then use the CRUD operations in the endpoint as follows:
from typing import List

from fastapi import Depends, HTTPException
from sqlalchemy.orm import Session

...

def get_db():
    """
    Get SQLAlchemy database session
    """
    database = SessionLocal()
    try:
        yield database
    finally:
        database.close()

@router.post("/users", response_model=List[schemas.User])
def create_users(user_1: schemas.UserCreate, user_2: schemas.UserCreate, db: Session = Depends(get_db)):
    """
    Create two users
    """
    try:
        user_1 = crud.create_user(db=db, user=user_1)
        user_2 = crud.create_user(db=db, user=user_2)
        db.commit()
        return [user_1, user_2]
    except:
        db.rollback()
        raise HTTPException(status_code=400, detail="Duplicated user")
In the future I might investigate moving this to a middleware, but I don't think you can get the behavior you want by committing inside the CRUD functions.
A more pythonic approach is to let a context manager perform a commit or rollback depending on whether or not there was an exception.
A Transaction is a nice abstraction of what we are trying to accomplish.
class Transaction:
    def __init__(self, session: Session = Depends(get_session)):
        self.session = session

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is not None:
            # rollback and let the exception propagate
            self.session.rollback()
            return False
        self.session.commit()
        return True
And, use it in your APIs, like so:
def some_api(tx: Transaction = Depends(Transaction)):
    with tx:
        ThingOne().go()
        ThingTwo().go()
No need to pass session to ThingOne and ThingTwo. Inject it into them, like so:
class ThingOne:
    def __init__(self, session: Session = Depends(get_session)):
        ...

class ThingTwo:
    def __init__(self, session: Session = Depends(get_session)):
        ...
I would inject ThingOne and ThingTwo into the APIs as well:
def some_api(tx: Transaction = Depends(Transaction),
             one: ThingOne = Depends(ThingOne),
             two: ThingTwo = Depends(ThingTwo)):
    with tx:
        one.go()
        two.go()
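Both snippets above assume a get_session dependency that is not shown. A minimal sketch, mirroring the get_db dependency from the first answer (SessionLocal is assumed to be a sessionmaker configured elsewhere):

def get_session():
    # SessionLocal = sessionmaker(bind=engine), configured elsewhere
    session = SessionLocal()
    try:
        yield session
    finally:
        session.close()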

Flask+Pytest+SQLAlchemy: Can't create and drop tables when running pytest with flask-sqlalchemy

When I run the tests, it succeeds in connecting to the database, but it does not create the tables. I think there may be a different way to create tables when using flask-sqlalchemy, but I can't find the solution.
This is app.py
db = SQLAlchemy()

def create_app(config_name):
    app = Flask(__name__, template_folder='templates')
    app.wsgi_app = ProxyFix(app.wsgi_app)
    app.config.from_object(config_name)
    app.register_blueprint(api)
    db.init_app(app)

    @app.route('/ping')
    def health_check():
        return jsonify(dict(ok='ok'))

    @app.errorhandler(404)
    def ignore_error(err):
        return jsonify()

    app.add_url_rule('/urls', view_func=Shorty.as_view('urls'))
    return app
This is run.py
environment = environ['TINY_ENV']
config = config_by_name[environment]
app = create_app(config)

if __name__ == '__main__':
    app.run()
This is config.py
import os

basedir = os.path.abspath(os.path.dirname(__file__))

class Config:
    """
    set Flask configuration vars
    """
    # General config
    DEBUG = True
    TESTING = False

    # Database
    SECRET_KEY = os.environ.get('SECRET_KEY', 'my_precious_secret_key')
    SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root@localhost:3306/tiny'
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    SERVER_HOST = 'localhost'
    SERVER_PORT = '5000'

class TestConfig(Config):
    """
    config for test
    """
    TESTING = True
    SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root@localhost:3306/test_tiny'

config_by_name = dict(
    test=TestConfig,
    local=Config
)

key = Config.SECRET_KEY
This is models.py
from datetime import datetime
from flask_sqlalchemy import SQLAlchemy

db = SQLAlchemy()

class URLS(db.Model):
    __tablename__ = 'urls'
    id = db.Column(db.Integer, primary_key=True)
    original_url = db.Column(db.String(400), nullable=False)
    short_url = db.Column(db.String(200), nullable=False)
    # pass the callable so the timestamp is evaluated per row
    created_at = db.Column(db.DateTime, default=datetime.utcnow)
This is the test configuration.
db = SQLAlchemy()

@pytest.fixture(scope='session')
def app():
    test_config = config_by_name['test']
    app = create_app(test_config)
    app.app_context().push()
    return app

@pytest.fixture(scope='session')
def client(app):
    return app.test_client()

@pytest.fixture(scope='session')
def init_db(app):
    db.init_app(app)
    db.create_all()
    yield db
    db.drop_all()
The following might be the problem preventing your code from running multiple times and/or preventing you from dropping/creating your tables. Regardless of whether it solves your problem, it is something one might not be aware of and is quite important to keep in mind. :)
When you run your tests multiple times, db.drop_all() might not be called (because one of your tests failed), and therefore the tables might not be created on the next run (since they already exist). The problem lies in using a context manager without a try: finally:. (NOTE: every fixture using yield is essentially a context manager.)
from contextlib import contextmanager

def test_foo(db):
    print('begin foo')
    raise RuntimeError()
    print('end foo')

@contextmanager
def get_db():
    print('before')
    yield 'DB object'
    print('after')
This code represents your code, but without using the functionality of pytest. Pytest runs it more or less like this:
try:
    with get_db() as db:
        test_foo(db)
except Exception as e:
    print('Test failed')
One would expect an output similar to:
before
begin foo
after
Test failed
but we only get
before
begin foo
Test failed
While the contextmanager is active (yield has been executed), our test method is running. If an exception is raised during the execution of our test function, the execution is stopped WITHOUT running any code after the yield statement. To prevent this, we have to wrap our fixture/contextmanager in a try: ... finally: block, as finally is ALWAYS executed regardless of what has happened.
@contextmanager
def get_db():
    print('before')
    try:
        yield 'DB object'
    finally:
        print('after')
The code after the yield statement is now executed as expected.
before
begin foo
after
Test failed
If you want to learn more, see the relevant section in the contextmanager docs:
At the point where the generator yields, the block nested in the with statement is executed. The generator is then resumed after the block is exited. If an unhandled exception occurs in the block, it is reraised inside the generator at the point where the yield occurred. Thus, you can use a try…except…finally statement to trap the error (if any), or ensure that some cleanup takes place.
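Applied to the init_db fixture from the question, that guard would look roughly like this (a sketch, assuming the rest of the fixture stays as-is):

@pytest.fixture(scope='session')
def init_db(app):
    db.init_app(app)
    db.create_all()
    try:
        yield db
    finally:
        # drop_all now runs even if something fails while the fixture is active
        db.drop_all()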

Flask-SQLAlchemy fails to update data in while in thread

I am building an app where users will occasionally initiate a longer-running process. While running, the process will commit updates to a database entry.
Since the process takes some time, I am using the threading module to execute it. But values updated while in the thread are never actually committed.
An example:
from flask import Flask, url_for, redirect
from flask_sqlalchemy import SQLAlchemy
import time, threading, os

if os.path.exists('test.db'): os.remove('test.db')

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'
db = SQLAlchemy(app)

class Item(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    value = db.Column(db.Integer)
    def __init__(self, value): self.value = value

db.create_all()
item = Item(1)
db.session.add(item)
db.session.commit()

@app.route('/go', methods=['GET'])
def go():
    def fun(item):
        time.sleep(2)
        item.value += 1
        db.session.commit()
    thr = threading.Thread(target=fun, args=(item,))
    # thr.daemon = True
    thr.start()
    return redirect(url_for('view'))

@app.route('/view', methods=['GET'])
def view(): return str(Item.query.get(1).value)

app.run(host='0.0.0.0', port=8080, debug=True)
My expectation was that the item's value would be asynchronously updated after two seconds (when fun completes), and that subsequent requests to /view would reveal the updated value. But this never occurs. I am not an expert on what is going on in the threading module; am I missing something?
I have tried setting thr.daemon=True as pointed out in some posts, but that is not it. The closest SO post I have found is this one; that question does not have a minimal and verifiable example and has not been answered.
I guess this is due to the fact that sessions are thread-local, as mentioned in the documentation. In your case, item was created in one thread and then passed to a new thread to be modified directly.
You can either use scoped sessions as suggested in the documentation, or simply change your URI config to bypass this behavior:
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db?check_same_thread=False'
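For the scoped-session route, a minimal sketch with plain SQLAlchemy (the engine URL and the Item model are taken from the question; scoped_session gives each thread its own session, so the row is loaded and committed inside the worker thread):

from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker

engine = create_engine('sqlite:///test.db')
Session = scoped_session(sessionmaker(bind=engine))

def fun(item_id):
    session = Session()                       # this thread's own session
    obj = session.query(Item).get(item_id)    # load the row in this thread
    obj.value += 1
    session.commit()
    Session.remove()                          # discard the thread-local session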
After some debugging I figured out a solution, though I still do not understand the problem. It has to do with how the database object is referenced. If fun updates an object returned by a fresh query, it works as expected:
def fun(item_id):
    time.sleep(2)
    Item.query.get(item_id).value += 1
    db.session.commit()
In context:
from flask import Flask, url_for, redirect
from flask_sqlalchemy import SQLAlchemy
import time, threading, os

if os.path.exists('test.db'): os.remove('test.db')

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'
db = SQLAlchemy(app)

class Item(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    value = db.Column(db.Integer)
    def __init__(self, value): self.value = value

db.create_all()
item = Item(1)
db.session.add(item)
db.session.commit()

@app.route('/go', methods=['GET'])
def go():
    def fun(item_id):
        time.sleep(2)
        Item.query.get(item_id).value += 1
        db.session.commit()
    thr = threading.Thread(target=fun, args=(item.id,))
    # thr.daemon = True
    thr.start()
    return redirect(url_for('view'))

@app.route('/view', methods=['GET'])
def view(): return str(Item.query.get(1).value)

app.run(host='0.0.0.0', port=8080, debug=True)
I would be very pleased to hear from anyone who knows what exactly is going on here!

Fall back to Flask's default session manager when session database is down?

I'm using SQLAlchemy for my session-management database, and have created a custom SessionInterface as per the documentation.
My dev database went down today, and now I cannot access my site, as would be expected. Is there a way for me to fall back to Flask's default session manager in this event?
Here is my current implementation of SessionInterface
class SqlAlchemySessionInterface(SessionInterface):
    # ...
    def open_session(self, app, request):
        sid = request.cookies.get(app.session_cookie_name)
        if sid:
            # error is raised here when database is down
            stored_session = DBSession.query.filter_by(sid=sid).first()
            # ...
I have a naive solution to the problem of a crashed database that leverages an in-memory dict as a backup:
# A backup memory storage for sessions
backup = {}

class SqlAlchemySessionInterface(SessionInterface):
    # ...
    def open_session(self, app, request):
        sid = request.cookies.get(app.session_cookie_name)
        if sid:
            try:
                stored_session = DBSession.query.filter_by(sid=sid).first()
            except DatabaseError:
                stored_session = backup.get(sid)
            # ...
You can extend the default secure cookie session interface implementation:
class ReliableSessionInterface(SecureCookieSessionInterface):
    def open_session(self, app, request):
        try:
            return self._open_db_session(app, request)
        except DatabaseError:
            return super().open_session(app, request)

    def save_session(self, app, session, response):
        try:
            self._save_session_to_db(app, session, response)
        except DatabaseError:
            super().save_session(app, session, response)
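To use it, the interface would be assigned to the app (this assumes _open_db_session and _save_session_to_db wrap the DB-backed logic from your existing custom interface):

app.session_interface = ReliableSessionInterface()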
However, such a requirement sounds a little bit strange.
