I'm trying to separate the Read and write DB operations via Flask Sqlalchemy. I'm using binds to connect to the mysql databases. I would want to perform the write operation in Master and Reads from slaves. There does not seem to be an in built way to handle this.
I'm new to python and was surprised that a much needed functionality like this is not pre-built into flask-sqlalchemy already. Any help is appreciated. Thanks
There is no official support, but you can customize the Flask-SQLAlchemy session to use master/slave connections.
from functools import partial
from sqlalchemy import orm
from flask import current_app
from flask_sqlalchemy import SQLAlchemy, get_state
class RoutingSession(orm.Session):
    """SQLAlchemy session that can route queries to a named bind.

    By default all work goes to the master engine; calling
    ``using_bind(name)`` returns a session copy that resolves its engine
    from the named SQLALCHEMY_BINDS entry instead.
    """

    def __init__(self, db, autocommit=False, autoflush=True, **options):
        self.app = db.get_app()
        self.db = db
        # Name of the bind to route to; None means use the default engine.
        self._bind_name = None
        orm.Session.__init__(
            self, autocommit=autocommit, autoflush=autoflush,
            bind=db.engine,
            binds=db.get_binds(self.app),
            **options
        )

    def get_bind(self, mapper=None, clause=None):
        """Return the engine for the requested bind, else the default one."""
        try:
            state = get_state(self.app)
        except (AssertionError, AttributeError, TypeError) as err:
            # Bug fix: the original did `'...' + err`, concatenating a str
            # with an Exception, which raises TypeError inside the handler.
            # Use lazy %-style logging arguments instead.
            current_app.logger.info(
                'cant get configuration. default bind. Error: %s', err)
            return orm.Session.get_bind(self, mapper, clause)
        # If there are no binds configured, use default SQLALCHEMY_DATABASE_URI
        if not state or not self.app.config['SQLALCHEMY_BINDS']:
            return orm.Session.get_bind(self, mapper, clause)
        # If the caller requested an exact bind, resolve its engine.
        if self._bind_name:
            return state.db.get_engine(self.app, bind=self._bind_name)
        # No explicit bind requested: connect to the default engine.
        return orm.Session.get_bind(self, mapper, clause)

    def using_bind(self, name):
        """Return a copy of this session that routes to the named bind."""
        bind_session = RoutingSession(self.db)
        vars(bind_session).update(vars(self))
        bind_session._bind_name = name
        return bind_session
class RouteSQLAlchemy(SQLAlchemy):
    """Flask-SQLAlchemy subclass whose sessions support ``using_bind``."""

    def __init__(self, *args, **kwargs):
        SQLAlchemy.__init__(self, *args, **kwargs)
        # Expose using_bind directly on the scoped-session proxy so callers
        # can write db.session.using_bind('slave') without calling it first.
        self.session.using_bind = lambda s: self.session().using_bind(s)

    def create_scoped_session(self, options=None):
        """Build a scoped session whose factory produces RoutingSession."""
        if options is None:
            options = {}
        scopefunc = options.pop('scopefunc', None)
        session_factory = partial(RoutingSession, self, **options)
        return orm.scoped_session(session_factory, scopefunc=scopefunc)
Then the default session will be the master; when you want to select from a slave you can call it explicitly. Here are the examples:
In your app:
from flask import Flask
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
# Default (master) database URI; writes and un-routed reads go here.
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql:///master'
# Extra binds; reads can be routed to 'slave' via session.using_bind('slave').
app.config['SQLALCHEMY_BINDS'] = {
    'slave': 'postgresql:///slave'
}
# NOTE(review): RouteSQLAlchemy must be imported/defined in this module --
# the flask_sqlalchemy import above only provides the stock SQLAlchemy class.
db = RouteSQLAlchemy(app)
Select from master
session.query(User).filter_by(id=1).first()
Select from slave
session.using_bind('slave').query(User).filter_by(id=1).first()
Here is the documentation http://packages.python.org/Flask-SQLAlchemy/binds.html
Related
I am currently working on a flask app in which you can have multiple databases connected to it.
Each request to the app should be handled by a certain database depending on the url.
I am now trying to replace flask-sqlalchemy with sqlalchemy in order to use scoped-session to take care of my problem.
I have a session_registry in order to store the sessions:
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, scoped_session
class SessionRegistry(object):
    """Registry mapping connection names to scoped SQLAlchemy sessions."""

    # Shared, class-level registry: name -> scoped_session.
    _registry = {}

    def get_database_connection(self, name, **kwargs):
        """Return the scoped session registered under *name*.

        Raises KeyError when no connection was registered for the name.
        """
        return self._registry[name]

    def add_database_connection(self, url, name, **kwargs):
        """Create and register a scoped session for *url* under *name*.

        Returns True once a session is registered for the name.
        """
        # Bug fix: the original tested `url not in self._registry`, but the
        # registry is keyed by *name*, so a second name reusing the same URL
        # would silently never be registered.
        if name not in self._registry:
            engine = create_engine(url)
            Session = sessionmaker(bind=engine)
            session = scoped_session(Session)
            self._registry[name] = session
        # Bug fix: the original returned the misspelled name `Fals`
        # (NameError) on the falsy branch.
        return self._registry[name] is not None
The problem that I have now is, that I don't know how to pass it to my routes in order to use that session. Here is an example class where I am trying to use it:
class SomeJob():
    """Fetch SomeModel rows ordered by id and serialize them to JSON."""

    def get(self, lim=1000, order="asc"):
        """Return up to *lim* rows as JSON, ordered by id asc or desc."""
        # Choose the ordering expression first, then issue a single query.
        ordering = (SomeModel.id.desc() if order == "desc"
                    else SomeModel.id.asc())
        rows = session.query(SomeModel).order_by(ordering).limit(lim).all()
        # deserialize to json
        schema = SomeSchema(many=True)
        json_res = schema.dump(rows)
        # return json
        return json_res
My question now is, how do I pass that session to the object properly?
I am trying to access a pyodbc connection from multiple places within my code.
Currently the user passes in the connection details via command line, and I process them using plac.annotation, problem is, I do not know how to share this object across all the project. What I have so far is:
A Singleton Class for storing the connection
class DatabaseInstance:
    """
    Singleton Class holding a Database Connection
    """

    class __DatabaseInstance:
        # Inner class holding the actual connection parameters; one shared
        # instance is stored on DatabaseInstance.instance.
        def __init__(self, server, database, schema, table, user, password):
            self.server = server
            self.database = database
            self.schema = schema
            self.table = table
            self.username = user
            self.passw = password

        def __str__(self):
            # Human-readable summary: repr, then server, database, schema,
            # table, and user in that order.
            return "{} DB: {}#[{}].[{}].[{}] # {}".format(
                repr(self),
                self.server,
                self.database,
                self.schema,
                self.table,
                self.username,
            )

        def get_connection(self):
            """Return the shared pyodbc connection, opening it on first use."""
            # NOTE(review): the connection is cached on the *outer* class
            # attribute DatabaseInstance.connection, so all instances share it.
            if DatabaseInstance.connection:
                return DatabaseInstance.connection
            else:
                DatabaseInstance.connection = pyodbc.connect(
                    "DRIVER=SQL Server;SERVER="
                    + self.server
                    + ";PORT=1433;DATABASE="
                    + self.database
                    + ";UID="
                    + self.username
                    + ";PWD="
                    + self.passw
                )
                return DatabaseInstance.connection

    # Shared singleton state (class attributes).
    instance = None
    connection = None

    def __init__(self, server, database, schema, table, user, password):
        # Only the first construction stores the parameters; later
        # constructions silently ignore their arguments and keep reusing
        # the existing inner instance.
        if not DatabaseInstance.instance:
            DatabaseInstance.instance = DatabaseInstance.__DatabaseInstance(
                server, database, schema, table, user, password
            )

    def __getattr__(self, name):
        # Delegate attribute access to the shared inner instance.
        return getattr(self.instance, name)
Now, In my main, I get the params, and create an instance for the database:
# First construction stores the CLI-provided parameters in the singleton.
# NOTE(review): `connection` here is the DatabaseInstance wrapper, not an
# open pyodbc connection -- call .get_connection() to obtain that.
connection = DatabaseInstance(
    server=server,
    database=database,
    schema=schema,
    table=table,
    user=user,
    password=passw,
)
The application needs to access this object from different modules and submodules, but connection is within the scope of a function.
Is there a better way to do it than just passing down this object from function to function up until it is used by the necessary functions?
Don't instantiate the singleton in your main. Instantiate DatabaseInstance in your module as db, and when you need access to the singleton, reach into the module and use it.
# Bug fix: `import .thatmodule` is a SyntaxError; a relative import must
# use the `from ... import ...` form.
from . import thatmodule

db = thatmodule.db.get_connection()
It is perfectly fine to instantiate in your main and pass it into code that needs access to the database, to answer your final question. It makes for an uglier API, though.
Both are fine. The choice is up to you.
I am trying to access the application configuration inside a blueprint authorisation.py which is in a package api. I am initializing the blueprint in __init__.py which is used in authorisation.py.
__init__.py
from flask import Blueprint

api_blueprint = Blueprint("xxx.api", __name__, None)

# Imported at the bottom (not the top) to avoid a circular import: the
# authorisation module itself imports api_blueprint from this package.
from api import authorisation
authorisation.py
from flask import request, jsonify, current_app
from ..oauth_adapter import OauthAdapter
from api import api_blueprint as api

# NOTE(review): these run at import time, outside any application context,
# which is what triggers the "RuntimeError: working outside of application
# context" described below -- config must be read inside a request or an
# explicit app context instead.
client_id = current_app.config.get('CLIENT_ID')
client_secret = current_app.config.get('CLIENT_SECRET')
scope = current_app.config.get('SCOPE')
callback = current_app.config.get('CALLBACK')

auth = OauthAdapter(client_id, client_secret, scope, callback)
# Fix: the decorator's "@" was mangled to "#" (turning it into a comment),
# so the view was never registered -- restore the route decorator.
@api.route('/authorisation_url')
def authorisation_url():
    """Return the OAuth authorisation URL for this API."""
    url = auth.get_authorisation_url()
    return str(url)
I am getting RuntimeError: working outside of application context
I understand why that is but then what is the correct way of accessing those configuration settings?
----Update----
Temporarily, I have done this.
# Fix: restore the "@" that was mangled to "#" so the route is registered.
@api.route('/authorisation_url')
def authorisation_url():
    """Build the adapter per request, so config is read inside app context."""
    client_id, client_secret, scope, callback = config_helper.get_config()
    auth = OauthAdapter(client_id, client_secret, scope, callback)
    url = auth.get_authorisation_url()
    return str(url)
Use flask.current_app in place of app in the blueprint view.
from flask import current_app
# Fix: restore the "@" that was mangled to "#" so the route is registered.
@api.route("/info")
def get_account_num():
    """Read a config value via the current_app proxy inside a request."""
    num = current_app.config["INFO"]
    # NOTE(review): this snippet ends without returning a response; a real
    # view function must return one.
The current_app proxy is only available in the context of a request.
Overloading record method seems to be quite easy:
api_blueprint = Blueprint('xxx.api', __name__, None)
api_blueprint.config = {}

# Fix: restore the "@" that was mangled to "#"; record() callbacks run
# when the blueprint is registered on an application.
@api_blueprint.record
def record_params(setup_state):
    """Copy the registering application's config onto the blueprint."""
    app = setup_state.app
    # Fix: dict.iteritems() is Python 2 only; a plain dict() copy of the
    # config mapping is equivalent and works on Python 2 and 3.
    api_blueprint.config = dict(app.config)
To build on tbicr's answer, here's an example overriding the register method example:
from flask import Blueprint
auth = None
class RegisteringExampleBlueprint(Blueprint):
    """Blueprint that builds the OAuth adapter when registered on an app."""

    def register(self, app, options, first_registration=False):
        global auth
        cfg = app.config
        # Read the OAuth settings from the registering app's configuration.
        auth = OauthAdapter(
            cfg.get('CLIENT_ID'),
            cfg.get('CLIENT_SECRET'),
            cfg.get('SCOPE'),
            cfg.get('CALLBACK'),
        )
        super(RegisteringExampleBlueprint,
              self).register(app, options, first_registration)


the_blueprint = RegisteringExampleBlueprint('example', __name__)
And an example using the record decorator:
from flask import Blueprint
from api import api_blueprint as api
auth = None
# Note there's also a record_once decorator
# Fix: restore the "@" that was mangled to "#" so the callback registers.
@api.record
def record_auth(setup_state):
    """Build the OAuth adapter from the registering app's config."""
    global auth
    config = setup_state.app.config
    client_id = config.get('CLIENT_ID')
    client_secret = config.get('CLIENT_SECRET')
    scope = config.get('SCOPE')
    callback = config.get('CALLBACK')
    auth = OauthAdapter(client_id, client_secret, scope, callback)
Blueprints have a register method which is called when you register the blueprint. So you can override this method or use the record decorator to describe logic which depends on the app.
The current_app approach is fine but you must have some request context. If you don't have one (some pre-work like testing, e.g.) you'd better place
with app.test_request_context('/'):
before this current_app call.
Otherwise you will get RuntimeError: working outside of application context instead.
You either need to import the main app variable (or whatever you have called it) that is returned by Flask():
from someplace import app
app.config.get('CLIENT_ID')
Or do that from within a request:
# Fix: restore the "@" that was mangled to "#" so the route is registered.
@api.route('/authorisation_url')
def authorisation_url():
    """Read config inside the request, where current_app is available."""
    client_id = current_app.config.get('CLIENT_ID')
    url = auth.get_authorisation_url()
    return str(url)
You could also wrap the blueprint in a function and pass the app as an argument:
Blueprint:
def get_blueprint(app):
    """Build and return the blueprint, given the app for configuration.

    Bug fix: Blueprint() requires at least a name and an import name; the
    original called it with no arguments, which raises a TypeError.
    """
    bp = Blueprint('my_blueprint', __name__)
    return bp
Main:
from . import my_blueprint
app.register_blueprint(my_blueprint.get_blueprint(app))
I know this is an old thread. But while writing a flask service, I used a method like this to do it. It's longer than the solutions above but it gives you the possibility to use customized class yourself. And frankly, I like to write services like this.
Step 1:
I added a struct in a different module file where we can make the class structs singleton. And I got this class structure from this thread already discussed. Creating a singleton in Python
class Singleton(type):
    """Metaclass making every instantiation return one shared instance.

    The first call creates and caches the instance; every later call
    re-runs ``__init__`` on the cached instance with the new arguments
    and returns it.
    """

    _instances = {}

    def __call__(cls, *args, **kwargs):
        instance = cls._instances.get(cls)
        if instance is None:
            # First construction: create and cache the single instance.
            instance = super(Singleton, cls).__call__(*args, **kwargs)
            cls._instances[cls] = instance
        else:
            # Re-initialize the existing instance, mirroring a normal call.
            instance.__init__(*args, **kwargs)
        return instance
Step 2:
Then I created a Singleton EnvironmentService class from our Singleton class that we defined above, just for our purpose. Instead of recreating such classes, create them once and use them in other modules, routes, etc. import. We can access the class with the same reference.
from flask import Config
from src.core.metaclass.Singleton import Singleton
class EnvironmentService(metaclass=Singleton):
    """Singleton giving app-wide access to the Flask configuration."""

    # Backing store for the Flask Config object; set via initialize().
    __config: Config = None

    def initialize(self, env):
        """Remember *env* (the app config) and return the singleton."""
        self.__config = env
        return EnvironmentService()

    def get_all(self):
        """Return a shallow copy of the entire configuration."""
        return self.__config.copy()

    def get_one(self, key):
        """Look up a single configuration key (None when missing)."""
        return self.__config.get(key)
Step 3:
Now we include the service in the application in our project root directory. This process should be applied before the routes.
from flask import Flask
from src.services.EnvironmentService import EnvironmentService

app = Flask(__name__)

# Here is our service: initialize the singleton with the app's config
# before any routes are registered, so routes can use it immediately.
env = EnvironmentService().initialize(app.config)

# Your routes...
Usage:
Yes, we can now access our service from other routes.
from src.services.EnvironmentService import EnvironmentService
key = EnvironmentService().get_one("YOUR_KEY")
I'm trying to use pytest to write functional tests for a Flask application which interfaces with a Neo4j graph database via the Neo4j driver.
The toy example using the Movie Database is outlined below using a route which deletes a record from the database. For testing purposes I would like to wrap the session in a transaction which would be rolled-back rather than committed.
The application has routes which runs Cypher statements within an auto-commit transaction via, session.run(...), however I can circumvent this logic during testing by enforcing the use of a transaction prior to request,
session.begin_transaction()
...
session.rollback_transaction()
My question is I'm unsure how to leverage this pattern using pytest. Do I have to somehow bind the database to the client? Also is there a fixture I can use which will ensure that every test leveraging the client will be wrapped in a transaction which can be rolled back?
myapp/app.py:
from flask import _app_ctx_stack, Flask, Response
from flask_restplus import Api, Resource
from neo4j.v1 import GraphDatabase
class FlaskGraphDatabase(object):
    """Flask extension lazily managing a Neo4j driver/session per app context."""

    def __init__(self, app=None):
        self.app = app
        if app is not None:
            self.init_app(app)

    def init_app(self, app):
        """Register teardown so context-scoped resources are closed."""
        # Fix: restore the "@" (mangled to "#") so teardown is registered.
        @app.teardown_appcontext
        def teardown(exception):
            # Close whatever was lazily created on this app context.
            ctx = _app_ctx_stack.top
            if hasattr(ctx, 'neo4j_session'):
                ctx.neo4j_session.close()
            if hasattr(ctx, 'neo4j_driver'):
                ctx.neo4j_driver.close()

    # Fix: restore the "@" (mangled to "#") -- without @property these
    # would be plain methods and gdb.driver/gdb.session would be bound
    # methods, not objects.
    @property
    def driver(self):
        """Lazily create one Neo4j driver per application context."""
        ctx = _app_ctx_stack.top
        if ctx is not None:
            if not hasattr(ctx, 'neo4j_driver'):
                ctx.neo4j_driver = GraphDatabase.driver('bolt://localhost:7687')
            return ctx.neo4j_driver

    @property
    def session(self):
        """Lazily create one Neo4j session per application context."""
        ctx = _app_ctx_stack.top
        if ctx is not None:
            if not hasattr(ctx, 'neo4j_session'):
                ctx.neo4j_session = self.driver.session()
            return ctx.neo4j_session
api = Api()
gdb = FlaskGraphDatabase()

# Fix: restore the "@" that was mangled to "#" so the resource is routed.
@api.route('/<source>/acted_in/<target>')
class Friend(Resource):
    """DELETE an ACTED_IN relationship between a person and a movie."""

    def delete(self, source, target):
        statement = """
        MATCH (s:Person)-[r:ACTED_IN]->(t:Movie)
        WHERE s.name = {source} AND t.title = {target}
        DELETE r
        """
        cursor = gdb.session.run(statement, source=source, target=target)
        # 204 when the relationship existed and was deleted, 404 otherwise.
        status = 204 if cursor.summary().counters.contains_updates else 404
        return Response(status=status)
def create_app():
    """Application factory: build the Flask app and wire up extensions."""
    application = Flask(__name__)
    gdb.init_app(application)
    api.init_app(application)
    return application
if __name__ == '__main__':
    # Run the development server when executed directly.
    app = create_app()
    app.run()
tests/conftest.py:
import pytest
from myapp.app import create_app
# Fix: restore the "@" that was mangled to "#" so pytest sees the fixture.
# (pytest.yield_fixture is deprecated in modern pytest; plain
# pytest.fixture supports yield fixtures too.)
@pytest.yield_fixture(scope='session')
def app():
    """Session-scoped Flask application for the test suite."""
    yield create_app()
# Fix: restore the "@" that was mangled to "#" so pytest sees the fixture.
@pytest.yield_fixture(scope='session')
def client(app):
    """Test client bound to the session-scoped app."""
    with app.test_client() as client:
        yield client
tests/test_routes.py:
def test_delete(client):
    # Deleting an existing ACTED_IN relationship should answer 204 No Content.
    res = client.delete('/Keanu Reeves/acted_in/The Matrix')
    assert res.status_code == 204
Yes, you can use a fixture to achieve this: add an autouse fixture with session scope in your conftest.py which will start a transaction at the beginning of the test session and roll it back at the end.
tests/conftest.py:
from neomodel import db
# Fix: restore the "@" that was mangled to "#" so pytest sees the fixture.
@pytest.fixture(autouse=True, scope="session")
def setup_neo_test_db():
    """Wrap the whole test session in one transaction that is rolled back."""
    print("Initializing db transaction")
    db.begin()
    yield
    print("Rolling back")
    db.rollback()
I'm experiencing a similar problem to Passing application context to custom converter using the Application Factory pattern where I'm using a custom URL converter for converting a Neo4j graph database ID into a node object, i.e.,
import atexit
from flask import Flask
from neo4j.v1 import GraphDatabase
from werkzeug.routing import BaseConverter
class NodeConverter(BaseConverter):
    """URL converter resolving a node id in the URL to the node object."""

    def to_python(self, value):
        # One short-lived session per conversion, using the module driver.
        with driver.session() as session:
            result = session.run('MATCH (n {id: $id}) RETURN n', id=value)
            record = result.single()
            return record.values()[0]
app = Flask(__name__)
app.url_map.converters['node'] = NodeConverter

# Single process-wide driver; atexit closes it at interpreter shutdown.
driver = GraphDatabase.driver('bolt://localhost')
atexit.register(lambda driver=driver: driver.close())
# Fix: restore the "@" that was mangled to "#" so the route is registered.
@app.route('/<node:node>')
def test(node):
    """Demo view: the converter hands us the resolved node object."""
    # Fix: `print node` is Python 2-only syntax; the call form works on
    # both Python 2 and 3.
    print(node)


if __name__ == '__main__':
    app.run()
Though this approach leverages a single database connection there are a couple of major drawbacks: i) the database connection cannot be configured via the Flask config, and ii) if the database fails so does the Flask app.
To counter this I created a local extension per http://flask.pocoo.org/docs/0.12/extensiondev/, i.e.,
from flask import _app_ctx_stack, Flask
from neo4j.v1 import GraphDatabase
from werkzeug.routing import BaseConverter
class MyGraphDatabase(object):
    """Flask extension creating one Neo4j driver per application context."""

    def __init__(self, app=None):
        self.app = app
        if app is not None:
            self.init_app(app)

    def init_app(self, app):
        """Register teardown so the context's driver is closed."""
        # Fix: restore the "@" (mangled to "#") so teardown is registered.
        @app.teardown_appcontext
        def teardown(exception):
            ctx = _app_ctx_stack.top
            if hasattr(ctx, 'driver'):
                ctx.driver.close()

    # Fix: restore the "@" (mangled to "#") -- without @property,
    # db.driver would be a bound method, not the driver object.
    @property
    def driver(self):
        """Lazily create the driver from NEO4J_URI on first access."""
        ctx = _app_ctx_stack.top
        if ctx is not None and not hasattr(ctx, 'driver'):
            # NOTE(review): this reads the module-level `app`; using
            # current_app.config would decouple the extension from one app.
            ctx.driver = GraphDatabase.driver(app.config['NEO4J_URI'])
        return ctx.driver
class NodeConverter(BaseConverter):
    """URL converter that loads a node by id inside a temporary app context."""

    def to_python(self, value):
        # URL parsing happens outside any app context, so one is pushed
        # here just for the duration of the lookup.
        with app.app_context():
            with db.driver.session() as session:
                result = session.run('MATCH (n {id: $id}) RETURN n', id=value)
                record = result.single()
                return record.values()[0]
# Wire up the extension: converter registration must happen before routes
# using the <node:...> converter are defined.
db = MyGraphDatabase()
app = Flask(__name__)
app.config.from_pyfile('app.cfg')
app.url_map.converters['node'] = NodeConverter
db.init_app(app)
# Fix: restore the "@" that was mangled to "#" so the route is registered.
@app.route('/<node:node>')
def test(node):
    """Demo view: the converter hands us the resolved node object."""
    # Fix: `print node` is Python 2-only syntax; the call form works on
    # both Python 2 and 3.
    print(node)


if __name__ == '__main__':
    app.run()
This issue is given that the URL converter is outside of the app context I needed to include the following block,
with app.app_context():
...
where a temporary app context is created during URL parsing and then discarded immediately which seems suboptimal from a performance perspective. Is this the correct approach for this?
The other problem with this configuration is that one needs to be cognizant of potential circular references when the converter and application reside in different files since the NodeConverter requires the app and the app registers the NodeConverter.