schema_translate_map for SQLAlchemy AsyncSession - python

I need to change the PostgreSQL schema in a SQLAlchemy AsyncSession.
For the sync Session we have session.connection(execution_options={"schema_translate_map": {None: schema}}).
For async I found one way to do it: MyModel.__table__.schema = "MySchema", but that mutates the model at runtime, which is really bad for async code.
Is there something like schema_translate_map for AsyncSession?

I had the same exact problem, and the way I fixed it is like this:

from asyncio import current_task

from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession, async_scoped_session
from sqlalchemy.orm import sessionmaker


async def main():
    db_connstring = "postgresql+asyncpg://scott:tiger@localhost/test"
    engine = create_async_engine(db_connstring, pool_pre_ping=True)
    session = async_scoped_session(
        sessionmaker(bind=engine, expire_on_commit=False, class_=AsyncSession),
        scopefunc=current_task)
    schema = "my_schema"
    connection = await session.connection()
    await connection.execution_options(schema_translate_map={None: schema})
    ...
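
If you are on SQLAlchemy 1.4 or later, another option (a minimal sketch, not part of the original answer) is to bake the schema_translate_map into a copy of the engine via execution_options(), so every session bound to that copy translates the schema without touching the connection by hand:

# Sketch only; the URL and "my_schema" are placeholders.
from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession
from sqlalchemy.orm import sessionmaker

engine = create_async_engine("postgresql+asyncpg://scott:tiger@localhost/test")

# execution_options() returns a copy of the engine carrying the option, so every
# connection checked out by sessions bound to it applies the translation.
translated_engine = engine.execution_options(schema_translate_map={None: "my_schema"})

async_session_factory = sessionmaker(
    bind=translated_engine, expire_on_commit=False, class_=AsyncSession)


async def do_work():
    async with async_session_factory() as session:
        ...  # ORM statements against schema-less tables now target "my_schema"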

Related

SQLAlchemy dynamic schema-names does not work if session is returned from try/except or with-block

I am trying to use multiple schemas in SQLAlchemy.
Somehow it does not work properly when using a context manager (with-block) or a try/except block.
This is my short execution example:
def import_units(schema_name: str):
    from taxes.models import Tax

    dbs = get_db_session(schema_name)
    dbs.add(Tax(percentage=19.00, abbreviation="abc"))
    dbs.commit()  ## during commit I get ERROR:
    ## sqlalchemy.exc.ProgrammingError: (psycopg2.errors.UndefinedTable) relation "per_user.taxes" does not exist
    ## LINE 1: INSERT INTO per_user.taxes (percentage, abbreviation, de...
Here is how I create the session and return it, with and without blocks:

def get_db_session(schema_name: str):
    dbs = SessionLocal()
    dbs.connection(execution_options={"schema_translate_map": {'per_user': schema_name}})

    return dbs  ## this return WORKS (translates into the right schema name)

    try:
        return dbs  ## this does NOT work (no translation, uses 'per_user' as schema name)
    finally:
        dbs.close()

    with SessionLocal() as s:
        s.connection(execution_options={"schema_translate_map": {'per_user': schema_name}})
        return s  ## this does NOT work (no translation, uses 'per_user' as schema name)
In case the imports and session setup give you any more context, here they are too:
from config.settings import SQL_DATABASE_URI
import warnings
from sqlalchemy import create_engine, exc
from sqlalchemy.orm import sessionmaker
engine = create_engine(SQL_DATABASE_URI, pool_pre_ping=True, future=True)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine, future=True)
warnings.filterwarnings("always", category=exc.RemovedIn20Warning)
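
No answer is included in this excerpt, but a plausible reading is that dbs.close() in the finally block (and the exit of the with-block) closes the session before the caller ever uses it; the next operation then checks out a fresh connection that no longer carries the translate map set earlier. A pattern that avoids per-connection state (a sketch, assuming SQLAlchemy 1.4+; session_for_schema is a hypothetical helper) is to bind the session to an engine copy that already carries the map:

# Sketch, not a confirmed answer: the engine copy returned by execution_options()
# applies the schema_translate_map to every connection it hands out.
from sqlalchemy.orm import sessionmaker


def session_for_schema(schema_name: str):  # hypothetical helper
    translated = engine.execution_options(
        schema_translate_map={"per_user": schema_name})
    return sessionmaker(autocommit=False, autoflush=False,
                        bind=translated, future=True)()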

FastAPI database dependency setup for connection pooling

Consider the following fastapi setup:
application.add_event_handler(
    "startup",
    create_start_app_handler(application, settings),
)


def create_start_app_handler(
    app: FastAPI,
    settings: AppSettings,
) -> Callable:
    async def start_app() -> None:
        await connect_to_db(app, settings)
    return start_app


async def connect_to_db(app: FastAPI, settings: AppSettings) -> None:
    db_url = settings.DATABASE_URL
    engine = create_engine(db_url, pool_size=settings.POOL_SIZE, max_overflow=settings.MAX_OVERFLOW)
    SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
    db = SessionLocal()

    def close_db():
        db.close()
        engine.dispose()

    app.state.db = db
    app.state.close_db = close_db
close_db is used to close the database connection on app shutdown
I have the following dependencies defined:
def _get_db(request: Request) -> Generator:
    yield request.app.state.db


def get_repository(
    repo_type: Type[BaseRepository],
) -> Callable[[Session], BaseRepository]:
    def _get_repo(
        sess: Session = Depends(_get_db),
    ) -> BaseRepository:
        return repo_type(sess)
    return _get_repo
Would this still allow me to take advantage of connection pooling?
Also, this feels a little hacky and I could use some feedback if there's anything in particular that I should not be doing.
To be blunt: it seems overly complicated for something that is covered pretty well in the docs.
In your case, you create only one instance of SessionLocal() and share it across all your requests (because you store it in app.state). In other words: no, this will not make use of connection pooling; it will use only one connection.
A better approach is to yield an instance per request, either via middleware or via a dependency. That way, the connection is actually closed when the incoming request has been fully handled. For example, like this:
def get_db():
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()


@app.get("/")
def root(db: SessionLocal = Depends(get_db)):
    return "hello world"
I am not sure how you ended up where you did, but I would recommend refactoring quite a bit.
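
For completeness, a middleware-based variant of the same per-request pattern (a sketch, reusing the SessionLocal factory above; request.state.db is my own attribute name) could look like this:

# Sketch only: open a session per request and close it once the response is built,
# which returns the underlying connection to the engine's pool.
@app.middleware("http")
async def db_session_middleware(request: Request, call_next):
    request.state.db = SessionLocal()
    try:
        response = await call_next(request)
    finally:
        request.state.db.close()
    return response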

Cache or Reuse Mongodb connection in AWS lambda using Python

I'm building a serverless application using Python and MongoDB. In the documentation I found that I need to create the db connection outside the handler function. I have used the Mangum Python package as an adapter to handle API Gateway.
from fastapi import FastAPI, Body, status, Depends
from mangum import Mangum
from motor.motor_asyncio import AsyncIOMotorClient
from fastapi.responses import JSONResponse
from app.utility.config import MONGODB_URL, MAX_CONNECTIONS_COUNT, MIN_CONNECTIONS_COUNT, MAX_DB_THREADS_WAIT_COUNT, MAX_DB_THREAD_QUEUE_TIMEOUT_COUNT

application = FastAPI()

client = AsyncIOMotorClient(str(MONGODB_URL),
                            maxPoolSize=MAX_CONNECTIONS_COUNT,
                            minPoolSize=MIN_CONNECTIONS_COUNT,
                            waitQueueMultiple=MAX_DB_THREADS_WAIT_COUNT,
                            waitQueueTimeoutMS=MAX_DB_THREAD_QUEUE_TIMEOUT_COUNT)


async def get_database() -> AsyncIOMotorClient:
    return client


@application.post("/createStudent")
async def create_student(student = Body(...), db: AsyncIOMotorClient = Depends(get_database)):
    new_student = await db["college"]["students"].insert_one(student)
    created_student = await db["college"]["students"].find_one({"_id": new_student.inserted_id})
    return JSONResponse(status_code=status.HTTP_201_CREATED, content=created_student)


@application.post("/createTeacher")
async def create_teacher(teacher = Body(...), db: AsyncIOMotorClient = Depends(get_database)):
    new_teacher = await db["college"]["students"].insert_one(teacher)
    created_teacher = await db["college"]["students"].find_one({"_id": new_teacher.inserted_id})
    return JSONResponse(status_code=status.HTTP_201_CREATED, content=created_teacher)


handler = Mangum(application)
For every API request, a new connection is created. How can I cache the client so that new requests reuse the existing connection? At the moment a new connection is created every time, so Lambda compute time increases dramatically once the database hits its max connection limit.
There are examples for Node.js, but I could not find any for Python.
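
No answer is included in this excerpt, but a commonly suggested pattern (a sketch, not verified against this exact setup) is to create the Motor client lazily in a startup handler rather than at import time, so it is bound to the event loop Mangum actually runs and is reused across warm invocations of the same Lambda container:

# Sketch: create the client once, inside the running event loop, and keep it in a
# module-level holder so warm invocations reuse it. Names below are my own.
from typing import Optional

db_client: Optional[AsyncIOMotorClient] = None


@application.on_event("startup")
async def open_mongo_client() -> None:
    global db_client
    if db_client is None:
        db_client = AsyncIOMotorClient(str(MONGODB_URL), maxPoolSize=MAX_CONNECTIONS_COUNT)


async def get_database() -> AsyncIOMotorClient:
    return db_client


handler = Mangum(application, lifespan="auto")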

FastAPI Depends SQL connection

I'm new to FastAPI. I'm trying to get a user from my table, but I'm getting this error:
user = await db.query(User).filter(User.login == data['login']).first()
AttributeError: 'Depends' object has no attribute 'query'
Here is my server/database.py:

import os

from sqlmodel import SQLModel, Session
from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
from sqlalchemy.orm import sessionmaker

DATABASE_URL = os.environ.get("DATABASE_URL")

engine = create_async_engine(DATABASE_URL, echo=True, future=True)


async def init_db():
    async with engine.begin() as conn:
        # await conn.run_sync(SQLModel.metadata.drop_all)
        await conn.run_sync(SQLModel.metadata.create_all)  # init is ok, db table working
        pass


async def get_session() -> AsyncSession:
    async_session = sessionmaker(
        engine, class_=AsyncSession,
        expire_on_commit=False
    )
    async with async_session() as session:
        yield session
Here is server/web_server.py:

from fastapi import Depends, FastAPI
from sqlalchemy.future import select
from fastapi_socketio import SocketManager
from fastapi_login import LoginManager
import json

from database import get_session, init_db
from models import User, File

app = FastAPI()
sio = SocketManager(app=app)


@app.on_event("startup")
async def on_startup():
    await init_db()  # is ok


@app.sio.on('connect')
async def handle_connect(sid, connection_data):
    await app.sio.emit('connect', 'User joined')  # is ok


@app.sio.on('authorize')
async def authorize(sid, message, db = Depends(get_session)):
    data = json.loads(message)  # is ok
    user = await db.query(User).filter(User.login == data['login']).first()  # error in this line
    print(user)


if __name__ == '__main__':
    import uvicorn
    uvicorn.run("web_server:app", host='0.0.0.0', port=8000, reload=True, debug=False)
I'm using socketio for my app. The socketio.on('authorize') message contains data = {'login' : '...', 'password' : '...'}, and I want to use it to find the user in my PostgreSQL table, check the password, and authorize them. I'm having problems with get_session: query, execute, and commit don't work with it, and I get the same error, AttributeError: 'Depends' object has no attribute ...
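
This excerpt doesn't include an answer, but two things stand out: Depends() is only resolved by FastAPI for route and dependency parameters, not for python-socketio event handlers, so db arrives as a plain Depends object; and AsyncSession has no .query() method, so statements go through execute() with select(). A sketch of a handler that builds its own session (assuming the engine from database.py above; async_session is my own name) could look like this:

# Sketch: create the session explicitly instead of relying on Depends(), and use
# the select() API that AsyncSession supports.
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import sessionmaker

from database import engine

async_session = sessionmaker(engine, class_=AsyncSession, expire_on_commit=False)


@app.sio.on('authorize')
async def authorize(sid, message):
    data = json.loads(message)
    async with async_session() as db:
        result = await db.execute(select(User).where(User.login == data['login']))
        user = result.scalars().first()
    print(user)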

Strange error in SQLAlchemy. Bug?

I tried to use Flask-RESTful + SQLAlchemy (with Automap) + a MySQL DBMS, but I don't understand why this error occurs in my code:
I send the request to my controller and it works normally, but after 10 s an error is raised about a lost connection to the DBMS.

itens = session.query(estados).filter(estados.ativo == True)

Stranger still, if I use raw SQL syntax the problem does not occur.

itens = engine.execute("SELECT `TBEstados`.`id`, `TBEstados`.`nome`, `TBEstados`.`ativo` FROM `intbr_webapp`.`TBEstados`;")

I'm using SQLAlchemy 1.2, but I also tried version 1.1. I also tried pool_pre_ping=True and had no success.
Does anyone know anything about this? I don't understand why the ORM query doesn't work but raw SQL does. The connection is the same, but the result is not.
My code is below:
estado.py
from flask import jsonify
from flask_restful import Resource
from json import dumps
from resources.database import Base, session, engine
#from resources.dataEncoder import JsonModel
from models.TBEstados import TBEstadosSchema
class Estados(Resource):
    def get(self):
        estados = Base.classes.TBEstados
        itens = session.query(estados).filter(estados.ativo == True)
        result = TBEstadosSchema(many=True).dump(itens)
        return jsonify(result.data)
database.py (imported in EstadoModel)
from flask import Flask, g
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import create_engine
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import sessionmaker
from flask_marshmallow import Marshmallow
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = my conn string
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_POOL_RECYCLE'] = 3600
app.config['SQLALCHEMY_POOL_TIMEOUT'] = 30
Base = automap_base()
engine = create_engine(app.config['SQLALCHEMY_DATABASE_URI'], pool_pre_ping=True)
# reflect the tables
Base.prepare(engine, reflect=True)
Session = sessionmaker(bind=engine)
session = Session()
ma = Marshmallow()
run.py
from flask import Flask, g
from flask_restful import Resource, Api
import resources.database
from controllers.Estados import Estados
app = Flask(__name__)
api = Api(app)
api.add_resource(Estados, '/estados')
if __name__ == '__main__':
    app.run(debug=True, port=9002)
The exact error:
sqlalchemy.exc.OperationalError
sqlalchemy.exc.OperationalError: (pymysql.err.OperationalError) (2013, 'Lost connection to MySQL server during query') [SQL: 'SELECT TBEstados.id AS TBEstados_id, TBEstados.nome AS TBEstados_nome, TBEstados.ativo AS TBEstados_ativo \nFROM TBEstados \nWHERE TBEstados.ativo = true']
I solved my problem!
I changed the value of the MySQL variable wait_timeout from 10 s to 100 s (to test), and the error in SQLAlchemy with the PyMySQL driver no longer occurs.
https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_wait_timeout
Thanks for the help!
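
A complementary client-side mitigation (not part of the original answer, just a sketch) is to recycle pooled connections before the server's wait_timeout closes them, so the pool never hands out a connection MySQL has already dropped:

# Sketch: recycle pooled connections well below MySQL's wait_timeout and keep
# pool_pre_ping=True so stale connections are tested on checkout.
engine = create_engine(
    app.config['SQLALCHEMY_DATABASE_URI'],
    pool_pre_ping=True,
    pool_recycle=280,  # seconds; pick a value below wait_timeout
)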
