Receiving "'hmset' with mapping of length 0" error - python

I want to store my session data in Redis. I have set SESSION_ENGINE = 'redis' in settings.py.
Here is the code for redis.py:
# redis.py
from django.contrib.sessions.backends.base import SessionBase
from django.utils.functional import cached_property
from redis import Redis

class SessionStore(SessionBase):
    @cached_property
    def _connection(self):
        return Redis(
            host='127.0.0.1',
            port=6379,
            db=0,
            decode_responses=True
        )

    def load(self):
        return self._connection.hgetall(self.session_key)

    def exists(self, session_key):
        return self._connection.exists(session_key)

    def create(self):
        # Creates a new session in the database.
        self._session_key = self._get_new_session_key()
        self.save(must_create=True)
        self.modified = True

    def save(self, must_create=False):
        # Saves the session data. If `must_create` is True,
        # creates a new session object. Otherwise, only updates
        # an existing object and doesn't create one.
        if self.session_key is None:
            return self.create()
        data = self._get_session(no_load=must_create)
        session_key = self._get_or_create_session_key()
        self._connection.hmset(session_key, data)
        self._connection.expire(session_key, self.get_expiry_age())

    def delete(self, session_key=None):
        # Deletes the session data under the session key.
        if session_key is None:
            if self.session_key is None:
                return
            session_key = self.session_key
        self._connection.delete(session_key)

    @classmethod
    def clear_expired(cls):
        # There is no need to remove expired sessions by hand,
        # because Redis does it automatically when a session expires.
        # We set the expiration time in the `save` method.
        pass
I am receiving the 'hmset' with mapping of length 0 error when accessing http://localhost:8000/admin in Django. After removing SESSION_ENGINE = 'redis', the error goes away.

From the Redis documentation:

As per Redis 4.0.0, HMSET is considered deprecated. Please use HSET in new code.

I replaced this line in the save() method:

self._connection.hmset(session_key, data)

with:

self._connection.hset(session_key, 'session_key', session_key, mapping=data)

On making the change, it works as expected. The underlying problem is that redis-py raises this error when asked to write an empty hash, and Django saves a brand-new, still-empty session dict on the first request to the admin; always writing the session_key field guarantees the hash is never empty.
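If you would rather not store the extra field, a minimal alternative sketch (my own variation, not from the original answer) is to skip the write when the session dict is empty; note that with this variant an empty session is never persisted, so exists() will report it as missing until some data is actually stored:

def save(self, must_create=False):
    if self.session_key is None:
        return self.create()
    data = self._get_session(no_load=must_create)
    session_key = self._get_or_create_session_key()
    # redis-py rejects an empty mapping, so only write when
    # there is actually session data to store.
    if data:
        self._connection.hset(session_key, mapping=data)
        self._connection.expire(session_key, self.get_expiry_age())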

Related

FastApi Sqlalchemy how to manage transaction (session and multiple commits)

I have a CRUD with insert and update functions, each with a commit at the end, as follows:
@staticmethod
def insert(db: Session, item: Item) -> None:
    db.add(item)
    db.commit()

@staticmethod
def update(db: Session, item: Item) -> None:
    ...
    db.commit()
I have an endpoint which receives a SQLAlchemy session from a FastAPI dependency and needs to insert and update atomically (in one DB transaction).
What is the best practice when working with transactions? I can't use the CRUD as-is, since it commits more than once.
How should I handle the transactions? Where do you commit your session: in the CRUD, or only once per request, in the FastAPI dependency function?
I had the same problem while using FastAPI. I couldn't find a way to call commit in separate methods and have them behave transactionally.
What I ended up doing was a flush instead of the commit, which sends the changes to the database but doesn't commit the transaction.
One thing to note is that in FastAPI every request opens a new session and closes it once it's done. Here is a rough example of what happens, based on the example in the SQLAlchemy docs:
def run_my_program():
    # This happens in the `database = SessionLocal()` of the `get_db` method below
    session = Session()
    try:
        ThingOne().go(session)
        ThingTwo().go(session)
        session.commit()
    except:
        session.rollback()
        raise
    finally:
        # This is the same as the `get_db` method below
        session.close()
The session that is generated for the request is already a transaction. When you commit that session, what actually happens is this:
When using the Session in its default mode of autocommit=False, a new transaction will be begun immediately after the commit, but note that the newly begun transaction does not use any connection resources until the first SQL is actually emitted.
In my opinion, after reading that, it makes sense to handle the commit and rollback at the endpoint scope.
I created a dummy example of how this would work. I use everything from the FastAPI guide.
def create_user(db: Session, user: UserCreate):
    """
    Create user record
    """
    fake_hashed_password = user.password + "notreallyhashed"
    db_user = models.User(email=user.email, hashed_password=fake_hashed_password)
    db.add(db_user)
    db.flush()  # Changed this to a flush
    return db_user
And then use the crud operations in the endpoint as follows:
from typing import List

from fastapi import Depends, HTTPException
from sqlalchemy.orm import Session

...

def get_db():
    """
    Get SQLAlchemy database session
    """
    database = SessionLocal()
    try:
        yield database
    finally:
        database.close()
@router.post("/users", response_model=List[schemas.User])
def create_users(user_1: schemas.UserCreate, user_2: schemas.UserCreate, db: Session = Depends(get_db)):
    """
    Create two users
    """
    try:
        user_1 = crud.create_user(db=db, user=user_1)
        user_2 = crud.create_user(db=db, user=user_2)
        db.commit()
        return [user_1, user_2]
    except:
        db.rollback()
        raise HTTPException(status_code=400, detail="Duplicated user")
In the future I might investigate moving this to a middleware, but I don't think you can get the behavior you want while the CRUD functions call commit themselves.
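For reference, a rough sketch of what such a middleware could look like. This is an assumption on my part, not something from the FastAPI guide: the middleware owns the session on request.state (so get_db would have to return request.state.db instead of creating its own), commits on success, and rolls back on an unhandled exception:

@app.middleware("http")
async def db_transaction_middleware(request, call_next):
    # Hypothetical: one session, and therefore one transaction, per request.
    request.state.db = SessionLocal()
    try:
        response = await call_next(request)
        request.state.db.commit()
        return response
    except Exception:
        request.state.db.rollback()
        raise
    finally:
        request.state.db.close()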
A more Pythonic approach is to let a context manager perform the commit or rollback, depending on whether or not there was an exception.
A Transaction class is a nice abstraction of what we are trying to accomplish:
class Transaction:
    def __init__(self, session: Session = Depends(get_session)):
        self.session = session

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is not None:
            # Roll back and let the exception propagate
            self.session.rollback()
            return False
        self.session.commit()
        return True
And use it in your APIs, like so:
def some_api(tx: Transaction = Depends(Transaction)):
    with tx:
        ThingOne().go()
        ThingTwo().go()
There is no need to pass the session to ThingOne and ThingTwo. Inject it into them, like so:
class ThingOne:
    def __init__(self, session: Session = Depends(get_session)):
        ...

class ThingTwo:
    def __init__(self, session: Session = Depends(get_session)):
        ...
I would also inject ThingOne and ThingTwo into the APIs as well. Since FastAPI caches a dependency's result within a request, Transaction, ThingOne, and ThingTwo all receive the same session:
def some_api(tx: Transaction = Depends(Transaction),
             one: ThingOne = Depends(ThingOne),
             two: ThingTwo = Depends(ThingTwo)):
    with tx:
        one.go()
        two.go()

How do I use scoped_session properly with multiple databases?

I am currently working on a Flask app that can have multiple databases connected to it.
Each request to the app should be handled by a particular database, depending on the URL.
I am now trying to replace flask-sqlalchemy with plain sqlalchemy so that I can use scoped_session to take care of this.
I have a SessionRegistry to store the sessions:
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, scoped_session

class SessionRegistry(object):
    _registry = {}

    def get_database_connection(self, name, **kwargs):
        return self._registry[name]

    def add_database_connection(self, url, name, **kwargs):
        if name not in self._registry:
            engine = create_engine(url)
            Session = sessionmaker(bind=engine)
            session = scoped_session(Session)
            self._registry[name] = session
        return True if self._registry[name] is not None else False
The problem I have now is that I don't know how to pass the session to my routes so they can use it. Here is an example class where I am trying to use it:
class SomeJob:
    def get(self, lim=1000, order="asc"):
        if order == "desc":
            result = session.query(SomeModel).order_by(
                SomeModel.id.desc()).limit(lim).all()
        else:
            result = session.query(SomeModel).order_by(
                SomeModel.id.asc()).limit(lim).all()
        # Deserialize to JSON
        schemaInstance = SomeSchema(many=True)
        json_res = schemaInstance.dump(result)
        # Return JSON
        return json_res
My question now is, how do I pass that session to the object properly?
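One possible sketch (an assumption, not from the original post): look the scoped session up in the registry inside the route and hand it to the job explicitly, changing get() to accept a session argument. The URL, database name, and route below are hypothetical:

registry = SessionRegistry()
registry.add_database_connection('sqlite:///db_a.sqlite', 'db_a')  # placeholder URL and name

@app.route('/<db_name>/things')
def things_route(db_name):
    # scoped_session returns the thread-local Session for this registry entry
    session = registry.get_database_connection(db_name)
    return jsonify(SomeJob().get(session=session))  # assumes get() now takes session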

Fall back to Flask's default session manager when session database is down?

I'm using SQLAlchemy for my session-management database and have created a custom SessionInterface as per the documentation.
My dev database went down today, and now I cannot access my site, as would be expected. Is there a way for me to fall back to Flask's default session manager in this event?
Here is my current implementation of SessionInterface:
class SqlAlchemySessionInterface(SessionInterface):
    # ...
    def open_session(self, app, request):
        sid = request.cookies.get(app.session_cookie_name)
        if sid:
            # The error is raised here when the database is down
            stored_session = DBSession.query.filter_by(sid=sid).first()
        # ...
I have a naive solution to the problem of a crashed database that leverages an in-memory dict as a backup:
# A backup in-memory storage for sessions
backup = {}

class SqlAlchemySessionInterface(SessionInterface):
    # ...
    def open_session(self, app, request):
        sid = request.cookies.get(app.session_cookie_name)
        if sid:
            try:
                stored_session = DBSession.query.filter_by(sid=sid).first()
            except DatabaseError:
                stored_session = backup.get(sid)
        # ...
You can extend the default secure-cookie session interface implementation:
class ReliableSessionInterface(SecureCookieSessionInterface):
    def open_session(self, app, request):
        try:
            return self._open_db_session(app, request)
        except DatabaseError:
            return super().open_session(app, request)

    def save_session(self, app, session, response):
        try:
            self._save_session_to_db(app, session, response)
        except DatabaseError:
            super().save_session(app, session, response)
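Wiring it up is then a one-line assignment (this assumes you implement the _open_db_session and _save_session_to_db helpers sketched above):

app.session_interface = ReliableSessionInterface()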
However, such a requirement sounds a little bit strange.

How do I pass arguments to after_request?

I am new to Flask and am learning about @app.after_request and @app.teardown_appcontext. I have a decorated view for oauthlib that takes an argument, data (which is an object).
@app.route('/api/me')
@oauth.require_oauth()
def me(data):
    user = data.user
    return jsonify(username=user.username)
After this view (and many other views) is executed, I'd like to update my database, but I need access to the variable data. How do I do that with @app.after_request or @app.teardown_appcontext?
@app.after_request
def record_ip(response):
    client = data.client  # needs access to "data"
    # ... log stuff in my database ...
    return response
You can add the object to the flask.g globals object:
from flask import g

@app.route('/api/me')
@oauth.require_oauth()
def me(req):
    user = req.user
    g.oauth_request = req
    return jsonify(username=user.username)

@app.after_request
def record_ip(response):
    req = g.get('oauth_request')
    if req is not None:
        client = req.client  # needs access to "req"
        # ... log stuff in my database ...
    return response
The global flask.g context is thread-safe and tied to the current request; quoting from the documentation:
The application context is created and destroyed as necessary. It never moves between threads and it will not be shared between requests.
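The same pattern works with @app.teardown_appcontext if you also want the logging to run when an unhandled exception occurs. A sketch (my own addition; note that the response object is not available in a teardown handler):

@app.teardown_appcontext
def log_oauth_request(exc):
    # exc is the unhandled exception, or None on a normal request
    req = g.get('oauth_request')
    if req is not None:
        pass  # ... log req.client in the database ...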

Is having multiple SQLAlchemy sessions in the same controller okay, or should I put them all into one session?

So I have a controller that renders a page. In the controller, I call multiple functions from the model, each of which creates its own session. For example:
def page(request):
    userid = authenticated_userid(request)
    user = User.get_by_id(userid)
    things = User.get_things()
    return {'user': user, 'things': things}
Where in the model I have:
class User:
    ...
    def get_by_id(self, userid):
        return DBSession.query(User)...

    def get_things(self):
        return DBSession.query(Thing)...
My question is: is creating a new session for each function optimal, or should I start one session in the controller and use it throughout (assuming I'm both querying and inserting into the database in the controller)? For example:
def page(request):
    session = DBSession()
    userid = authenticated_userid(request)
    user = User.get_by_id(userid, session)
    things = User.get_things(session)
    ...
    return {'user': user, 'things': things}

class User:
    ...
    def get_by_id(self, userid, session=None):
        if not session:
            session = DBSession()
        return session.query(User)...

    def get_things(self, session=None):
        if not session:
            session = DBSession()
        return session.query(Thing)...
Your first code is OK, provided your DBSession is a ScopedSession. DBSession() is then not a constructor, but just an accessor for thread-local storage. You might speed things up a bit by passing the session explicitly, but premature optimization is the root of all evil.
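For illustration, a minimal sketch of the ScopedSession setup this answer assumes (the URL is a placeholder):

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, scoped_session

engine = create_engine('sqlite:///app.db')  # placeholder URL
DBSession = scoped_session(sessionmaker(bind=engine))

# Every DBSession() call in the same thread returns the same Session,
# so User.get_by_id and User.get_things already share one session.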
