Stripping whitespace generically for all String fields - SQLAlchemy - python

I am using SQLAlchemy through Flask-SQLAlchemy as the ORM for a web app.
I'd like to automatically strip leading and trailing whitespace (e.g. via str.strip) when assigning to any string field.
One way to do this would be the following, but it would need to be specified for each and every string field:
class User(db.Model):
    _email = db.Column('email', db.String(100), primary_key=True)

    @hybrid_property
    def email(self):
        return self._email

    @email.setter
    def email(self, data):
        self._email = data.strip()
I would like to do this more generically for every String field (without having to write the above for each).

One way would be to create a custom augmented string type that handles such processing:
from sqlalchemy.types import TypeDecorator

class StrippedString(TypeDecorator):
    impl = db.String

    def process_bind_param(self, value, dialect):
        # In case you have nullable string fields and pass None
        return value.strip() if value else value

    def copy(self, **kw):
        return StrippedString(self.impl.length)
You'd then use this in place of plain String in your models:
class User(db.Model):
    email = db.Column(StrippedString(100), primary_key=True)
This does not behave exactly like your own implementation: the processing takes place only when the value is bound to a query as a parameter, in other words a bit later:
In [12]: u = User(email=' so.much@white.space ')
In [13]: u.email
Out[13]: ' so.much@white.space '
In [14]: session.add(u)
In [15]: session.commit()
In [16]: u.email
Out[16]: 'so.much@white.space'
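If you want the stripping to happen at assignment time instead of at bind time, an attribute event is another generic option. A minimal sketch against the User model above (the listener function name is mine):
from sqlalchemy import event

@event.listens_for(User.email, 'set', retval=True)
def strip_email_on_set(target, value, oldvalue, initiator):
    # with retval=True, the returned value is what actually gets set on the instance
    return value.strip() if isinstance(value, str) else value
With this, u.email reflects the stripped string immediately after assignment, not just after a flush.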

Related

convert models to json [duplicate]

Django has some good automatic serialization of ORM models returned from DB to JSON format.
How to serialize SQLAlchemy query result to JSON format?
I tried jsonpickle.encode but it encodes query object itself.
I tried json.dumps(items) but it returns
TypeError: <Product('3', 'some name', 'some desc')> is not JSON serializable
Is it really so hard to serialize SQLAlchemy ORM objects to JSON/XML? Isn't there any default serializer for it? It's a very common task to serialize ORM query results nowadays.
What I need is just to return JSON or XML data representation of SQLAlchemy query result.
The SQLAlchemy query result is needed in JSON/XML format to be used in a JavaScript datagrid (jqGrid, http://www.trirand.com/blog/).
You could just output your object as a dictionary:
class User:
    def as_dict(self):
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
And then you call as_dict() on an instance to serialize your object.
As explained in Convert sqlalchemy row object to python dict
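Note that column values such as datetime or Decimal are still not JSON-native. A quick (lossy) way around that is json.dumps's default hook; a sketch, where user is assumed to be an instance of the model above:
import json

# str() is a blunt fallback: dates become ISO-ish strings, Decimals become digit strings
payload = json.dumps(user.as_dict(), default=str)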
A flat implementation
You could use something like this:
import json

from sqlalchemy.ext.declarative import DeclarativeMeta

class AlchemyEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj.__class__, DeclarativeMeta):
            # an SQLAlchemy class
            fields = {}
            for field in [x for x in dir(obj) if not x.startswith('_') and x != 'metadata']:
                data = obj.__getattribute__(field)
                try:
                    json.dumps(data)  # this will fail on non-encodable values, like other classes
                    fields[field] = data
                except TypeError:
                    fields[field] = None
            # a json-encodable dict
            return fields
        return json.JSONEncoder.default(self, obj)
and then convert to JSON using:
c = YourAlchemyClass()
print(json.dumps(c, cls=AlchemyEncoder))
It will ignore fields that are not encodable (setting them to None).
It doesn't auto-expand relations (since this could lead to self-references, and loop forever).
A recursive, non-circular implementation
If, however, you'd rather loop forever, you could use:
import json

from sqlalchemy.ext.declarative import DeclarativeMeta

def new_alchemy_encoder():
    _visited_objs = []

    class AlchemyEncoder(json.JSONEncoder):
        def default(self, obj):
            if isinstance(obj.__class__, DeclarativeMeta):
                # don't re-visit self
                if obj in _visited_objs:
                    return None
                _visited_objs.append(obj)

                # an SQLAlchemy class
                fields = {}
                for field in [x for x in dir(obj) if not x.startswith('_') and x != 'metadata']:
                    fields[field] = obj.__getattribute__(field)
                # a json-encodable dict
                return fields
            return json.JSONEncoder.default(self, obj)

    return AlchemyEncoder
And then encode objects using:
print(json.dumps(e, cls=new_alchemy_encoder(), check_circular=False))
This would encode all children, and all their children, and all their children... potentially your entire database, basically. When it reaches something it has encoded before, it will encode it as None.
A recursive, possibly-circular, selective implementation
Another alternative, probably better, is to be able to specify the fields you want to expand:
import json

from sqlalchemy.ext.declarative import DeclarativeMeta

def new_alchemy_encoder(revisit_self=False, fields_to_expand=[]):
    _visited_objs = []

    class AlchemyEncoder(json.JSONEncoder):
        def default(self, obj):
            if isinstance(obj.__class__, DeclarativeMeta):
                # don't re-visit self
                if revisit_self:
                    if obj in _visited_objs:
                        return None
                    _visited_objs.append(obj)

                # go through each field in this SQLAlchemy class
                fields = {}
                for field in [x for x in dir(obj) if not x.startswith('_') and x != 'metadata']:
                    val = obj.__getattribute__(field)

                    # is this field another SQLAlchemy object, or a list of SQLAlchemy objects?
                    if isinstance(val.__class__, DeclarativeMeta) or (isinstance(val, list) and len(val) > 0 and isinstance(val[0].__class__, DeclarativeMeta)):
                        # unless we're expanding this field, stop here
                        if field not in fields_to_expand:
                            # not expanding this field: set it to None and continue
                            fields[field] = None
                            continue

                    fields[field] = val
                # a json-encodable dict
                return fields
            return json.JSONEncoder.default(self, obj)

    return AlchemyEncoder
You can now call it with:
print(json.dumps(e, cls=new_alchemy_encoder(False, ['parents']), check_circular=False))
to expand only SQLAlchemy fields called 'parents', for example.
Python 3.7+ and Flask 1.1+ can use the built-in dataclasses package
from dataclasses import dataclass
from datetime import datetime

from flask import Flask, jsonify
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite://"  # in-memory DB, assumed for the example
db = SQLAlchemy(app)

@dataclass
class User(db.Model):
    id: int
    email: str

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    email = db.Column(db.String(200), unique=True)

@app.route('/users/')
def users():
    users = User.query.all()
    return jsonify(users)

if __name__ == "__main__":
    users = User(email="user1@gmail.com"), User(email="user2@gmail.com")
    db.create_all()
    db.session.add_all(users)
    db.session.commit()
    app.run()
The /users/ route will now return a list of users.
[
    {"email": "user1@gmail.com", "id": 1},
    {"email": "user2@gmail.com", "id": 2}
]
Auto-serialize related models
@dataclass
class Account(db.Model):
    id: int
    users: User

    id = db.Column(db.Integer, primary_key=True)
    users = db.relationship(User)  # User model would need a db.ForeignKey field
The response from jsonify(account) would be this.
{
    "id": 1,
    "users": [
        {"email": "user1@gmail.com", "id": 1},
        {"email": "user2@gmail.com", "id": 2}
    ]
}
Override the default JSON Encoder
import datetime

from flask.json import JSONEncoder

class CustomJSONEncoder(JSONEncoder):
    "Add support for serializing timedeltas"

    def default(self, o):
        if type(o) == datetime.timedelta:
            return str(o)
        if type(o) == datetime.datetime:
            return o.isoformat()
        return super().default(o)

app.json_encoder = CustomJSONEncoder
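If you're on Flask 2.3 or newer, the app.json_encoder hook (and flask.json.JSONEncoder) was removed in favor of JSON providers; a minimal sketch of the same customization under that assumption:
import datetime

from flask.json.provider import DefaultJSONProvider

class CustomJSONProvider(DefaultJSONProvider):
    @staticmethod
    def default(o):
        if isinstance(o, datetime.timedelta):
            return str(o)
        if isinstance(o, datetime.datetime):
            return o.isoformat()
        return DefaultJSONProvider.default(o)

app.json = CustomJSONProvider(app)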
You can convert a RowProxy to a dict like this:
d = dict(row.items())
Then serialize that to JSON (you will have to specify an encoder for things like datetime values).
It's not that hard if you just want one record ( and not a full hierarchy of related records ).
json.dumps([(dict(row.items())) for row in rs])
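For instance, a sketch with a hypothetical json_default hook for date values (rs is assumed to be the result set being iterated):
import json
from datetime import date, datetime

def json_default(value):
    # fall back to ISO 8601 strings for date/datetime values
    if isinstance(value, (datetime, date)):
        return value.isoformat()
    raise TypeError("not JSON serializable: %r" % type(value))

payload = json.dumps([dict(row.items()) for row in rs], default=json_default)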
I recommend using marshmallow. It allows you to create serializers to represent your model instances with support for relations and nested objects.
Here is a truncated example from their docs. Take the ORM model, Author:
class Author(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    first = db.Column(db.String(80))
    last = db.Column(db.String(80))
A marshmallow schema for that class is constructed like this:
class AuthorSchema(Schema):
    id = fields.Int(dump_only=True)
    first = fields.Str()
    last = fields.Str()
    formatted_name = fields.Method("format_name", dump_only=True)

    def format_name(self, author):
        return "{}, {}".format(author.last, author.first)
...and used like this:
author_schema = AuthorSchema()
author_schema.dump(Author.query.first())
...would produce an output like this:
{
    "first": "Tim",
    "formatted_name": "Peters, Tim",
    "id": 1,
    "last": "Peters"
}
Have a look at their full Flask-SQLAlchemy Example.
A library called marshmallow-sqlalchemy specifically integrates SQLAlchemy and marshmallow. In that library, the schema for the Author model described above looks like this:
class AuthorSchema(ModelSchema):
    class Meta:
        model = Author
The integration allows the field types to be inferred from the SQLAlchemy Column types.
More details are in the marshmallow-sqlalchemy documentation.
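Newer marshmallow-sqlalchemy releases replaced ModelSchema with SQLAlchemyAutoSchema; a minimal sketch of the same schema and its dump usage under that API (the many=True variant is standard marshmallow):
from marshmallow_sqlalchemy import SQLAlchemyAutoSchema

class AuthorSchema(SQLAlchemyAutoSchema):
    class Meta:
        model = Author

author_schema = AuthorSchema()
authors_schema = AuthorSchema(many=True)

one = author_schema.dump(Author.query.first())   # a single dict
many = authors_schema.dump(Author.query.all())   # a list of dicts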
You can use SQLAlchemy's introspection, like this:
from sqlalchemy import inspect

mysql = SQLAlchemy()

class Contacts(mysql.Model):
    __tablename__ = 'CONTACTS'
    id = mysql.Column(mysql.Integer, primary_key=True)
    first_name = mysql.Column(mysql.String(128), nullable=False)
    last_name = mysql.Column(mysql.String(128), nullable=False)
    phone = mysql.Column(mysql.String(128), nullable=False)
    email = mysql.Column(mysql.String(128), nullable=False)
    street = mysql.Column(mysql.String(128), nullable=False)
    zip_code = mysql.Column(mysql.String(128), nullable=False)
    city = mysql.Column(mysql.String(128), nullable=False)

    def toDict(self):
        return {c.key: getattr(self, c.key) for c in inspect(self).mapper.column_attrs}
@app.route('/contacts', methods=['GET'])
def getContacts():
    contacts = Contacts.query.all()
    contactsArr = []
    for contact in contacts:
        contactsArr.append(contact.toDict())
    return jsonify(contactsArr)

@app.route('/contacts/<int:id>', methods=['GET'])
def getContact(id):
    contact = Contacts.query.get(id)
    return jsonify(contact.toDict())
Inspired by an answer here:
Convert sqlalchemy row object to python dict
The Flask-JsonTools package has an implementation of a JsonSerializableBase base class for your models.
Usage:
from sqlalchemy.ext.declarative import declarative_base
from flask_jsontools import JsonSerializableBase

Base = declarative_base(cls=(JsonSerializableBase,))

class User(Base):
    # ...
Now the User model is magically serializable.
If your framework is not Flask, you can just grab the code.
For security reasons you should never return all the model's fields. I prefer to selectively choose them.
Flask's JSON encoding now supports UUID, datetime and relationships (and flask_sqlalchemy's db.Model class adds query and query_class attributes). I've updated the encoder as follows:
app/json_encoder.py
from sqlalchemy.ext.declarative import DeclarativeMeta
from flask import json

class AlchemyEncoder(json.JSONEncoder):
    def default(self, o):
        if isinstance(o.__class__, DeclarativeMeta):
            data = {}
            fields = o.__json__() if hasattr(o, '__json__') else dir(o)
            for field in [f for f in fields if not f.startswith('_') and f not in ['metadata', 'query', 'query_class']]:
                value = o.__getattribute__(field)
                try:
                    json.dumps(value)
                    data[field] = value
                except TypeError:
                    data[field] = None
            return data
        return json.JSONEncoder.default(self, o)
app/__init__.py
# json encoding
from app.json_encoder import AlchemyEncoder
app.json_encoder = AlchemyEncoder
With this I can optionally add a __json__ method that returns the list of fields I wish to encode:
app/models.py
class Queue(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    song_id = db.Column(db.Integer, db.ForeignKey('song.id'), unique=True, nullable=False)
    song = db.relationship('Song', lazy='joined')
    type = db.Column(db.String(20), server_default=u'audio/mpeg')
    src = db.Column(db.String(255), nullable=False)
    created_at = db.Column(db.DateTime, server_default=db.func.now())
    updated_at = db.Column(db.DateTime, server_default=db.func.now(), onupdate=db.func.now())

    def __init__(self, song):
        self.song = song
        self.src = song.full_path

    def __json__(self):
        return ['song', 'src', 'type', 'created_at']
I add @jsonapi to my view, return the result list, and then my output is as follows:
[
    {
        "created_at": "Thu, 23 Jul 2015 11:36:53 GMT",
        "song": {
            "full_path": "/static/music/Audioslave/Audioslave [2002]/1 Cochise.mp3",
            "id": 2,
            "path_name": "Audioslave/Audioslave [2002]/1 Cochise.mp3"
        },
        "src": "/static/music/Audioslave/Audioslave [2002]/1 Cochise.mp3",
        "type": "audio/mpeg"
    }
]
A more detailed explanation.
In your model, add:
def as_dict(self):
    return {c.name: str(getattr(self, c.name)) for c in self.__table__.columns}
The str() is for Python 3; if using Python 2, use unicode(). It should help serialize dates. You can remove it if you're not dealing with those.
You can now query the database like this
some_result = User.query.filter_by(id=current_user.id).first().as_dict()
The .first() is needed to avoid weird errors. as_dict() will then serialize the result, and it is ready to be turned into JSON:
jsonify(some_result)
While the original question goes back a while, the number of answers here (and my own experiences) suggest it's a non-trivial question with a lot of different approaches of varying complexity and different trade-offs.
That's why I built the SQLAthanor library that extends SQLAlchemy's declarative ORM with configurable serialization/de-serialization support that you might want to take a look at.
The library supports:
Python 2.7, 3.4, 3.5, and 3.6.
SQLAlchemy versions 0.9 and higher
serialization/de-serialization to/from JSON, CSV, YAML, and Python dict
serialization/de-serialization of columns/attributes, relationships, hybrid properties, and association proxies
enabling and disabling of serialization for particular formats and columns/relationships/attributes (e.g. you want to support an inbound password value, but never include an outbound one)
pre-serialization and post-deserialization value processing (for validation or type coercion)
a pretty straightforward syntax that is both Pythonic and seamlessly consistent with SQLAlchemy's own approach
You can check out the (I hope!) comprehensive docs here: https://sqlathanor.readthedocs.io/en/latest
Hope this helps!
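For flavor, here is a rough sketch of what model declaration looks like there, based on its docs; treat the names and arguments as approximate:
from sqlalchemy import Integer, Text
from sqlathanor import declarative_base, Column

Base = declarative_base()

class User(Base):
    __tablename__ = 'users'
    # supports_json can also be an (inbound, outbound) tuple, per the docs
    id = Column(Integer, primary_key=True, supports_json=True)
    email = Column(Text, supports_json=True)

# instances then expose to_json(), and User.new_from_json(...) goes the other way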
Custom serialization and deserialization:
from_json (class method) builds a Model object from JSON data.
deserialize can be called only on an instance, and merges all data from the JSON into the Model instance.
serialize does recursive serialization.
The __write_only__ property is needed to define write-only properties ("password_hash", for example).
from sqlalchemy.orm.attributes import QueryableAttribute

class Serializable(object):
    __exclude__ = ('id',)
    __include__ = ()
    __write_only__ = ()

    @classmethod
    def from_json(cls, json, selfObj=None):
        if selfObj is None:
            self = cls()
        else:
            self = selfObj
        exclude = (cls.__exclude__ or ()) + Serializable.__exclude__
        include = cls.__include__ or ()
        if json:
            for prop, value in json.items():
                # ignore all non-user data, i.e. only set mapped attributes
                if (prop not in exclude or prop in include) and isinstance(
                        getattr(cls, prop, None), QueryableAttribute):
                    setattr(self, prop, value)
        return self

    def deserialize(self, json):
        if not json:
            return None
        return self.__class__.from_json(json, selfObj=self)

    @classmethod
    def serialize_list(cls, object_list=[]):
        output = []
        for li in object_list:
            if isinstance(li, Serializable):
                output.append(li.serialize())
            else:
                output.append(li)
        return output

    def serialize(self, **kwargs):
        # init write-only props
        if len(getattr(self.__class__, '__write_only__', ())) == 0:
            self.__class__.__write_only__ = ()
        dictionary = {}
        expand = kwargs.get('expand', ()) or ()
        prop = 'props'
        if expand:
            # expand all the fields
            for key in expand:
                getattr(self, key)
        iterable = self.__dict__.items()
        is_custom_property_set = False
        # include only properties passed as parameter
        if (prop in kwargs) and (kwargs.get(prop, None) is not None):
            is_custom_property_set = True
            iterable = kwargs.get(prop, None)
        # loop through all accessible properties
        for key in iterable:
            accessor = key
            if isinstance(key, tuple):
                accessor = key[0]
            if not (accessor in self.__class__.__write_only__) and not accessor.startswith('_'):
                # force select from db to be able to get relationships
                if is_custom_property_set:
                    getattr(self, accessor, None)
                if isinstance(self.__dict__.get(accessor), list):
                    dictionary[accessor] = self.__class__.serialize_list(object_list=self.__dict__.get(accessor))
                # check if those properties are read-only
                elif isinstance(self.__dict__.get(accessor), Serializable):
                    dictionary[accessor] = self.__dict__.get(accessor).serialize()
                else:
                    dictionary[accessor] = self.__dict__.get(accessor)
        return dictionary
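A hypothetical usage sketch, assuming a User model that mixes in Serializable and maps an email column (the names are mine):
# build from JSON, merge more JSON into it, then dump back out
user = User.from_json({'email': 'tim@example.com'})
user.deserialize({'email': 'tim2@example.com'})
payload = user.serialize()
many = User.serialize_list(object_list=[user])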
Here is a solution that lets you select the relations you want to include in your output, as deep as you would like to go.
NOTE: This is a complete rewrite taking a dict/str as an argument rather than a list, which fixes some issues:
def deep_dict(self, relations={}):
    """Output a dict of an SA object, recursing as deep as you want.

    Takes one argument, relations, which is a dictionary of relations we'd
    like to pull out. The relations dict items can be a single relation
    name or deeper relation names connected by sub-dicts.

    Example:
        Say we have a Person object with a family relationship:
            person.deep_dict(relations={'family': None})
        Say the family object has homes as a relation, then we can do:
            person.deep_dict(relations={'family': {'homes': None}})
        OR
            person.deep_dict(relations={'family': 'homes'})
        Say homes has a relation like rooms, you can do:
            person.deep_dict(relations={'family': {'homes': 'rooms'}})
        and so on...
    """
    mydict = dict((c, str(a)) for c, a in
                  self.__dict__.items() if c != '_sa_instance_state')
    if not relations:
        # just return ourselves
        return mydict

    # otherwise we need to go deeper
    if not isinstance(relations, dict) and not isinstance(relations, str):
        raise Exception("relations should be a dict, it is of type {}".format(type(relations)))

    # got here, so check and handle if we were passed a dict
    if isinstance(relations, dict):
        # we were passed deeper info
        for left, right in relations.items():
            myrel = getattr(self, left)
            if isinstance(myrel, list):
                mydict[left] = [rel.deep_dict(relations=right) for rel in myrel]
            else:
                mydict[left] = myrel.deep_dict(relations=right)
    # if we get here, check and handle if we were passed a string
    elif isinstance(relations, str):
        # passed a single item
        myrel = getattr(self, relations)
        left = relations
        if isinstance(myrel, list):
            mydict[left] = [rel.deep_dict(relations=None)
                            for rel in myrel]
        else:
            mydict[left] = myrel.deep_dict(relations=None)
    return mydict
So, for an example using person/family/homes/rooms, turning it into JSON all you need is:
json.dumps(person.deep_dict(relations={'family':{'homes':'rooms'}}))
step 1:
class CNAME:
    ...
    def as_dict(self):
        return {item.name: getattr(self, item.name) for item in self.__table__.columns}
step 2:
results = []
for data in session.query(CNAME).all():
    results.append(data.as_dict())
step 3:
return jsonify(results)
Even though it's an old post, and maybe I'm not answering the question above exactly, I want to talk about my serialization; at least it works for me.
I use FastAPI, SQLAlchemy and MySQL, but I don't use the ORM model;
# from sqlalchemy import create_engine
# from sqlalchemy.orm import sessionmaker
# engine = create_engine(config.SQLALCHEMY_DATABASE_URL, pool_pre_ping=True)
# SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Serialization code
import decimal
import datetime

def alchemy_encoder(obj):
    """JSON encoder function for SQLAlchemy special classes."""
    if isinstance(obj, datetime.date):
        return obj.strftime("%Y-%m-%d %H:%M:%S")
    elif isinstance(obj, decimal.Decimal):
        return float(obj)

import json

from sqlalchemy import text

# db is a SessionLocal() object
app_sql = 'SELECT * FROM app_info ORDER BY app_id LIMIT :page,:page_size'

# the next two are the parameters passed in
page = 1
page_size = 10

# execute sql and return a <class 'sqlalchemy.engine.result.ResultProxy'> object
app_list = db.execute(text(app_sql), {'page': page, 'page_size': page_size})

# serialize
res = json.loads(json.dumps([dict(r) for r in app_list], default=alchemy_encoder))
If it doesn't work, please ignore my answer. I referenced it from here:
https://codeandlife.com/2014/12/07/sqlalchemy-results-to-json-the-easy-way/
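Note that on SQLAlchemy 1.4 and 2.0, rows returned by execute() are Row objects and dict(r) no longer works; the ._mapping view restores dict-style access. A sketch of the last line under that assumption:
# SQLAlchemy 1.4+/2.0: use the ._mapping view of each Row
res = json.loads(json.dumps([dict(r._mapping) for r in app_list], default=alchemy_encoder))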
Install simplejson with
pip install simplejson and then create a class:
from collections import OrderedDict
from datetime import datetime

class Serialise(object):
    def _asdict(self):
        """
        Serialization logic for converting entities using flask's jsonify

        :return: An ordered dictionary
        :rtype: :class:`collections.OrderedDict`
        """
        result = OrderedDict()
        # get the columns
        for key in self.__mapper__.c.keys():
            if isinstance(getattr(self, key), datetime):
                result["x"] = getattr(self, key).timestamp() * 1000
                result["timestamp"] = result["x"]
            else:
                result[key] = getattr(self, key)
        return result
and inherit this class in every ORM class, so that this _asdict function gets registered on every ORM class, and boom.
And use jsonify anywhere
It is not so straightforward. I wrote some code to do this. I'm still working on it, and it uses the MochiKit framework. It basically translates compound objects between Python and JavaScript using a proxy and registered JSON converters.
Browser side for database objects is db.js
It needs the basic Python proxy source in proxy.js.
On the Python side there is the base proxy module.
Then finally the SqlAlchemy object encoder in webserver.py.
It also depends on metadata extractors found in the models.py file.
def alc2json(row):
    return dict([(col, str(getattr(row, col))) for col in row.__table__.columns.keys()])
I thought I'd play a little code golf with this one.
FYI: I am using automap_base since we have a separately designed schema according to business requirements. I just started using SQLAlchemy today but the documentation states that automap_base is an extension to declarative_base which seems to be the typical paradigm in the SQLAlchemy ORM so I believe this should work.
It does not get fancy with following foreign keys per Tjorriemorrie's solution, but it simply matches columns to values and handles Python types by str()-ing the column values. Our values consist of Python datetime.time and decimal.Decimal class type results, so it gets the job done.
Hope this helps any passers-by!
I know this is quite an old post. I took the solution given by @SashaB and modified it as per my need.
I added the following things to it:
Field ignore list: A list of fields to be ignored while serializing
Field replace list: A dictionary containing field names to be replaced by values while serializing.
Prevented methods and BaseQuery objects from being serialized
My code is as follows:
import json

from flask_sqlalchemy import BaseQuery
from sqlalchemy.ext.declarative import DeclarativeMeta

def alchemy_json_encoder(revisit_self=False, fields_to_expand=[], fields_to_ignore=[], fields_to_replace={}):
    """
    Serialize SQLAlchemy result into JSON
    :param revisit_self: True / False
    :param fields_to_expand: Fields which are to be expanded for including their children and all
    :param fields_to_ignore: Fields to be ignored while encoding
    :param fields_to_replace: Field keys to be replaced by values assigned in dictionary
    :return: JSON-serialized SQLAlchemy object
    """
    _visited_objs = []

    class AlchemyEncoder(json.JSONEncoder):
        def default(self, obj):
            if isinstance(obj.__class__, DeclarativeMeta):
                # don't re-visit self
                if revisit_self:
                    if obj in _visited_objs:
                        return None
                    _visited_objs.append(obj)

                # go through each field in this SQLAlchemy class
                fields = {}
                for field in [x for x in dir(obj) if not x.startswith('_') and x != 'metadata' and x not in fields_to_ignore]:
                    val = obj.__getattribute__(field)
                    # is this field a method definition, or a BaseQuery object?
                    if not hasattr(val, "__call__") and not isinstance(val, BaseQuery):
                        field_name = fields_to_replace[field] if field in fields_to_replace else field
                        # is this field another SQLAlchemy object, or a list of SQLAlchemy objects?
                        if isinstance(val.__class__, DeclarativeMeta) or \
                                (isinstance(val, list) and len(val) > 0 and isinstance(val[0].__class__, DeclarativeMeta)):
                            # unless we're expanding this field, stop here
                            if field not in fields_to_expand:
                                # not expanding this field: set it to None and continue
                                fields[field_name] = None
                                continue

                        fields[field_name] = val
                # a json-encodable dict
                return fields
            return json.JSONEncoder.default(self, obj)

    return AlchemyEncoder
Hope it helps someone!
Use the built-in serializer in SQLAlchemy (note that it produces a pickled byte string for re-creating SQLAlchemy structures, not JSON):
from sqlalchemy.ext.serializer import loads, dumps

obj = MyAlchemyObject()
# serialize object
serialized_obj = dumps(obj)
# deserialize object
obj = loads(serialized_obj)
If you're transferring the object between sessions, remember to detach the object from the current session using session.expunge(obj).
To attach it again, just do session.add(obj).
Under Flask, this works and handles datetime fields, transforming a field of type
'time': datetime.datetime(2018, 3, 22, 15, 40) into
"time": "2018-03-22 15:40:00":
obj = {c.name: str(getattr(self, c.name)) for c in self.__table__.columns}
# This to get the JSON body
return json.dumps(obj)
# Or this to get a response object
return jsonify(obj)
The following code will serialize an SQLAlchemy result to JSON.
import json
from collections import OrderedDict

def asdict(self):
    result = OrderedDict()
    for key in self.__mapper__.c.keys():
        if getattr(self, key) is not None:
            result[key] = str(getattr(self, key))
        else:
            result[key] = getattr(self, key)
    return result

def to_array(all_vendors):
    v = [ven.asdict() for ven in all_vendors]
    return json.dumps(v)
Calling the function:
def all_products():
    all_products = Products.query.all()
    return to_array(all_products)
The AlchemyEncoder is wonderful but sometimes fails with Decimal values. Here is an improved encoder that solves the decimal problem:
import json
from decimal import Decimal

from sqlalchemy.ext.declarative import DeclarativeMeta

class AlchemyEncoder(json.JSONEncoder):
    # to serialize SQLAlchemy objects
    def default(self, obj):
        if isinstance(obj.__class__, DeclarativeMeta):
            model_fields = {}
            for field in [x for x in dir(obj) if not x.startswith('_') and x != 'metadata']:
                data = obj.__getattribute__(field)
                try:
                    json.dumps(data)  # this will fail on non-encodable values, like other classes
                    model_fields[field] = data
                except TypeError:
                    # convert Decimal fields instead of dropping them
                    model_fields[field] = float(data) if isinstance(data, Decimal) else None
            return model_fields
        if isinstance(obj, Decimal):
            return float(obj)
        return json.JSONEncoder.default(self, obj)
When using SQLAlchemy to connect to a db, this is a simple solution which is highly configurable: use pandas.
import pandas as pd
import sqlalchemy

# sqlalchemy engine configuration
engine = sqlalchemy.create_engine....

def my_function():
    # read in from sql directly into a pandas dataframe
    # check the pandas documentation for additional config options
    sql_DF = pd.read_sql_table("table_name", con=engine)
    # "orient" is optional here but allows you to specify the json formatting you require
    sql_json = sql_DF.to_json(orient="index")
    return sql_json
(Tiny tweak on Sasha B's really excellent answer)
This specifically converts datetime objects to strings which in the original answer would be converted to None:
# Standard library imports
from datetime import datetime
import json

# 3rd party imports
from sqlalchemy.ext.declarative import DeclarativeMeta

class JsonEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj.__class__, DeclarativeMeta):
            fields = {}
            # remove invalid fields and just get the column attributes
            columns = [x for x in dir(obj) if not x.startswith("_") and x != "metadata"]
            for column in columns:
                value = obj.__getattribute__(column)
                try:
                    json.dumps(value)
                    fields[column] = value
                except TypeError:
                    if isinstance(value, datetime):
                        fields[column] = value.__str__()
                    else:
                        fields[column] = None
            return fields
        return json.JSONEncoder.default(self, obj)
from datetime import datetime
from typing import List

class SqlToDict:
    def __init__(self, data) -> None:
        self.data = data

    def to_timestamp(self, date):
        if isinstance(date, datetime):
            return int(datetime.timestamp(date))
        else:
            return date

    def to_dict(self) -> List:
        arr = []
        for i in self.data:
            keys = [*i.keys()]
            values = [*i]
            values = [self.to_timestamp(d) for d in values]
            arr.append(dict(zip(keys, values)))
        return arr
For example:
SqlToDict(data).to_dict()
Very late 2023
My implementation
def obj_to_dict(obj, remove=['_sa_instance_state'], debug=False):
    result = {}
    if type(obj).__name__ == "Row":
        return dict(obj)
    obj = obj.__dict__
    for key in obj:
        if key in remove:
            continue
        result[key] = obj[key]
    if debug:
        print(result)
    return result
The built-in serializer chokes with "utf-8 cannot decode invalid start byte" for some inputs. Instead, I went with:
def row_to_dict(row):
    temp = dict(row.__dict__)  # copy, so popping doesn't mutate the instance's own dict
    temp.pop('_sa_instance_state', None)
    return temp

def rows_to_list(rows):
    ret_rows = []
    for row in rows:
        ret_rows.append(row_to_dict(row))
    return ret_rows

@website_blueprint.route('/api/v1/some/endpoint', methods=['GET'])
def some_api():
    '''
    /some_endpoint
    '''
    rows = rows_to_list(SomeModel.query.all())
    response = app.response_class(
        response=jsonplus.dumps(rows),
        status=200,
        mimetype='application/json'
    )
    return response
Maybe you can use a class like this:
from typing import Any, Dict

from sqlalchemy import Table
from sqlalchemy.ext.declarative import declarative_base, declared_attr

class Custom:
    """Some custom logic here!"""

    __table__: Table  # def for mypy

    @declared_attr
    def __tablename__(cls):  # pylint: disable=no-self-argument
        return cls.__name__  # pylint: disable=no-member

    def to_dict(self) -> Dict[str, Any]:
        """Serializes only column data."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}

Base = declarative_base(cls=Custom)

class MyOwnTable(Base):
    ...  # COLUMNS!
With that, all objects have the to_dict method.
While using some raw sql and undefined objects, using cursor.description appeared to get what I was looking for:
with connection.cursor() as cur:
    print(query)
    cur.execute(query)
    for item in cur.fetchall():
        row = {column.name: item[i] for i, column in enumerate(cur.description)}
        print(row)
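On SQLAlchemy 1.4+/2.0, Core results can produce dict-style rows directly without reaching for the DBAPI cursor. A small sketch, with a placeholder connection URL and query:
from sqlalchemy import create_engine, text

engine = create_engine("sqlite://")  # placeholder URL
with engine.connect() as conn:
    # .mappings() yields RowMapping objects that behave like dicts
    rows = [dict(m) for m in conn.execute(text("SELECT 1 AS x")).mappings()]
print(rows)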
This is a JSONEncoder version that preserves model column order and only keeps recursively defined column and relationship fields. It also formats most JSON unserializable types:
import json
from datetime import datetime
from decimal import Decimal

import arrow
from sqlalchemy.ext.declarative import DeclarativeMeta

class SQLAlchemyJSONEncoder(json.JSONEncoder):
    """
    SQLAlchemy ORM JSON Encoder

    If you have a "backref" relationship defined in your SQLAlchemy model,
    this encoder raises a ValueError to stop an infinite loop.
    """

    def default(self, obj):
        if isinstance(obj, datetime):
            return arrow.get(obj).isoformat()
        elif isinstance(obj, Decimal):
            return float(obj)
        elif isinstance(obj, set):
            return sorted(obj)
        elif isinstance(obj.__class__, DeclarativeMeta):
            for attribute, relationship in obj.__mapper__.relationships.items():
                if isinstance(relationship.__getattribute__("backref"), tuple):
                    raise ValueError(
                        f'{obj.__class__} object has a "backref" relationship '
                        "that would cause an infinite loop!"
                    )
            dictionary = {}
            column_names = [column.name for column in obj.__table__.columns]
            for key in column_names:
                value = obj.__getattribute__(key)
                if isinstance(value, datetime):
                    value = arrow.get(value).isoformat()
                elif isinstance(value, Decimal):
                    value = float(value)
                elif isinstance(value, set):
                    value = sorted(value)
                dictionary[key] = value
            for key in [
                attribute
                for attribute in dir(obj)
                if not attribute.startswith("_")
                and attribute != "metadata"
                and attribute not in column_names
            ]:
                value = obj.__getattribute__(key)
                dictionary[key] = value
            return dictionary
        return super().default(obj)

String Encrypted Type of JSONType changes are not saved to Database

Backstory
I have a questionnaire that asks sensitive questions, most of which are true/false. The majority of the time the values are false, which poses a challenge when keeping the data private at rest. When each question is encrypted into a separate column, it is really easy to tell which values are true and which are false with a bit of guessing. To combat this, the questions and answers are put into a dictionary object with some salt (nonsense that changes randomly), then encrypted, making it impossible to know the answers without the key.
Method
Below is an example of the model used to encrypt the data with salt at rest, making it impossible to look at the data and know the contents.
import random
from typing import Optional

import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy_utils.types import JSONType
from sqlalchemy_utils.types.encrypted.encrypted_type import StringEncryptedType, AesEngine

Base = declarative_base()

class SensitiveQuestionnaire(Base):
    __tablename__ = 'sensitive_questionnaire'  # assumed; the original snippet omits it

    user_id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
    _data: dict = sa.Column(StringEncryptedType(JSONType, 'secret', AesEngine, 'pkcs5'),
                            nullable=False, default=lambda: {'_salt': salt_shaker()})

    # values are viewed using a python property to look into the `_data` dict
    @property
    def sensitive_question(self) -> Optional[float]:
        return self._data.get('sensitive_question')

    # values are set into the `_data` dict
    @sensitive_question.setter
    def sensitive_question(self, value: bool) -> None:
        self._data['sensitive_question'] = value

    # in a real example there would be 20+ properties that map to questions

    def __init__(self, **kwargs):
        # Sqlalchemy does not use the __init__ method so we are free to set object defaults here
        self._data = {'_salt': salt_shaker()}
        for key in kwargs:
            setattr(self, key, kwargs[key])

    @property
    def _salt(self) -> str:
        return self._data['_salt']

def salt_shaker():
    return ''.join([random.choice('hldjs..') for i in range(50)])
The Problem
After the SensitiveQuestionnaire object is initialized, none of the changes are persisted in the database.
# GIVEN a questionnaire
questionnaire = model.SensitiveQuestionnaire(user_id=1)
db.session.add(questionnaire)
db.session.commit()

# WHEN updating the questionnaire and saving it to the database
questionnaire.sensitive_question = True
db.session.commit()

# THEN we get the questionnaire from the database
db_questionnaire = model.SensitiveQuestionnaire.query \
    .filter(model.SensitiveQuestionnaire.user_id == 1).first()

# THEN the sensitive_question value is persisted
assert db_questionnaire.sensitive_question is True
The value of db_questionnaire.sensitive_question is None when it should be True.
After spending the better part of the day figuring this out, the cause of the issue is how SQLAlchemy knows when there is a change. The short version is that SQLAlchemy only notices mutations when something calls its changed() hook; mutable containers like MutableDict override Python's __setitem__ to call changed(), letting the ORM know there was a change. More info can be found in SQLAlchemy's docs.
The answer is to wrap the StringEncryptedType in a MutableDict type.
Mutation Tracking
Provide support for tracking of in-place changes to scalar values, which are propagated into ORM change events on owning parent objects.
From SqlAlchemy's docs: https://docs.sqlalchemy.org/en/13/orm/extensions/mutable.html
Solution
Condensed version... wrapping the StringEncryptedType in a MutableDict
_data: dict = sa.Column(
    MutableDict.as_mutable(StringEncryptedType(JSONType, 'secret', AesEngine, 'pkcs5')),
    nullable=False, default=lambda: {'_salt': salt_shaker()})
Full version from the question above
import random
from typing import Optional

import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.mutable import MutableDict
from sqlalchemy_utils.types import JSONType
from sqlalchemy_utils.types.encrypted.encrypted_type import StringEncryptedType, AesEngine

Base = declarative_base()

class SensitiveQuestionnaire(Base):
    __tablename__ = 'sensitive_questionnaire'  # assumed; the original snippet omits it

    user_id: int = sa.Column(sa.Integer, primary_key=True, autoincrement=True)

    # the MutableDict.as_mutable below is what changed!
    _data: dict = sa.Column(
        MutableDict.as_mutable(StringEncryptedType(JSONType, 'secret', AesEngine, 'pkcs5')),
        nullable=False, default=lambda: {'_salt': salt_shaker()})

    @property
    def sensitive_question(self) -> Optional[float]:
        return self._data.get('sensitive_question')

    # values are set into the `_data` dict
    @sensitive_question.setter
    def sensitive_question(self, value: bool) -> None:
        self._data['sensitive_question'] = value

    # in a real example there would be 20+ properties that map to questions

    def __init__(self, **kwargs):
        self._data = {'_salt': salt_shaker()}
        for key in kwargs:
            setattr(self, key, kwargs[key])

    @property
    def _salt(self) -> str:
        return self._data['_salt']

def salt_shaker():
    return ''.join([random.choice('hldjs..') for i in range(50)])

SQLAlchemy New Uppercase Code field type with TypeDecorator

Happy New Year. Wish you all the best in 2016.
I want to create a new column type which automatically converts to uppercase when assigned. So far I have the following code:
class CodeColumn(db.TypeDecorator):
    impl = db.String

    def process_bind_param(self, value, dialect):
        return value.upper()

    def process_result_value(self, value, dialect):
        return value.upper()

class Item(db.Model):
    __tablename__ = 'item'
    code = db.Column(CodeColumn(30), primary_key=True)
    name = db.Column(db.String(128), nullable=False)
It seems to work but the value is converted to uppercase only on db.session.commit():
./manage.py shell
>>> i = Item()
>>> i.code = 'apple200'
>>> i.code
'apple200'
>>> i.name = 'Apples 200'
>>> db.session.add(i)
>>> db.session.flush()
>>> i.code
'apple200'
>>> db.session.commit()
>>> i.code
'APPLE200'
>>>
Would it be possible to amend the code so that values of such fields are automatically converted to uppercase when first assigned, without adding an implementation like a property setter to the individual columns, but rather doing it in the TypeDecorator?
I know that I can achieve the effect with events or property setter, but I would like to have a custom column type to avoid setting those events and setters in multiple places.
Thanks
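For reference, a minimal sketch of the events approach the question mentions: a 'set' listener attached once, at mapper configuration time, to every attribute whose column type is CodeColumn (the listener names are mine):
from sqlalchemy import event

@event.listens_for(db.Model, 'mapper_configured', propagate=True)
def _attach_uppercase_listeners(mapper, class_):
    for prop in mapper.column_attrs:
        if isinstance(prop.columns[0].type, CodeColumn):
            @event.listens_for(getattr(class_, prop.key), 'set', retval=True)
            def _uppercase_on_set(target, value, oldvalue, initiator):
                # with retval=True, the returned value is what actually gets set
                return value.upper() if isinstance(value, str) else value
This keeps the uppercasing at assignment time while the TypeDecorator still guards what is stored and loaded, without per-column setters.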

SQLAlchemy filter always returns false

I have a simple player entity:
class Player(Base):
    __tablename__ = 'player'

    _id = Column('id', SmallInteger, primary_key=True)
    _nickName = Column('nick_name', String)

    def __init__(self, nickName):
        self._nickName = nickName

    @property
    def id(self):
        return self._id

    @property
    def nickName(self):
        return self._nickName.decode(encoding='UTF-8')

    @nickName.setter
    def nickName(self, nickName):
        self._nickName = nickName
When I do:
players = session.query(Player).filter(Player.nickName=='foo')
and I print the players var, I get this:
SELECT player.id AS player_id, player.nick_name AS player_nick_name
FROM player
WHERE false
Obviously, when I add .first() at the end of the session query, the result is None.
I have tried with filter_by() and get the same result.
Any help is welcome.
While using @hybrid_property will fix this in the general case, you shouldn't need to be decoding manually at all. Just set the column type to Unicode instead of String and, assuming your server plays nice, you should correctly get back a unicode value.
You also don't need the id property at all.
So all you should need for this class is:
class Player(Base):
    __tablename__ = 'player'
    id = Column(SmallInteger, primary_key=True)
    nickName = Column(Unicode)
(Both the column names and __init__ arguments can be generated automatically.)
If there's some reason your database isn't handling Unicode correctly, well, that's a different problem that we'd love to help you fix. :)
You cannot use regular @property attributes in query expressions. Use a @hybrid_property instead:
from sqlalchemy.ext.hybrid import hybrid_property

@hybrid_property
def nickName(self):
    return self._nickName.decode(encoding='UTF-8')

@nickName.setter
def nickName(self, nickName):
    self._nickName = nickName
This makes Player.nickName (that is, the attribute on the class) usable in SQL expressions.

SQL Alchemy overriding ==

I am creating a SQLAlchemy class that represents user credentials.
I want to have a field password that stores a hashed value of the password. Therefore I would like to override its behavior in the following way:
When assigning, credentials.password = value actually stores a hash of the value
When comparing, credentials.password == value actually compares against the hash of the value
I have read the following part of SQLAlchemy documentation http://docs.sqlalchemy.org/en/rel_0_7/orm/mapper_config.html#using-descriptors-and-hybrids
And I think I do understand how to solve the issue number 1.
I am, however, unsure how to do the second point. Is there a way to do it safely (without breaking SQLAlchemy)?
Here is the example model:
class Credentials(Base):
    __tablename__ = 'credentials'

    id = Column(Integer, primary_key=True)
    _password = Column('password', String)

    @hybrid_property
    def password(self):
        return self._password

    @password.setter
    def password(self):
        self._password = hash(self._password)
For comparing, since you can't un-hash the password, you need to create a custom type for the column that overrides the eq operator:
from sqlalchemy.sql import operators
from sqlalchemy.types import String

class MyPasswordType(String):
    class comparator_factory(String.Comparator):
        def __eq__(self, other):
            return self.operate(operators.eq, hash(other))
Have a look at: http://docs.sqlalchemy.org/en/latest/core/types.html#types-operators
And http://docs.sqlalchemy.org/en/latest/core/types.html#sqlalchemy.types.TypeEngine.comparator_factory
To set, you just need to pass in the value:

@password.setter
def password(self, value):
    self._password = hash(value)
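Alternatively, the comparison can live on the model itself via a hybrid comparator, following the pattern in SQLAlchemy's hybrid docs. A sketch using the question's hash() stand-in (a real app would use a proper password hash) and assuming the imports from the question's model:
from sqlalchemy.ext.hybrid import Comparator, hybrid_property

class HashComparator(Comparator):
    # compares the wrapped column expression against hash(candidate)
    def __eq__(self, other):
        return self.__clause_element__() == hash(other)

class Credentials(Base):
    __tablename__ = 'credentials'

    id = Column(Integer, primary_key=True)
    _password = Column('password', String)

    @hybrid_property
    def password(self):
        return self._password

    @password.setter
    def password(self, value):
        self._password = hash(value)

    @password.comparator
    def password(cls):
        return HashComparator(cls._password)
With this, credentials.password == value hashes the right-hand side in both Python and SQL comparisons.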
