How to convert a SQLAlchemy row object to a Python dict?

Is there a simple way to iterate over column name and value pairs?
My version of SQLAlchemy is 0.5.6
Here is the sample code where I tried using dict(row):

import sqlalchemy
from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

print "sqlalchemy version:", sqlalchemy.__version__

engine = create_engine('sqlite:///:memory:', echo=False)
metadata = MetaData()
users_table = Table('users', metadata,
    Column('id', Integer, primary_key=True),
    Column('name', String),
)
metadata.create_all(engine)

class User(declarative_base()):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)

    def __init__(self, name):
        self.name = name

Session = sessionmaker(bind=engine)
session = Session()
user1 = User("anurag")
session.add(user1)
session.commit()

# uncommenting next line throws exception 'TypeError: 'User' object is not iterable'
#print dict(user1)

# this one also throws 'TypeError: 'User' object is not iterable'
for u in session.query(User).all():
    print dict(u)
Running this code on my system outputs:

Traceback (most recent call last):
  File "untitled-1.py", line 37, in <module>
    print dict(u)
TypeError: 'User' object is not iterable

You may access the internal __dict__ of a SQLAlchemy object, like the following:

for u in session.query(User).all():
    print u.__dict__
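
If you want a plain dict without SQLAlchemy's internal bookkeeping key, a minimal sketch (the helper name is my own) that filters _sa_instance_state out of __dict__:

def to_plain_dict(obj):
    # keep every attribute loaded so far, minus SQLAlchemy's internal state
    return {k: v for k, v in obj.__dict__.items()
            if k != '_sa_instance_state'}

for u in session.query(User).all():
    print to_plain_dict(u)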

As per @zzzeek in comments:

note that this is the correct answer for modern versions of SQLAlchemy, assuming "row" is a Core row object, not an ORM-mapped instance.

for row in resultproxy:
    row_as_dict = row._mapping  # SQLAlchemy 1.4 and greater
    # row_as_dict = dict(row)  # SQLAlchemy 1.3 and earlier

background on row._mapping, new as of SQLAlchemy 1.4: https://docs.sqlalchemy.org/en/stable/core/connections.html#sqlalchemy.engine.Row._mapping
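
For completeness, a short sketch of the 1.4+ pattern; dict() makes a plain, mutable copy of the immutable RowMapping view:

for row in resultproxy:
    row_as_dict = dict(row._mapping)  # plain dict copy of the row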

I couldn't get a good answer, so I use this:

def row2dict(row):
    d = {}
    for column in row.__table__.columns:
        d[column.name] = str(getattr(row, column.name))
    return d

Edit: if the above function is too long and not suited for some tastes, here is a one-liner (Python 2.7+):

row2dict = lambda r: {c.name: str(getattr(r, c.name)) for c in r.__table__.columns}

In SQLAlchemy v0.8 and newer, use the inspection system:

from sqlalchemy import inspect

def object_as_dict(obj):
    return {c.key: getattr(obj, c.key)
            for c in inspect(obj).mapper.column_attrs}

user = session.query(User).first()
d = object_as_dict(user)

Note that .key is the attribute name, which can be different from the column name, e.g. in the following case:

class_ = Column('class', Text)

This method also works for column_property.

Rows have an _asdict() function which gives a dict:

In [8]: r1 = db.session.query(Topic.name).first()
In [9]: r1
Out[9]: (u'blah')
In [10]: r1.name
Out[10]: u'blah'
In [11]: r1._asdict()
Out[11]: {'name': u'blah'}

Assuming the following function is added to the class User, the following will return all key-value pairs of all columns:

def columns_to_dict(self):
    dict_ = {}
    for key in self.__mapper__.c.keys():
        dict_[key] = getattr(self, key)
    return dict_

Unlike the other answers, only those attributes of the object are returned which are Column attributes at class level of the object. Therefore no _sa_instance_state or any other attribute that SQLAlchemy (or you) adds to the object is included. Reference
EDIT: Forgot to say that this also works on inherited Columns.

hybrid_property extension

If you also want to include hybrid_property attributes, the following will work:

from sqlalchemy import inspect
from sqlalchemy.ext.hybrid import hybrid_property

def publics_to_dict(self) -> dict:
    dict_ = {}
    for key in self.__mapper__.c.keys():
        if not key.startswith('_'):
            dict_[key] = getattr(self, key)

    for key, prop in inspect(self.__class__).all_orm_descriptors.items():
        if isinstance(prop, hybrid_property):
            dict_[key] = getattr(self, key)
    return dict_

I assume here that you mark Columns with a leading _ to indicate that you want to hide them, either because you access the attribute through a hybrid_property or because you simply do not want to show them. Reference
Tip: all_orm_descriptors also returns hybrid_method and AssociationProxy, if you want to include those as well.

Remarks on other answers

Every answer (like 1, 2) which is based on the __dict__ attribute simply returns all attributes of the object. This could be many more attributes than you want. Like I said, this includes _sa_instance_state and any other attribute you define on the object.
Every answer (like 1, 2) which is based on the dict() function only works on SQLAlchemy row objects returned by session.execute(), not on the classes you define to work with, like the class User from the question.
Answers based on row.__table__.columns will not work in general: row.__table__.columns contains the column names of the SQL database, and these are not necessarily equal to the attribute names of the Python object. If they differ, you get an AttributeError.
For answers (like 1, 2) based on class_mapper(obj.__class__).mapped_table.c, the same applies.

As @balki mentioned:

The _asdict() method can be used if you're querying a specific field, because it is returned as a KeyedTuple.

In [1]: foo = db.session.query(Topic.name).first()
In [2]: foo._asdict()
Out[2]: {'name': u'blah'}

Whereas if you do not specify a column, you can use one of the other proposed methods, such as the one provided by @charlax. Note that this method is only valid for Python 2.7+.

In [1]: foo = db.session.query(Topic).first()
In [2]: {x.name: getattr(foo, x.name) for x in foo.__table__.columns}
Out[2]: {'name': u'blah'}

Old question, but since this is the first result for "sqlalchemy row to dict" in Google, it deserves a better answer.
The RowProxy object that SQLAlchemy returns has the items() method:
http://docs.sqlalchemy.org/en/latest/core/connections.html#sqlalchemy.engine.RowProxy.items
It simply returns a list of (key, value) tuples. So one can convert a row to a dict using the following:

In Python <= 2.6:

rows = conn.execute(query)
list_of_dicts = [dict((key, value) for key, value in row.items()) for row in rows]

In Python >= 2.7:

rows = conn.execute(query)
list_of_dicts = [{key: value for (key, value) in row.items()} for row in rows]

A very simple solution: row._asdict().
sqlalchemy.engine.Row._asdict() (v1.4)
sqlalchemy.util.KeyedTuple._asdict() (v1.3)
> data = session.query(Table).all()
> [row._asdict() for row in data]

With SQLAlchemy 1.4:

session.execute(select(User.id, User.username)).mappings().all()
>> [{'id': 1, 'username': 'Bob'}, {'id': 2, 'username': 'Alice'}]
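
The same API works for a single row (a sketch; .mappings().first() returns None when there are no rows):

row = session.execute(select(User.id, User.username)).mappings().first()
# {'id': 1, 'username': 'Bob'}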

Following @balki's answer: since SQLAlchemy 0.8 you can use _asdict(), available for KeyedTuple objects. This renders a pretty straightforward answer to the original question. Just change the last two lines in your example (the for loop) to this:

for u in session.query(User).all():
    print u._asdict()

This works because in the above code u is an object of type KeyedTuple, since .all() returns a list of KeyedTuple. Therefore it has the method _asdict(), which nicely returns u as a dictionary.
WRT the answer by @STB: AFAIK, anything that .all() returns is a list of KeyedTuple. Therefore, the above works whether or not you specify a column, as long as you are dealing with the result of .all() as applied to a Query object.

from sqlalchemy.orm import class_mapper

def asdict(obj):
    return dict((col.name, getattr(obj, col.name))
                for col in class_mapper(obj.__class__).mapped_table.c)

Referring to Alex Brasetvik's answer, you can use one line of code to solve the problem:

row_as_dict = [dict(row) for row in resultproxy]

Under the comment section of Alex Brasetvik's answer, zzzeek, the creator of SQLAlchemy, stated this is the "Correct Method" for the problem.

I found this post because I was looking for a way to convert a SQLAlchemy row into a dict. I'm using SqlSoup... but the answer was built by myself, so, if it could help someone, here's my two cents:

a = db.execute('select * from acquisizioni_motes')
b = a.fetchall()
c = b[0]

# and now, finally...
dict(zip(c.keys(), c.values()))

You could try to do it this way:

for u in session.query(User).all():
    print(u._asdict())

This uses the built-in _asdict() method of the returned row objects, which gives a dictionary for each row.
References: https://docs.sqlalchemy.org/en/latest/orm/query.html

With Python 3.8+, we can do this with dataclasses and the asdict method that comes with them:

from dataclasses import dataclass, asdict

from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy import Column, String, Integer, create_engine

Base = declarative_base()
engine = create_engine('sqlite:///:memory:', echo=False)

@dataclass
class User(Base):
    __tablename__ = 'users'

    id: int = Column(Integer, primary_key=True)
    name: str = Column(String)
    email = Column(String)

    def __init__(self, name):
        self.name = name
        self.email = 'hello@example.com'

Base.metadata.create_all(engine)
SessionMaker = sessionmaker(bind=engine)
session = SessionMaker()

user1 = User("anurag")
session.add(user1)
session.commit()

query_result = session.query(User).one()  # type: User
print(f'{query_result.id=:}, {query_result.name=:}, {query_result.email=:}')
# query_result.id=1, query_result.name=anurag, query_result.email=hello@example.com

query_result_dict = asdict(query_result)
print(query_result_dict)
# {'id': 1, 'name': 'anurag'}

The key is to use the @dataclass decorator and annotate each column with its type (the : str part of the name: str = Column(String) line).
Also note that since email is not annotated, it is not included in query_result_dict.

The expression you are iterating through evaluates to a list of model objects, not rows. So the following is correct usage of them:

for u in session.query(User).all():
    print u.id, u.name

Do you really need to convert them to dicts? Sure, there are a lot of ways, but then you don't need the ORM part of SQLAlchemy:

result = session.execute(User.__table__.select())
for row in result:
    print dict(row)

Update: Take a look at the sqlalchemy.orm.attributes module. It has a set of functions to work with object state that might be useful for you, especially instance_dict().
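
A quick sketch of what that looks like, assuming the User model and session from the question (instance_dict() essentially returns the object's __dict__, so the internal state key still has to be filtered out):

from sqlalchemy.orm.attributes import instance_dict

user = session.query(User).first()
state = instance_dict(user)
d = {k: v for k, v in state.items() if k != '_sa_instance_state'}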

I've just been dealing with this issue for a few minutes.
The answer marked as correct doesn't respect the type of the fields.
A solution comes from dictalchemy, which adds some interesting features:
https://pythonhosted.org/dictalchemy/
I've just tested it and it works fine.

Base = declarative_base(cls=DictableModel)

session.query(User).asdict()
{'id': 1, 'username': 'Gerald'}

session.query(User).asdict(exclude=['id'])
{'username': 'Gerald'}

class User(object):
    def to_dict(self):
        return dict([(k, getattr(self, k)) for k in self.__dict__.keys() if not k.startswith("_")])

That should work.

You can convert a SQLAlchemy object to a dictionary like this and return it as JSON/dictionary.
Helper functions:

import json
from collections import OrderedDict

def asdict(self):
    result = OrderedDict()
    for key in self.__mapper__.c.keys():
        if getattr(self, key) is not None:
            result[key] = str(getattr(self, key))
        else:
            result[key] = getattr(self, key)
    return result

def to_array(all_vendors):
    v = [ven.asdict() for ven in all_vendors]
    return json.dumps(v)

Driver function:

def all_products():
    all_products = Products.query.all()
    return to_array(all_products)

Two ways:

1.

for row in session.execute(session.query(User).statement):
    print(dict(row))

2.

selected_columns = User.__table__.columns
rows = session.query(User).with_entities(*selected_columns).all()
for row in rows:
    print(row._asdict())

Here is how Elixir does it. The value of this solution is that it allows recursively including the dictionary representation of relations.

def to_dict(self, deep={}, exclude=[]):
    """Generate a JSON-style nested dict/list structure from an object."""
    col_prop_names = [p.key for p in self.mapper.iterate_properties
                      if isinstance(p, ColumnProperty)]
    data = dict([(name, getattr(self, name))
                 for name in col_prop_names if name not in exclude])
    for rname, rdeep in deep.iteritems():
        dbdata = getattr(self, rname)
        # FIXME: use attribute names (ie. coltoprop) instead of column names
        fks = self.mapper.get_property(rname).remote_side
        exclude = [c.name for c in fks]
        if dbdata is None:
            data[rname] = None
        elif isinstance(dbdata, list):
            data[rname] = [o.to_dict(rdeep, exclude) for o in dbdata]
        else:
            data[rname] = dbdata.to_dict(rdeep, exclude)
    return data

With this code you can also add "filter" or "join" to your query and it works!

query = session.query(User)

def query_to_dict(query):
    def _create_dict(r):
        return {c.get('name'): getattr(r, c.get('name')) for c in query.column_descriptions}

    return [_create_dict(r) for r in query]

For the sake of everyone and myself, here is how I use it:

from sqlalchemy import create_engine
from sqlalchemy.pool import NullPool

def run_sql(conn_string):
    output_connection = create_engine(conn_string, poolclass=NullPool).connect()
    rows = output_connection.execute('select * from db1.t1').fetchall()
    return [dict(row) for row in rows]

As the OP stated, calling the dict initializer raises an exception with the message "'User' object is not iterable". So the real question is how to make a SQLAlchemy model iterable.
We'll have to implement the special methods __iter__ and __next__, but if we inherit directly from the declarative_base model, we would still run into the undesirable "_sa_instance_state" key. What's worse, we would have to loop through __dict__.keys() for every call to __next__, because the keys() method returns a view, an iterable that is not indexed. This would increase the time complexity by a factor of N, where N is the number of keys in __dict__, and generating the dict would cost O(N^2). We can do better.
We can implement our own Base class that implements the required special methods and stores a list of the column names that can be accessed by index, reducing the time complexity of generating the dict to O(N). This has the added benefit that we can define the logic once and inherit from our Base class any time we want a model class to be iterable.

class IterableBase(declarative_base()):
    __abstract__ = True

    def _init_keys(self):
        self._keys = [c.name for c in self.__table__.columns]
        self._dict = {c.name: getattr(self, c.name) for c in self.__table__.columns}

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._init_keys()

    def __setattr__(self, name, value):
        super().__setattr__(name, value)
        if name not in ('_dict', '_keys', '_n') and '_dict' in self.__dict__:
            self._dict[name] = value

    def __iter__(self):
        self._n = 0
        return self

    def __next__(self):
        if self._n >= len(self._keys):
            raise StopIteration
        self._n += 1
        key = self._keys[self._n - 1]
        return (key, self._dict[key])

Now the User class can inherit directly from our IterableBase class.

class User(IterableBase):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)

You can confirm that calling the dict function with a User instance as an argument returns the desired dictionary, sans "_sa_instance_state". You may have noticed the __setattr__ method declared in the IterableBase class. This ensures the _dict is updated when attributes are mutated or set after initialization.

def main():
    user1 = User('Bob')
    print(dict(user1))
    # outputs {'id': None, 'name': 'Bob'}
    user1.id = 42
    print(dict(user1))
    # outputs {'id': 42, 'name': 'Bob'}

if __name__ == '__main__':
    main()

After querying the database using the following SQLAlchemy code:

import sqlalchemy
from sqlalchemy import create_engine

SQLALCHEMY_DATABASE_URL = 'sqlite:///./examples/sql_app.db'
engine = create_engine(SQLALCHEMY_DATABASE_URL, echo=True)

query = sqlalchemy.select(TABLE)
results = engine.execute(query).fetchall()

you can use this one-liner:

query_dict = [record._mapping for record in results]

I have a variation on Marco Mariani's answer, expressed as a decorator. The main difference is that it'll handle lists of entities, as well as safely ignoring some other types of return values (which is very useful when writing tests using mocks):

from decorator import decorator  # assumes the third-party 'decorator' package
from sqlalchemy.orm import class_mapper

@decorator
def to_dict(f, *args, **kwargs):
    result = f(*args, **kwargs)
    if is_iterable(result) and not is_dict(result):
        return map(asdict, result)

    return asdict(result)

def asdict(obj):
    return dict((col.name, getattr(obj, col.name))
                for col in class_mapper(obj.__class__).mapped_table.c)

def is_dict(obj):
    return isinstance(obj, dict)

def is_iterable(obj):
    return True if getattr(obj, '__iter__', False) else False
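
A hypothetical usage sketch (get_user, session and User are stand-ins for illustration):

@to_dict
def get_user(user_id):
    return session.query(User).get(user_id)

get_user(1)
# -> {'id': 1, 'name': u'anurag'}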

To complete @Anurag Uniyal's answer, here is a method that will recursively follow relationships:

from sqlalchemy.inspection import inspect

def to_dict(obj, with_relationships=True):
    d = {}
    for column in obj.__table__.columns:
        if with_relationships and len(column.foreign_keys) > 0:
            # Skip foreign keys; the relationship below replaces them
            continue
        d[column.name] = getattr(obj, column.name)

    if with_relationships:
        for relationship in inspect(type(obj)).relationships:
            val = getattr(obj, relationship.key)
            d[relationship.key] = to_dict(val) if val else None
    return d

class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    first_name = Column(TEXT)
    address_id = Column(Integer, ForeignKey('addresses.id'))
    address = relationship('Address')

class Address(Base):
    __tablename__ = 'addresses'
    id = Column(Integer, primary_key=True)
    city = Column(TEXT)

user = User(first_name='Nathan', address=Address(city='Lyon'))
# Add and commit user to session to create ids

to_dict(user)
# {'id': 1, 'first_name': 'Nathan', 'address': {'city': 'Lyon'}}
to_dict(user, with_relationships=False)
# {'id': 1, 'first_name': 'Nathan', 'address_id': 1}

We can get a list of dicts from the query result like this:

def queryset_to_dict(query_result):
    query_columns = query_result[0].keys()
    res = [list(ele) for ele in query_result]
    dict_list = [dict(zip(query_columns, l)) for l in res]
    return dict_list

query_result = db.session.query(LanguageMaster).all()
dictvalue = queryset_to_dict(query_result)

from copy import copy

def to_record(row):
    record = copy(row.__dict__)
    del record["_sa_instance_state"]
    return record

If not using copy, you might run into errors.

Related

convert models to json [duplicate]

Django has some good automatic serialization of ORM models returned from the DB to JSON format.
How do you serialize a SQLAlchemy query result to JSON format?
I tried jsonpickle.encode, but it encodes the query object itself.
I tried json.dumps(items), but it returns:

TypeError: <Product('3', 'some name', 'some desc')> is not JSON serializable

Is it really so hard to serialize SQLAlchemy ORM objects to JSON/XML? Isn't there any default serializer for it? It's a very common task to serialize ORM query results nowadays.
What I need is just to return a JSON or XML data representation of the SQLAlchemy query result.
The SQLAlchemy objects query result in JSON/XML format is needed to be used in a JavaScript datagrid (jqGrid http://www.trirand.com/blog/).
You could just output your object as a dictionary:

class User:
    def as_dict(self):
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}

And then use User.as_dict() to serialize your object.
As explained in Convert sqlalchemy row object to python dict
A flat implementation

You could use something like this:

import json
from sqlalchemy.ext.declarative import DeclarativeMeta

class AlchemyEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj.__class__, DeclarativeMeta):
            # an SQLAlchemy class
            fields = {}
            for field in [x for x in dir(obj) if not x.startswith('_') and x != 'metadata']:
                data = obj.__getattribute__(field)
                try:
                    json.dumps(data)  # this will fail on non-encodable values, like other classes
                    fields[field] = data
                except TypeError:
                    fields[field] = None
            # a json-encodable dict
            return fields

        return json.JSONEncoder.default(self, obj)

and then convert to JSON using:

c = YourAlchemyClass()
print json.dumps(c, cls=AlchemyEncoder)

It will ignore fields that are not encodable (setting them to None).
It doesn't auto-expand relations (since this could lead to self-references and loop forever).
A recursive, non-circular implementation

If, however, you'd rather loop forever, you could use:

from sqlalchemy.ext.declarative import DeclarativeMeta

def new_alchemy_encoder():
    _visited_objs = []

    class AlchemyEncoder(json.JSONEncoder):
        def default(self, obj):
            if isinstance(obj.__class__, DeclarativeMeta):
                # don't re-visit self
                if obj in _visited_objs:
                    return None
                _visited_objs.append(obj)

                # an SQLAlchemy class
                fields = {}
                for field in [x for x in dir(obj) if not x.startswith('_') and x != 'metadata']:
                    fields[field] = obj.__getattribute__(field)
                # a json-encodable dict
                return fields

            return json.JSONEncoder.default(self, obj)

    return AlchemyEncoder

And then encode objects using:

print json.dumps(e, cls=new_alchemy_encoder(), check_circular=False)

This would encode all children, and all their children, and all their children... potentially your entire database, basically. When it reaches something it has encoded before, it will encode it as None.
A recursive, possibly-circular, selective implementation

Another alternative, probably better, is to be able to specify the fields you want to expand:

def new_alchemy_encoder(revisit_self=False, fields_to_expand=[]):
    _visited_objs = []

    class AlchemyEncoder(json.JSONEncoder):
        def default(self, obj):
            if isinstance(obj.__class__, DeclarativeMeta):
                # don't re-visit self
                if revisit_self:
                    if obj in _visited_objs:
                        return None
                    _visited_objs.append(obj)

                # go through each field in this SQLalchemy class
                fields = {}
                for field in [x for x in dir(obj) if not x.startswith('_') and x != 'metadata']:
                    val = obj.__getattribute__(field)

                    # is this field another SQLalchemy object, or a list of SQLalchemy objects?
                    if isinstance(val.__class__, DeclarativeMeta) or (isinstance(val, list) and len(val) > 0 and isinstance(val[0].__class__, DeclarativeMeta)):
                        # unless we're expanding this field, stop here
                        if field not in fields_to_expand:
                            # not expanding this field: set it to None and continue
                            fields[field] = None
                            continue

                    fields[field] = val
                # a json-encodable dict
                return fields

            return json.JSONEncoder.default(self, obj)

    return AlchemyEncoder

You can now call it with:

print json.dumps(e, cls=new_alchemy_encoder(False, ['parents']), check_circular=False)

to only expand SQLAlchemy fields called 'parents', for example.
Python 3.7+ and Flask 1.1+ can use the built-in dataclasses package:

from dataclasses import dataclass
from datetime import datetime

from flask import Flask, jsonify
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
db = SQLAlchemy(app)

@dataclass
class User(db.Model):
    id: int
    email: str

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    email = db.Column(db.String(200), unique=True)

@app.route('/users/')
def users():
    users = User.query.all()
    return jsonify(users)

if __name__ == "__main__":
    users = User(email="user1@gmail.com"), User(email="user2@gmail.com")
    db.create_all()
    db.session.add_all(users)
    db.session.commit()
    app.run()

The /users/ route will now return a list of users.

[
  {"email": "user1@gmail.com", "id": 1},
  {"email": "user2@gmail.com", "id": 2}
]
Auto-serialize related models

@dataclass
class Account(db.Model):
    id: int
    users: User

    id = db.Column(db.Integer)
    users = db.relationship(User)  # User model would need a db.ForeignKey field

The response from jsonify(account) would be this.

{
  "id": 1,
  "users": [
    {
      "email": "user1@gmail.com",
      "id": 1
    },
    {
      "email": "user2@gmail.com",
      "id": 2
    }
  ]
}
Overwrite the default JSON Encoder

import datetime

from flask.json import JSONEncoder

class CustomJSONEncoder(JSONEncoder):
    "Add support for serializing timedeltas"

    def default(self, o):
        if type(o) == datetime.timedelta:
            return str(o)
        if type(o) == datetime.datetime:
            return o.isoformat()
        return super().default(o)

app.json_encoder = CustomJSONEncoder
You can convert a RowProxy to a dict like this:

d = dict(row.items())

Then serialize that to JSON (you will have to specify an encoder for things like datetime values).
It's not that hard if you just want one record (and not a full hierarchy of related records):

json.dumps([(dict(row.items())) for row in rs])
I recommend using marshmallow. It allows you to create serializers to represent your model instances with support for relations and nested objects.
Here is a truncated example from their docs. Take the ORM model, Author:

class Author(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    first = db.Column(db.String(80))
    last = db.Column(db.String(80))

A marshmallow schema for that class is constructed like this:

class AuthorSchema(Schema):
    id = fields.Int(dump_only=True)
    first = fields.Str()
    last = fields.Str()
    formatted_name = fields.Method("format_name", dump_only=True)

    def format_name(self, author):
        return "{}, {}".format(author.last, author.first)

...and used like this:

author_schema = AuthorSchema()
author_schema.dump(Author.query.first())

...would produce an output like this:

{
  "first": "Tim",
  "formatted_name": "Peters, Tim",
  "id": 1,
  "last": "Peters"
}

Have a look at their full Flask-SQLAlchemy example.
A library called marshmallow-sqlalchemy specifically integrates SQLAlchemy and marshmallow. In that library, the schema for the Author model described above looks like this:

class AuthorSchema(ModelSchema):
    class Meta:
        model = Author

The integration allows the field types to be inferred from the SQLAlchemy Column types.
You can find marshmallow-sqlalchemy here.
You can use SQLAlchemy introspection like this:

from sqlalchemy import inspect

mysql = SQLAlchemy()

class Contacts(mysql.Model):
    __tablename__ = 'CONTACTS'
    id = mysql.Column(mysql.Integer, primary_key=True)
    first_name = mysql.Column(mysql.String(128), nullable=False)
    last_name = mysql.Column(mysql.String(128), nullable=False)
    phone = mysql.Column(mysql.String(128), nullable=False)
    email = mysql.Column(mysql.String(128), nullable=False)
    street = mysql.Column(mysql.String(128), nullable=False)
    zip_code = mysql.Column(mysql.String(128), nullable=False)
    city = mysql.Column(mysql.String(128), nullable=False)

    def toDict(self):
        return {c.key: getattr(self, c.key) for c in inspect(self).mapper.column_attrs}

@app.route('/contacts', methods=['GET'])
def getContacts():
    contacts = Contacts.query.all()
    contactsArr = []
    for contact in contacts:
        contactsArr.append(contact.toDict())
    return jsonify(contactsArr)

@app.route('/contacts/<int:id>', methods=['GET'])
def getContact(id):
    contact = Contacts.query.get(id)
    return jsonify(contact.toDict())

Inspired by an answer here:
Convert sqlalchemy row object to python dict
The Flask-JsonTools package has an implementation of a JsonSerializableBase class for your models.
Usage:

from sqlalchemy.ext.declarative import declarative_base
from flask.ext.jsontools import JsonSerializableBase

Base = declarative_base(cls=(JsonSerializableBase,))

class User(Base):
    # ...

Now the User model is magically serializable.
If your framework is not Flask, you can just grab the code.
For security reasons you should never return all of a model's fields. I prefer to selectively choose them.
Flask's JSON encoding now supports UUID, datetime and relationships (and added query and query_class for the flask_sqlalchemy db.Model class). I've updated the encoder as follows:

app/json_encoder.py

from sqlalchemy.ext.declarative import DeclarativeMeta
from flask import json

class AlchemyEncoder(json.JSONEncoder):
    def default(self, o):
        if isinstance(o.__class__, DeclarativeMeta):
            data = {}
            fields = o.__json__() if hasattr(o, '__json__') else dir(o)
            for field in [f for f in fields if not f.startswith('_') and f not in ['metadata', 'query', 'query_class']]:
                value = o.__getattribute__(field)
                try:
                    json.dumps(value)
                    data[field] = value
                except TypeError:
                    data[field] = None
            return data
        return json.JSONEncoder.default(self, o)

app/__init__.py

# json encoding
from app.json_encoder import AlchemyEncoder
app.json_encoder = AlchemyEncoder

With this I can optionally add a __json__ property that returns the list of fields I wish to encode:

app/models.py

class Queue(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    song_id = db.Column(db.Integer, db.ForeignKey('song.id'), unique=True, nullable=False)
    song = db.relationship('Song', lazy='joined')
    type = db.Column(db.String(20), server_default=u'audio/mpeg')
    src = db.Column(db.String(255), nullable=False)
    created_at = db.Column(db.DateTime, server_default=db.func.now())
    updated_at = db.Column(db.DateTime, server_default=db.func.now(), onupdate=db.func.now())

    def __init__(self, song):
        self.song = song
        self.src = song.full_path

    def __json__(self):
        return ['song', 'src', 'type', 'created_at']

I add @jsonapi to my view, return the result list, and then my output is as follows:

[
  {
    "created_at": "Thu, 23 Jul 2015 11:36:53 GMT",
    "song": {
      "full_path": "/static/music/Audioslave/Audioslave [2002]/1 Cochise.mp3",
      "id": 2,
      "path_name": "Audioslave/Audioslave [2002]/1 Cochise.mp3"
    },
    "src": "/static/music/Audioslave/Audioslave [2002]/1 Cochise.mp3",
    "type": "audio/mpeg"
  }
]
A more detailed explanation.
In your model, add:

def as_dict(self):
    return {c.name: str(getattr(self, c.name)) for c in self.__table__.columns}

The str() is for Python 3, so if you're using Python 2, use unicode(). It should help deserialize dates. You can remove it if you're not dealing with those.
You can now query the database like this:

some_result = User.query.filter_by(id=current_user.id).first().as_dict()

first() is needed to avoid weird errors. as_dict() will now deserialize the result. After deserialization, it is ready to be turned into JSON:

jsonify(some_result)
While the original question goes back awhile, the number of answers here (and my own experiences) suggest it's a non-trivial question with a lot of different approaches of varying complexity with different trade-offs.
That's why I built the SQLAthanor library that extends SQLAlchemy's declarative ORM with configurable serialization/de-serialization support that you might want to take a look at.
The library supports:
Python 2.7, 3.4, 3.5, and 3.6.
SQLAlchemy versions 0.9 and higher
serialization/de-serialization to/from JSON, CSV, YAML, and Python dict
serialization/de-serialization of columns/attributes, relationships, hybrid properties, and association proxies
enabling and disabling of serialization for particular formats and columns/relationships/attributes (e.g. you want to support an inbound password value, but never include an outbound one)
pre-serialization and post-deserialization value processing (for validation or type coercion)
a pretty straightforward syntax that is both Pythonic and seamlessly consistent with SQLAlchemy's own approach
You can check out the (I hope!) comprehensive docs here: https://sqlathanor.readthedocs.io/en/latest
Hope this helps!
Custom serialization and deserialization.
"from_json" (class method) builds a Model object based on json data.
"deserialize" can be called only on an instance, and merges all data from the json into the Model instance.
"serialize" - recursive serialization
The __write_only__ property is needed to define write-only properties ("password_hash" for example).

class Serializable(object):
    __exclude__ = ('id',)
    __include__ = ()
    __write_only__ = ()

    @classmethod
    def from_json(cls, json, selfObj=None):
        if selfObj is None:
            self = cls()
        else:
            self = selfObj
        exclude = (cls.__exclude__ or ()) + Serializable.__exclude__
        include = cls.__include__ or ()
        if json:
            for prop, value in json.iteritems():
                # ignore all non user data, e.g. only
                if (not (prop in exclude) | (prop in include)) and isinstance(
                        getattr(cls, prop, None), QueryableAttribute):
                    setattr(self, prop, value)
        return self

    def deserialize(self, json):
        if not json:
            return None
        return self.__class__.from_json(json, selfObj=self)

    @classmethod
    def serialize_list(cls, object_list=[]):
        output = []
        for li in object_list:
            if isinstance(li, Serializable):
                output.append(li.serialize())
            else:
                output.append(li)
        return output

    def serialize(self, **kwargs):
        # init write only props
        if len(getattr(self.__class__, '__write_only__', ())) == 0:
            self.__class__.__write_only__ = ()
        dictionary = {}
        expand = kwargs.get('expand', ()) or ()
        prop = 'props'
        if expand:
            # expand all the fields
            for key in expand:
                getattr(self, key)
        iterable = self.__dict__.items()
        is_custom_property_set = False
        # include only properties passed as parameter
        if (prop in kwargs) and (kwargs.get(prop, None) is not None):
            is_custom_property_set = True
            iterable = kwargs.get(prop, None)
        # loop through all accessible properties
        for key in iterable:
            accessor = key
            if isinstance(key, tuple):
                accessor = key[0]
            if not (accessor in self.__class__.__write_only__) and not accessor.startswith('_'):
                # force select from db to be able to get relationships
                if is_custom_property_set:
                    getattr(self, accessor, None)
                if isinstance(self.__dict__.get(accessor), list):
                    dictionary[accessor] = self.__class__.serialize_list(object_list=self.__dict__.get(accessor))
                # check if those properties are read only
                elif isinstance(self.__dict__.get(accessor), Serializable):
                    dictionary[accessor] = self.__dict__.get(accessor).serialize()
                else:
                    dictionary[accessor] = self.__dict__.get(accessor)
        return dictionary
Here is a solution that lets you select the relations you want to include in your output, as deep as you would like to go.
NOTE: This is a complete re-write taking a dict/str as an arg rather than a list. Fixes some stuff.

def deep_dict(self, relations={}):
    """Output a dict of an SA object recursing as deep as you want.

    Takes one argument, relations, which is a dictionary of relations we'd
    like to pull out. The relations dict items can be a single relation
    name or deeper relation names connected by sub dicts

    Example:
        Say we have a Person object with a family relationship
            person.deep_dict(relations={'family': None})
        Say the family object has homes as a relation then we can do
            person.deep_dict(relations={'family': {'homes': None}})
        OR
            person.deep_dict(relations={'family': 'homes'})
        Say homes has a relation like rooms you can do
            person.deep_dict(relations={'family': {'homes': 'rooms'}})
        and so on...
    """
    mydict = dict((c, str(a)) for c, a in
                  self.__dict__.items() if c != '_sa_instance_state')
    if not relations:
        # just return ourselves
        return mydict
    # otherwise we need to go deeper
    if not isinstance(relations, dict) and not isinstance(relations, str):
        raise Exception("relations should be a dict, it is of type {}".format(type(relations)))
    # got here so check and handle if we were passed a dict
    if isinstance(relations, dict):
        # we were passed deeper info
        for left, right in relations.items():
            myrel = getattr(self, left)
            if isinstance(myrel, list):
                mydict[left] = [rel.deep_dict(relations=right) for rel in myrel]
            else:
                mydict[left] = myrel.deep_dict(relations=right)
    # if we get here check and handle if we were passed a string
    elif isinstance(relations, str):
        # passed a single item
        myrel = getattr(self, relations)
        left = relations
        if isinstance(myrel, list):
            mydict[left] = [rel.deep_dict(relations=None)
                            for rel in myrel]
        else:
            mydict[left] = myrel.deep_dict(relations=None)
    return mydict

so for an example using person/family/homes/rooms... turning it into json, all you need is:

json.dumps(person.deep_dict(relations={'family': {'homes': 'rooms'}}))
Step 1:

class CNAME:
    ...
    def as_dict(self):
        return {item.name: getattr(self, item.name) for item in self.__table__.columns}

Step 2:

results = []
for data in session.query(CNAME).all():
    results.append(data.as_dict())

Step 3:

return jsonify(results)
Even though it's an old post, maybe I didn't answer the question above, but I want to talk about my serialization. At least it works for me.
I use FastAPI, SQLAlchemy and MySQL, but I don't use the ORM model:

# from sqlalchemy import create_engine
# from sqlalchemy.orm import sessionmaker
# engine = create_engine(config.SQLALCHEMY_DATABASE_URL, pool_pre_ping=True)
# SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)

Serialization code:

import datetime
import decimal
import json

from sqlalchemy import text

def alchemy_encoder(obj):
    """JSON encoder function for SQLAlchemy special classes."""
    if isinstance(obj, datetime.date):
        return obj.strftime("%Y-%m-%d %H:%M:%S")
    elif isinstance(obj, decimal.Decimal):
        return float(obj)

# db is a SessionLocal() object
app_sql = 'SELECT * FROM app_info ORDER BY app_id LIMIT :page,:page_size'
# The next two are the parameters passed in
page = 1
page_size = 10

# execute sql and return a <class 'sqlalchemy.engine.result.ResultProxy'> object
app_list = db.execute(text(app_sql), {'page': page, 'page_size': page_size})

# serialize
res = json.loads(json.dumps([dict(r) for r in app_list], default=alchemy_encoder))

If it doesn't work, please ignore my answer. I referred to it here:
https://codeandlife.com/2014/12/07/sqlalchemy-results-to-json-the-easy-way/
Install simplejson with

pip install simplejson

and then create a class:

from collections import OrderedDict
from datetime import datetime

class Serialise(object):

    def _asdict(self):
        """
        Serialization logic for converting entities using flask's jsonify

        :return: An ordered dictionary
        :rtype: :class:`collections.OrderedDict`
        """
        result = OrderedDict()
        # Get the columns
        for key in self.__mapper__.c.keys():
            if isinstance(getattr(self, key), datetime):
                result["x"] = getattr(self, key).timestamp() * 1000
                result["timestamp"] = result["x"]
            else:
                result[key] = getattr(self, key)
        return result

and inherit this class in every ORM class, so that this _asdict function gets registered to every ORM class, and boom.
And use jsonify anywhere.
It is not so straightforward. I wrote some code to do this. I'm still working on it, and it uses the MochiKit framework. It basically translates compound objects between Python and JavaScript using a proxy and registered JSON converters.
The browser side for database objects is db.js.
It needs the basic Python proxy source in proxy.js.
On the Python side there is the base proxy module.
Then finally the SqlAlchemy object encoder in webserver.py.
It also depends on metadata extractors found in the models.py file.
def alc2json(row):
    return dict([(col, str(getattr(row, col))) for col in row.__table__.columns.keys()])

I thought I'd play a little code golf with this one.
FYI: I am using automap_base, since we have a separately designed schema according to business requirements. I just started using SQLAlchemy today, but the documentation states that automap_base is an extension of declarative_base, which seems to be the typical paradigm in the SQLAlchemy ORM, so I believe this should work.
It does not get fancy with following foreign keys per Tjorriemorrie's solution; it simply matches columns to values and handles Python types by str()-ing the column values. Our values consist of Python datetime.time and decimal.Decimal results, so it gets the job done.
Hope this helps any passers-by!
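
A hypothetical usage sketch with the User model from the question (note that every value comes back str()-ed):

row = session.query(User).first()
alc2json(row)
# {'id': '1', 'name': 'anurag'}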
I know this is quite an old post. I took the solution given by @SashaB and modified it as per my needs.
I added the following things to it:
Field ignore list: a list of fields to be ignored while serializing
Field replace list: a dictionary containing field names to be replaced by values while serializing
Removed methods and BaseQuery getting serialized
My code is as follows:

def alchemy_json_encoder(revisit_self=False, fields_to_expand=[], fields_to_ignore=[], fields_to_replace={}):
    """
    Serialize SQLAlchemy result into JSON
    :param revisit_self: True / False
    :param fields_to_expand: Fields which are to be expanded for including their children and all
    :param fields_to_ignore: Fields to be ignored while encoding
    :param fields_to_replace: Field keys to be replaced by values assigned in dictionary
    :return: JSON-serialized SQLAlchemy object
    """
    _visited_objs = []

    class AlchemyEncoder(json.JSONEncoder):
        def default(self, obj):
            if isinstance(obj.__class__, DeclarativeMeta):
                # don't re-visit self
                if revisit_self:
                    if obj in _visited_objs:
                        return None
                    _visited_objs.append(obj)

                # go through each field in this SQLalchemy class
                fields = {}
                for field in [x for x in dir(obj) if not x.startswith('_') and x != 'metadata' and x not in fields_to_ignore]:
                    val = obj.__getattribute__(field)
                    # is this field a method definition, or an SQLalchemy object?
                    if not hasattr(val, "__call__") and not isinstance(val, BaseQuery):
                        field_name = fields_to_replace[field] if field in fields_to_replace else field
                        # is this field another SQLalchemy object, or a list of SQLalchemy objects?
                        if isinstance(val.__class__, DeclarativeMeta) or \
                                (isinstance(val, list) and len(val) > 0 and isinstance(val[0].__class__, DeclarativeMeta)):
                            # unless we're expanding this field, stop here
                            if field not in fields_to_expand:
                                # not expanding this field: set it to None and continue
                                fields[field_name] = None
                                continue

                        fields[field_name] = val
                # a json-encodable dict
                return fields

            return json.JSONEncoder.default(self, obj)

    return AlchemyEncoder

Hope it helps someone!
Use the built-in serializer in SQLAlchemy:

from sqlalchemy.ext.serializer import loads, dumps

obj = MyAlchemyObject()
# serialize object
serialized_obj = dumps(obj)
# deserialize object
obj = loads(serialized_obj)

If you're transferring the object between sessions, remember to detach the object from the current session using session.expunge(obj).
To attach it again, just do session.add(obj).
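
A sketch of that session hand-off, following the answer's claim that instances round-trip through dumps()/loads() (the session names are placeholders):

obj = session_a.query(MyAlchemyObject).first()
session_a.expunge(obj)   # detach from the originating session
payload = dumps(obj)

restored = loads(payload)
session_b.add(restored)  # attach to the new session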
Under Flask, this works and handles datetime fields, transforming a field of type
'time': datetime.datetime(2018, 3, 22, 15, 40) into
"time": "2018-03-22 15:40:00":

obj = {c.name: str(getattr(self, c.name)) for c in self.__table__.columns}

# This to get the JSON body
return json.dumps(obj)

# Or this to get a response object
return jsonify(obj)
The AlchemyEncoder is wonderful but sometimes fails with Decimal values. Here is an improved encoder that solves the decimal problem:

class AlchemyEncoder(json.JSONEncoder):
    # To serialize SQLalchemy objects
    def default(self, obj):
        if isinstance(obj.__class__, DeclarativeMeta):
            model_fields = {}
            for field in [x for x in dir(obj) if not x.startswith('_') and x != 'metadata']:
                data = obj.__getattribute__(field)
                try:
                    json.dumps(data)  # this will fail on non-encodable values, like other classes
                    model_fields[field] = data
                except TypeError:
                    model_fields[field] = None
            return model_fields
        if isinstance(obj, Decimal):
            return float(obj)
        return json.JSONEncoder.default(self, obj)
When using SQLAlchemy to connect to a db, this is a simple solution which is highly configurable: use pandas.

import pandas as pd
import sqlalchemy

# sqlalchemy engine configuration
engine = sqlalchemy.create_engine....

def my_function():
    # read in from sql directly into a pandas dataframe
    # check the pandas documentation for additional config options
    sql_DF = pd.read_sql_table("table_name", con=engine)

    # "orient" is optional here but allows you to specify the json formatting you require
    sql_json = sql_DF.to_json(orient="index")

    return sql_json
(Tiny tweak on Sasha B's really excellent answer)
This specifically converts datetime objects to strings which in the original answer would be converted to None:

# Standard library imports
from datetime import datetime
import json

# 3rd party imports
from sqlalchemy.ext.declarative import DeclarativeMeta

class JsonEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj.__class__, DeclarativeMeta):
            dict = {}

            # Remove invalid fields and just get the column attributes
            columns = [x for x in dir(obj) if not x.startswith("_") and x != "metadata"]
            for column in columns:
                value = obj.__getattribute__(column)

                try:
                    json.dumps(value)
                    dict[column] = value
                except TypeError:
                    if isinstance(value, datetime):
                        dict[column] = value.__str__()
                    else:
                        dict[column] = None
            return dict

        return json.JSONEncoder.default(self, obj)
from datetime import datetime
from typing import List

class SqlToDict:
    def __init__(self, data) -> None:
        self.data = data

    def to_timestamp(self, date):
        if isinstance(date, datetime):
            return int(datetime.timestamp(date))
        else:
            return date

    def to_dict(self) -> List:
        arr = []
        for i in self.data:
            keys = [*i.keys()]
            values = [*i]
            values = [self.to_timestamp(d) for d in values]
            arr.append(dict(zip(keys, values)))
        return arr

For example:

SqlToDict(data).to_dict()
Very late 2023
My implementation:

def obj_to_dict(obj, remove=['_sa_instance_state'], debug=False):
    result = {}
    if type(obj).__name__ == "Row":
        return dict(obj)
    obj = obj.__dict__
    for key in obj:
        if key in remove:
            continue
        result[key] = obj[key]
    if debug:
        print(result)
    return result
The built-in serializer chokes with "utf-8 cannot decode invalid start byte" for some inputs. Instead, I went with:

def row_to_dict(row):
    temp = row.__dict__
    temp.pop('_sa_instance_state', None)
    return temp

def rows_to_list(rows):
    ret_rows = []
    for row in rows:
        ret_rows.append(row_to_dict(row))
    return ret_rows

@website_blueprint.route('/api/v1/some/endpoint', methods=['GET'])
def some_api():
    '''
    /some_endpoint
    '''
    rows = rows_to_list(SomeModel.query.all())

    response = app.response_class(
        response=jsonplus.dumps(rows),
        status=200,
        mimetype='application/json'
    )
    return response
Maybe you can use a class like this:

from typing import Any, Dict

from sqlalchemy import Table
from sqlalchemy.ext.declarative import declarative_base, declared_attr

class Custom:
    """Some custom logic here!"""

    __table__: Table  # def for mypy

    @declared_attr
    def __tablename__(cls):  # pylint: disable=no-self-argument
        return cls.__name__  # pylint: disable=no-member

    def to_dict(self) -> Dict[str, Any]:
        """Serializes only column data."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}

Base = declarative_base(cls=Custom)

class MyOwnTable(Base):
    # COLUMNS!

With that, all objects have the to_dict method.
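
A minimal sketch of that, filling in a hypothetical id column for the placeholder above:

from sqlalchemy import Column, Integer

class MyOwnTable(Base):
    id = Column(Integer, primary_key=True)

row = MyOwnTable(id=1)
print(row.to_dict())  # {'id': 1}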
While using some raw SQL and undefined objects, using cursor.description appeared to get what I was looking for:

with connection.cursor() as cur:
    print(query)
    cur.execute(query)
    for item in cur.fetchall():
        row = {column.name: item[i] for i, column in enumerate(cur.description)}
        print(row)
This is a JSONEncoder version that preserves model column order and only keeps recursively-defined column and relationship fields. It also formats most JSON-unserializable types:

import json
from datetime import datetime
from decimal import Decimal

import arrow

from sqlalchemy.ext.declarative import DeclarativeMeta

class SQLAlchemyJSONEncoder(json.JSONEncoder):
    """
    SQLAlchemy ORM JSON Encoder
    If you have a "backref" relationship defined in your SQLAlchemy model,
    this encoder raises a ValueError to stop an infinite loop.
    """

    def default(self, obj):
        if isinstance(obj, datetime):
            return arrow.get(obj).isoformat()
        elif isinstance(obj, Decimal):
            return float(obj)
        elif isinstance(obj, set):
            return sorted(obj)
        elif isinstance(obj.__class__, DeclarativeMeta):
            for attribute, relationship in obj.__mapper__.relationships.items():
                if isinstance(relationship.__getattribute__("backref"), tuple):
                    raise ValueError(
                        f'{obj.__class__} object has a "backref" relationship '
                        "that would cause an infinite loop!"
                    )
            dictionary = {}
            column_names = [column.name for column in obj.__table__.columns]
            for key in column_names:
                value = obj.__getattribute__(key)
                if isinstance(value, datetime):
                    value = arrow.get(value).isoformat()
                elif isinstance(value, Decimal):
                    value = float(value)
                elif isinstance(value, set):
                    value = sorted(value)
                dictionary[key] = value
            for key in [
                attribute
                for attribute in dir(obj)
                if not attribute.startswith("_")
                and attribute != "metadata"
                and attribute not in column_names
            ]:
                value = obj.__getattribute__(key)
                dictionary[key] = value
            return dictionary
        return super().default(obj)

Return JSON list from Flask Sqlalchemy query [duplicate]

Django has some good automatic serialization of ORM models returned from DB to JSON format.
How to serialize SQLAlchemy query result to JSON format?
I tried jsonpickle.encode but it encodes query object itself.
I tried json.dumps(items) but it returns
TypeError: <Product('3', 'some name', 'some desc')> is not JSON serializable
Is it really so hard to serialize SQLAlchemy ORM objects to JSON /XML? Isn't there any default serializer for it? It's very common task to serialize ORM query results nowadays.
What I need is just to return JSON or XML data representation of SQLAlchemy query result.
SQLAlchemy objects query result in JSON/XML format is needed to be used in javascript datagird (JQGrid http://www.trirand.com/blog/)
You could just output your object as a dictionary:
class User:
def as_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
And then you use User.as_dict() to serialize your object.
As explained in Convert sqlalchemy row object to python dict
A flat implementation
You could use something like this:
from sqlalchemy.ext.declarative import DeclarativeMeta
class AlchemyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj.__class__, DeclarativeMeta):
# an SQLAlchemy class
fields = {}
for field in [x for x in dir(obj) if not x.startswith('_') and x != 'metadata']:
data = obj.__getattribute__(field)
try:
json.dumps(data) # this will fail on non-encodable values, like other classes
fields[field] = data
except TypeError:
fields[field] = None
# a json-encodable dict
return fields
return json.JSONEncoder.default(self, obj)
and then convert to JSON using:
c = YourAlchemyClass()
print json.dumps(c, cls=AlchemyEncoder)
It will ignore fields that are not encodable (set them to 'None').
It doesn't auto-expand relations (since this could lead to self-references, and loop forever).
A recursive, non-circular implementation
If, however, you'd rather loop forever, you could use:
from sqlalchemy.ext.declarative import DeclarativeMeta
def new_alchemy_encoder():
_visited_objs = []
class AlchemyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj.__class__, DeclarativeMeta):
# don't re-visit self
if obj in _visited_objs:
return None
_visited_objs.append(obj)
# an SQLAlchemy class
fields = {}
for field in [x for x in dir(obj) if not x.startswith('_') and x != 'metadata']:
fields[field] = obj.__getattribute__(field)
# a json-encodable dict
return fields
return json.JSONEncoder.default(self, obj)
return AlchemyEncoder
And then encode objects using:
print json.dumps(e, cls=new_alchemy_encoder(), check_circular=False)
This would encode all children, and all their children, and all their children... Potentially encode your entire database, basically. When it reaches something its encoded before, it will encode it as 'None'.
A recursive, possibly-circular, selective implementation
Another alternative, probably better, is to be able to specify the fields you want to expand:
def new_alchemy_encoder(revisit_self = False, fields_to_expand = []):
_visited_objs = []
class AlchemyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj.__class__, DeclarativeMeta):
# don't re-visit self
if revisit_self:
if obj in _visited_objs:
return None
_visited_objs.append(obj)
# go through each field in this SQLalchemy class
fields = {}
for field in [x for x in dir(obj) if not x.startswith('_') and x != 'metadata']:
val = obj.__getattribute__(field)
# is this field another SQLalchemy object, or a list of SQLalchemy objects?
if isinstance(val.__class__, DeclarativeMeta) or (isinstance(val, list) and len(val) > 0 and isinstance(val[0].__class__, DeclarativeMeta)):
# unless we're expanding this field, stop here
if field not in fields_to_expand:
# not expanding this field: set it to None and continue
fields[field] = None
continue
fields[field] = val
# a json-encodable dict
return fields
return json.JSONEncoder.default(self, obj)
return AlchemyEncoder
You can now call it with:
print json.dumps(e, cls=new_alchemy_encoder(False, ['parents']), check_circular=False)
To only expand SQLAlchemy fields called 'parents', for example.
Python 3.7+ and Flask 1.1+ can use the built-in dataclasses package
from dataclasses import dataclass
from datetime import datetime
from flask import Flask, jsonify
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
db = SQLAlchemy(app)
#dataclass
class User(db.Model):
id: int
email: str
id = db.Column(db.Integer, primary_key=True, auto_increment=True)
email = db.Column(db.String(200), unique=True)
#app.route('/users/')
def users():
users = User.query.all()
return jsonify(users)
if __name__ == "__main__":
users = User(email="user1#gmail.com"), User(email="user2#gmail.com")
db.create_all()
db.session.add_all(users)
db.session.commit()
app.run()
The /users/ route will now return a list of users.
[
{"email": "user1#gmail.com", "id": 1},
{"email": "user2#gmail.com", "id": 2}
]
Auto-serialize related models
#dataclass
class Account(db.Model):
id: int
users: User
id = db.Column(db.Integer)
users = db.relationship(User) # User model would need a db.ForeignKey field
The response from jsonify(account) would be this.
{
"id":1,
"users":[
{
"email":"user1#gmail.com",
"id":1
},
{
"email":"user2#gmail.com",
"id":2
}
]
}
Overwrite the default JSON Encoder
from flask.json import JSONEncoder
class CustomJSONEncoder(JSONEncoder):
"Add support for serializing timedeltas"
def default(o):
if type(o) == datetime.timedelta:
return str(o)
if type(o) == datetime.datetime:
return o.isoformat()
return super().default(o)
app.json_encoder = CustomJSONEncoder
You can convert a RowProxy to a dict like this:
d = dict(row.items())
Then serialize that to JSON ( you will have to specify an encoder for things like datetime values )
It's not that hard if you just want one record ( and not a full hierarchy of related records ).
json.dumps([(dict(row.items())) for row in rs])
I recommend using marshmallow. It allows you to create serializers to represent your model instances with support to relations and nested objects.
Here is a truncated example from their docs. Take the ORM model, Author:
class Author(db.Model):
id = db.Column(db.Integer, primary_key=True)
first = db.Column(db.String(80))
last = db.Column(db.String(80))
A marshmallow schema for that class is constructed like this:
class AuthorSchema(Schema):
id = fields.Int(dump_only=True)
first = fields.Str()
last = fields.Str()
formatted_name = fields.Method("format_name", dump_only=True)
def format_name(self, author):
return "{}, {}".format(author.last, author.first)
...and used like this:
author_schema = AuthorSchema()
author_schema.dump(Author.query.first())
...would produce an output like this:
{
"first": "Tim",
"formatted_name": "Peters, Tim",
"id": 1,
"last": "Peters"
}
Have a look at their full Flask-SQLAlchemy Example.
A library called marshmallow-sqlalchemy specifically integrates SQLAlchemy and marshmallow. In that library, the schema for the Author model described above looks like this:
class AuthorSchema(ModelSchema):
class Meta:
model = Author
The integration allows the field types to be inferred from the SQLAlchemy Column types.
marshmallow-sqlalchemy here.
You can use introspection of SqlAlchemy as this :
mysql = SQLAlchemy()
from sqlalchemy import inspect
class Contacts(mysql.Model):
__tablename__ = 'CONTACTS'
id = mysql.Column(mysql.Integer, primary_key=True)
first_name = mysql.Column(mysql.String(128), nullable=False)
last_name = mysql.Column(mysql.String(128), nullable=False)
phone = mysql.Column(mysql.String(128), nullable=False)
email = mysql.Column(mysql.String(128), nullable=False)
street = mysql.Column(mysql.String(128), nullable=False)
zip_code = mysql.Column(mysql.String(128), nullable=False)
city = mysql.Column(mysql.String(128), nullable=False)
def toDict(self):
return { c.key: getattr(self, c.key) for c in inspect(self).mapper.column_attrs }
#app.route('/contacts',methods=['GET'])
def getContacts():
contacts = Contacts.query.all()
contactsArr = []
for contact in contacts:
contactsArr.append(contact.toDict())
return jsonify(contactsArr)
#app.route('/contacts/<int:id>',methods=['GET'])
def getContact(id):
contact = Contacts.query.get(id)
return jsonify(contact.toDict())
Get inspired from an answer here :
Convert sqlalchemy row object to python dict
Flask-JsonTools package has an implementation of JsonSerializableBase Base class for your models.
Usage:
from sqlalchemy.ext.declarative import declarative_base
from flask.ext.jsontools import JsonSerializableBase
Base = declarative_base(cls=(JsonSerializableBase,))
class User(Base):
#...
Now the User model is magically serializable.
If your framework is not Flask, you can just grab the code
For security reasons you should never return all the model's fields. I prefer to selectively choose them.
Flask's json encoding now supports UUID, datetime and relationships (and added query and query_class for flask_sqlalchemy db.Model class). I've updated the encoder as follows:
app/json_encoder.py
from sqlalchemy.ext.declarative import DeclarativeMeta
from flask import json
class AlchemyEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o.__class__, DeclarativeMeta):
data = {}
fields = o.__json__() if hasattr(o, '__json__') else dir(o)
for field in [f for f in fields if not f.startswith('_') and f not in ['metadata', 'query', 'query_class']]:
value = o.__getattribute__(field)
try:
json.dumps(value)
data[field] = value
except TypeError:
data[field] = None
return data
return json.JSONEncoder.default(self, o)
app/__init__.py
# json encoding
from app.json_encoder import AlchemyEncoder
app.json_encoder = AlchemyEncoder
With this I can optionally add a __json__ property that returns the list of fields I wish to encode:
app/models.py
class Queue(db.Model):
id = db.Column(db.Integer, primary_key=True)
song_id = db.Column(db.Integer, db.ForeignKey('song.id'), unique=True, nullable=False)
song = db.relationship('Song', lazy='joined')
type = db.Column(db.String(20), server_default=u'audio/mpeg')
src = db.Column(db.String(255), nullable=False)
created_at = db.Column(db.DateTime, server_default=db.func.now())
updated_at = db.Column(db.DateTime, server_default=db.func.now(), onupdate=db.func.now())
def __init__(self, song):
self.song = song
self.src = song.full_path
def __json__(self):
return ['song', 'src', 'type', 'created_at']
I add @jsonapi to my view, return the result list, and then my output is as follows:
[
{
"created_at": "Thu, 23 Jul 2015 11:36:53 GMT",
"song":
{
"full_path": "/static/music/Audioslave/Audioslave [2002]/1 Cochise.mp3",
"id": 2,
"path_name": "Audioslave/Audioslave [2002]/1 Cochise.mp3"
},
"src": "/static/music/Audioslave/Audioslave [2002]/1 Cochise.mp3",
"type": "audio/mpeg"
}
]
A more detailed explanation.
In your model, add:
def as_dict(self):
return {c.name: str(getattr(self, c.name)) for c in self.__table__.columns}
The str() call is for Python 3; if you are using Python 2, use unicode() instead. It helps serialize dates; you can remove it if you are not dealing with those.
You can now query the database like this
some_result = User.query.filter_by(id=current_user.id).first().as_dict()
.first() is needed to avoid weird errors. as_dict() then serializes the result, which is ready to be turned into JSON:
jsonify(some_result)
While the original question goes back a while, the number of answers here (and my own experiences) suggest it's a non-trivial question with many different approaches of varying complexity and different trade-offs.
That's why I built the SQLAthanor library that extends SQLAlchemy's declarative ORM with configurable serialization/de-serialization support that you might want to take a look at.
The library supports:
Python 2.7, 3.4, 3.5, and 3.6.
SQLAlchemy versions 0.9 and higher
serialization/de-serialization to/from JSON, CSV, YAML, and Python dict
serialization/de-serialization of columns/attributes, relationships, hybrid properties, and association proxies
enabling and disabling of serialization for particular formats and columns/relationships/attributes (e.g. you want to support an inbound password value, but never include an outbound one)
pre-serialization and post-deserialization value processing (for validation or type coercion)
a pretty straightforward syntax that is both Pythonic and seamlessly consistent with SQLAlchemy's own approach
You can check out the (I hope!) comprehensive docs here: https://sqlathanor.readthedocs.io/en/latest
Hope this helps!
Custom serialization and deserialization.
"from_json" (class method) builds a Model object based on json data.
"deserialize" could be called only on instance, and merge all data from json into Model instance.
"serialize" - recursive serialization
__write_only__ property is needed to define write only properties ("password_hash" for example).
from sqlalchemy.orm.attributes import QueryableAttribute

class Serializable(object):
__exclude__ = ('id',)
__include__ = ()
__write_only__ = ()
@classmethod
def from_json(cls, json, selfObj=None):
if selfObj is None:
self = cls()
else:
self = selfObj
exclude = (cls.__exclude__ or ()) + Serializable.__exclude__
include = cls.__include__ or ()
if json:
for prop, value in json.items():
# ignore all non user data, e.g. only
if (prop not in exclude or prop in include) and isinstance(
getattr(cls, prop, None), QueryableAttribute):
setattr(self, prop, value)
return self
def deserialize(self, json):
if not json:
return None
return self.__class__.from_json(json, selfObj=self)
@classmethod
def serialize_list(cls, object_list=[]):
output = []
for li in object_list:
if isinstance(li, Serializable):
output.append(li.serialize())
else:
output.append(li)
return output
def serialize(self, **kwargs):
# init write only props
if len(getattr(self.__class__, '__write_only__', ())) == 0:
self.__class__.__write_only__ = ()
dictionary = {}
expand = kwargs.get('expand', ()) or ()
prop = 'props'
if expand:
# expand all the fields
for key in expand:
getattr(self, key)
iterable = self.__dict__.items()
is_custom_property_set = False
# include only properties passed as parameter
if (prop in kwargs) and (kwargs.get(prop, None) is not None):
is_custom_property_set = True
iterable = kwargs.get(prop, None)
# loop through all accessible properties
for key in iterable:
accessor = key
if isinstance(key, tuple):
accessor = key[0]
if not (accessor in self.__class__.__write_only__) and not accessor.startswith('_'):
# force a select from the db to be able to get relationships
if is_custom_property_set:
getattr(self, accessor, None)
if isinstance(self.__dict__.get(accessor), list):
dictionary[accessor] = self.__class__.serialize_list(object_list=self.__dict__.get(accessor))
# check if those properties are read only
elif isinstance(self.__dict__.get(accessor), Serializable):
dictionary[accessor] = self.__dict__.get(accessor).serialize()
else:
dictionary[accessor] = self.__dict__.get(accessor)
return dictionary
Here is a solution that lets you select the relations you want to include in your output as deep as you would like to go.
NOTE: this is a complete re-write, taking a dict/str as an argument rather than a list; it fixes a few issues.
def deep_dict(self, relations={}):
"""Output a dict of an SA object recursing as deep as you want.
Takes one argument, relations which is a dictionary of relations we'd
like to pull out. The relations dict items can be a single relation
name or deeper relation names connected by sub dicts
Example:
Say we have a Person object with a family relationship
person.deep_dict(relations={'family':None})
Say the family object has homes as a relation then we can do
person.deep_dict(relations={'family':{'homes':None}})
OR
person.deep_dict(relations={'family':'homes'})
Say homes has a relation like rooms you can do
person.deep_dict(relations={'family':{'homes':'rooms'}})
and so on...
"""
mydict = dict((c, str(a)) for c, a in
self.__dict__.items() if c != '_sa_instance_state')
if not relations:
# just return ourselves
return mydict
# otherwise we need to go deeper
if not isinstance(relations, dict) and not isinstance(relations, str):
raise Exception("relations should be a dict or a str, it is of type {}".format(type(relations)))
# got here so check and handle if we were passed a dict
if isinstance(relations, dict):
# we were passed deeper info
for left, right in relations.items():
myrel = getattr(self, left)
if isinstance(myrel, list):
mydict[left] = [rel.deep_dict(relations=right) for rel in myrel]
else:
mydict[left] = myrel.deep_dict(relations=right)
# if we get here check and handle if we were passed a string
elif isinstance(relations, str):
# passed a single item
myrel = getattr(self, relations)
left = relations
if isinstance(myrel, list):
mydict[left] = [rel.deep_dict(relations=None)
for rel in myrel]
else:
mydict[left] = myrel.deep_dict(relations=None)
return mydict
So, for an example using person/family/homes/rooms, turning it into JSON is just:
json.dumps(person.deep_dict(relations={'family':{'homes':'rooms'}}))
step1:
class CNAME:
...
def as_dict(self):
return {item.name: getattr(self, item.name) for item in self.__table__.columns}
step2:
results = []
for data in session.query(CNAME).all():
results.append(data.as_dict())
step3:
return jsonify(results)
Even though it's an old post, and maybe I'm not answering the question above exactly, I want to talk about my serialization; at least it works for me.
I use FastAPI, SQLAlchemy and MySQL, but I don't use the ORM model:
# from sqlalchemy import create_engine
# from sqlalchemy.orm import sessionmaker
# engine = create_engine(config.SQLALCHEMY_DATABASE_URL, pool_pre_ping=True)
# SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Serialization code
import decimal
import datetime
def alchemy_encoder(obj):
"""JSON encoder function for SQLAlchemy special classes."""
if isinstance(obj, datetime.date):
return obj.strftime("%Y-%m-%d %H:%M:%S")
elif isinstance(obj, decimal.Decimal):
return float(obj)
import json
from sqlalchemy import text
# db is SessionLocal() object
app_sql = 'SELECT * FROM app_info ORDER BY app_id LIMIT :page,:page_size'
# The next two are the parameters passed in
page = 1
page_size = 10
# execute sql and return a <class 'sqlalchemy.engine.result.ResultProxy'> object
app_list = db.execute(text(app_sql), {'page': page, 'page_size': page_size})
# serialize
res = json.loads(json.dumps([dict(r) for r in app_list], default=alchemy_encoder))
If it doesn't work, please ignore my answer. I based it on this reference:
https://codeandlife.com/2014/12/07/sqlalchemy-results-to-json-the-easy-way/
Install simplejson by
pip install simplejson and then create a class:
from collections import OrderedDict
from datetime import datetime

class Serialise(object):
def _asdict(self):
"""
Serialization logic for converting entities using flask's jsonify
:return: An ordered dictionary
:rtype: :class:`collections.OrderedDict`
"""
result = OrderedDict()
# Get the columns
for key in self.__mapper__.c.keys():
if isinstance(getattr(self, key), datetime):
result["x"] = getattr(self, key).timestamp() * 1000
result["timestamp"] = result["x"]
else:
result[key] = getattr(self, key)
return result
and inherit this class in every ORM class so that this _asdict function gets registered on every ORM class, and boom.
And use jsonify anywhere.
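A sketch of how that inheritance might look; the Vendor model and its fields are made up for illustration, and calling _asdict() explicitly keeps it independent of whether Flask picks up simplejson:
class Vendor(Serialise, db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80))
    created_at = db.Column(db.DateTime)

# later, in a view
return jsonify([v._asdict() for v in Vendor.query.all()])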
It is not so straightforward. I wrote some code to do this. I'm still working on it, and it uses the MochiKit framework. It basically translates compound objects between Python and Javascript using a proxy and registered JSON converters.
Browser side for database objects is db.js
It needs the basic Python proxy source in proxy.js.
On the Python side there is the base proxy module.
Then finally the SqlAlchemy object encoder in webserver.py.
It also depends on metadata extractors found in the models.py file.
def alc2json(row):
return dict([(col, str(getattr(row,col))) for col in row.__table__.columns.keys()])
I thought I'd play a little code golf with this one.
FYI: I am using automap_base since we have a separately designed schema according to business requirements. I just started using SQLAlchemy today, but the documentation states that automap_base is an extension to declarative_base, which seems to be the typical paradigm in the SQLAlchemy ORM, so I believe this should work.
It does not get fancy with following foreign keys per Tjorriemorrie's solution, but it simply matches columns to values and handles Python types by str()-ing the column values. Our values consist of Python datetime.time and decimal.Decimal results, so it gets the job done.
Hope this helps any passers-by!
I know this is quite an old post. I took the solution given by @SashaB and modified it as per my need.
I added the following things to it:
Field ignore list: A list of fields to be ignored while serializing
Field replace list: A dictionary containing field names to be replaced by values while serializing.
Removed methods and BaseQuery getting serialized
My code is as follows:
import json
from flask_sqlalchemy import BaseQuery
from sqlalchemy.ext.declarative import DeclarativeMeta

def alchemy_json_encoder(revisit_self=False, fields_to_expand=[], fields_to_ignore=[], fields_to_replace={}):
"""
Serialize SQLAlchemy result into JSON
:param revisit_self: True / False
:param fields_to_expand: Fields which are to be expanded for including their children and all
:param fields_to_ignore: Fields to be ignored while encoding
:param fields_to_replace: Field keys to be replaced by values assigned in dictionary
:return: Json serialized SQLAlchemy object
"""
_visited_objs = []
class AlchemyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj.__class__, DeclarativeMeta):
# don't re-visit self
if revisit_self:
if obj in _visited_objs:
return None
_visited_objs.append(obj)
# go through each field in this SQLalchemy class
fields = {}
for field in [x for x in dir(obj) if not x.startswith('_') and x != 'metadata' and x not in fields_to_ignore]:
val = obj.__getattribute__(field)
# is this field a method definition, or an SQLAlchemy object?
if not hasattr(val, "__call__") and not isinstance(val, BaseQuery):
field_name = fields_to_replace[field] if field in fields_to_replace else field
# is this field another SQLalchemy object, or a list of SQLalchemy objects?
if isinstance(val.__class__, DeclarativeMeta) or \
(isinstance(val, list) and len(val) > 0 and isinstance(val[0].__class__, DeclarativeMeta)):
# unless we're expanding this field, stop here
if field not in fields_to_expand:
# not expanding this field: set it to None and continue
fields[field_name] = None
continue
fields[field_name] = val
# a json-encodable dict
return fields
return json.JSONEncoder.default(self, obj)
return AlchemyEncoder
Hope it helps someone!
Use the built-in serializer in SQLAlchemy:
from sqlalchemy.ext.serializer import loads, dumps
obj = MyAlchemyObject()
# serialize object
serialized_obj = dumps(obj)
# deserialize object
obj = loads(serialized_obj)
If you're transferring the object between sessions, remember to detach the object from the current session using session.expunge(obj).
To attach it again, just do session.add(obj).
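A sketch of the cross-session round trip this describes (the two session names are assumptions; loads() takes the metadata and, optionally, a scoped session and engine to associate the restored object with):
from sqlalchemy.ext.serializer import dumps, loads

serialized_obj = dumps(obj)
session_a.expunge(obj)                           # detach from the originating session

restored = loads(serialized_obj, Base.metadata)  # rebuild the object
session_b.add(restored)                          # attach it to another session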
Under Flask, this works and handles datetime fields, transforming a field of type
'time': datetime.datetime(2018, 3, 22, 15, 40) into
"time": "2018-03-22 15:40:00":
# inside a model method, where self is the mapped instance
obj = {c.name: str(getattr(self, c.name)) for c in self.__table__.columns}
# This to get the JSON body
return json.dumps(obj)
# Or this to get a response object
return jsonify(obj)
The following code will serialize a SQLAlchemy result to JSON.
import json
from collections import OrderedDict
def asdict(self):
result = OrderedDict()
for key in self.__mapper__.c.keys():
if getattr(self, key) is not None:
result[key] = str(getattr(self, key))
else:
result[key] = getattr(self, key)
return result
def to_array(all_vendors):
v = [ ven.asdict() for ven in all_vendors ]
return json.dumps(v)
Calling it:
def all_products():
all_products = Products.query.all()
return to_array(all_products)
The AlchemyEncoder is wonderful but sometimes fails with Decimal values. Here is an improved encoder that solves the decimal problem:
import json
from decimal import Decimal
from sqlalchemy.ext.declarative import DeclarativeMeta

class AlchemyEncoder(json.JSONEncoder):
# To serialize SQLalchemy objects
def default(self, obj):
if isinstance(obj.__class__, DeclarativeMeta):
model_fields = {}
for field in [x for x in dir(obj) if not x.startswith('_') and x != 'metadata']:
data = obj.__getattribute__(field)
try:
json.dumps(data) # this will fail on non-encodable values, like other classes
model_fields[field] = data
except TypeError:
model_fields[field] = None
return model_fields
if isinstance(obj, Decimal):
return float(obj)
return json.JSONEncoder.default(self, obj)
When using SQLAlchemy to connect to a database, here is a simple solution which is highly configurable: use pandas.
import pandas as pd
import sqlalchemy
#sqlalchemy engine configuration
engine = sqlalchemy.create_engine....
def my_function():
#read in from sql directly into a pandas dataframe
#check the pandas documentation for additional config options
sql_DF = pd.read_sql_table("table_name", con=engine)
# "orient" is optional here but allows you to specify the json formatting you require
sql_json = sql_DF.to_json(orient="index")
return sql_json
(Tiny tweak on Sasha B's really excellent answer)
This specifically converts datetime objects to strings which in the original answer would be converted to None:
# Standard library imports
from datetime import datetime
import json
# 3rd party imports
from sqlalchemy.ext.declarative import DeclarativeMeta
class JsonEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj.__class__, DeclarativeMeta):
result = {}
# Remove invalid fields and just get the column attributes
columns = [x for x in dir(obj) if not x.startswith("_") and x != "metadata"]
for column in columns:
value = obj.__getattribute__(column)
try:
json.dumps(value)
result[column] = value
except TypeError:
if isinstance(value, datetime):
result[column] = value.__str__()
else:
result[column] = None
return result
return json.JSONEncoder.default(self, obj)
from datetime import datetime
from typing import List

class SqlToDict:
def __init__(self, data) -> None:
self.data = data
def to_timestamp(self, date):
if isinstance(date, datetime):
return int(datetime.timestamp(date))
else:
return date
def to_dict(self) -> List:
arr = []
for i in self.data:
keys = [*i.keys()]
values = [*i]
values = [self.to_timestamp(d) for d in values]
arr.append(dict(zip(keys, values)))
return arr
For example:
SqlToDict(data).to_dict()
Very late to the party (2023), but here is my implementation:
def obj_to_dict(obj, remove=['_sa_instance_state'], debug=False):
result = {}
if type(obj).__name__ == "Row":
return dict(obj)
obj = obj.__dict__
for key in obj:
if key in remove:
continue
result[key] = obj[key]
if debug:
print(result)
return result
The built-in serializer chokes with "utf-8 cannot decode invalid start byte" for some inputs. Instead, I went with:
def row_to_dict(row):
temp = dict(row.__dict__)  # copy, so the live instance is not mutated
temp.pop('_sa_instance_state', None)
return temp
def rows_to_list(rows):
ret_rows = []
for row in rows:
ret_rows.append(row_to_dict(row))
return ret_rows
@website_blueprint.route('/api/v1/some/endpoint', methods=['GET'])
def some_api():
'''
/some_endpoint
'''
rows = rows_to_list(SomeModel.query.all())
response = app.response_class(
response=jsonplus.dumps(rows),
status=200,
mimetype='application/json'
)
return response
Maybe you can use a class like this
from typing import Any, Dict

from sqlalchemy import Table
from sqlalchemy.ext.declarative import declarative_base, declared_attr
class Custom:
"""Some custom logic here!"""
__table__: Table # def for mypy
@declared_attr
def __tablename__(cls): # pylint: disable=no-self-argument
return cls.__name__ # pylint: disable= no-member
def to_dict(self) -> Dict[str, Any]:
"""Serializes only column data."""
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
Base = declarative_base(cls=Custom)
class MyOwnTable(Base):
#COLUMNS!
With that all objects have the to_dict method
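Usage might then look like this; the model and its columns are made up for illustration (and assume the usual sqlalchemy Column/Integer/String imports):
from sqlalchemy import Column, Integer, String

class MyOwnTable(Base):
    id = Column(Integer, primary_key=True)
    name = Column(String(50))

obj = MyOwnTable(id=1, name="anurag")
print(obj.to_dict())  # {'id': 1, 'name': 'anurag'}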
While using some raw SQL and undefined objects, using cursor.description got what I was looking for:
with connection.cursor() as cur:
print(query)
cur.execute(query)
for item in cur.fetchall():
row = {column.name: item[i] for i, column in enumerate(cur.description)}
print(row)
This is a JSONEncoder version that preserves model column order and only keeps recursively defined column and relationship fields. It also formats most JSON unserializable types:
import json
from datetime import datetime
from decimal import Decimal
import arrow
from sqlalchemy.ext.declarative import DeclarativeMeta
class SQLAlchemyJSONEncoder(json.JSONEncoder):
"""
SQLAlchemy ORM JSON Encoder
If you have a "backref" relationship defined in your SQLAlchemy model,
this encoder raises a ValueError to stop an infinite loop.
"""
def default(self, obj):
if isinstance(obj, datetime):
return arrow.get(obj).isoformat()
elif isinstance(obj, Decimal):
return float(obj)
elif isinstance(obj, set):
return sorted(obj)
elif isinstance(obj.__class__, DeclarativeMeta):
for attribute, relationship in obj.__mapper__.relationships.items():
if isinstance(relationship.__getattribute__("backref"), tuple):
raise ValueError(
f'{obj.__class__} object has a "backref" relationship '
"that would cause an infinite loop!"
)
dictionary = {}
column_names = [column.name for column in obj.__table__.columns]
for key in column_names:
value = obj.__getattribute__(key)
if isinstance(value, datetime):
value = arrow.get(value).isoformat()
elif isinstance(value, Decimal):
value = float(value)
elif isinstance(value, set):
value = sorted(value)
dictionary[key] = value
for key in [
attribute
for attribute in dir(obj)
if not attribute.startswith("_")
and attribute != "metadata"
and attribute not in column_names
]:
value = obj.__getattribute__(key)
dictionary[key] = value
return dictionary
return super().default(obj)
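Usage is the standard cls= hook of json.dumps (my_instance below is a placeholder for any mapped instance, or a list of them):
payload = json.dumps(my_instance, cls=SQLAlchemyJSONEncoder)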

String Encrypted Type of JSONType changes are not saved to Database

Backstory
I have a questionnaire that asks sensitive questions, most of which are true/false. The majority of the time the values are false, which poses a challenge when keeping the data private at rest. When encrypting each question into a separate column, it is really easy to tell which value is true and which is false with a bit of guessing. To combat this, the questions and answers are put into a dictionary object with some salt (nonsense that changes randomly) and then encrypted, making it impossible to know what the answers were without the key.
Method
Below is an example of the model used to encrypt the data with salt at rest making it impossible to look at the data and know the contents.
import random
from typing import Optional

import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy_utils.types import JSONType
from sqlalchemy_utils.types.encrypted.encrypted_type import StringEncryptedType, AesEngine
Base = declarative_base()
class SensitiveQuestionnaire(Base):
user_id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
_data: dict = sa.Column(StringEncryptedType(JSONType, 'secret', AesEngine, 'pkcs5'),
nullable=False, default=lambda: {'_salt': salt_shaker()})
# values are viewed using a python property to look into the `_data` dict
@property
def sensitive_question(self) -> Optional[float]:
return self._data.get('sensitive_question')
# values are set into the `_data` dict
@sensitive_question.setter
def sensitive_question(self, value: bool) -> None:
self._data['sensitive_question'] = value
# in a real example there would be 20+ properties that map to questions
def __init__(self, **kwargs):
# Sqlalchemy does not use the __init__ method so we are free to set object defaults here
self._data = {'_salt': salt_shaker()}
for key in kwargs:
setattr(self, key, kwargs[key])
@property
def _salt(self) -> str:
return self._data['_salt']
def salt_shaker():
return ''.join([random.choice('hldjs..') for i in range(50)])
The Problem
After the SensitiveQuestionnaire object is initialized none of the changes are persisted in the database.
# GIVEN a questionnaire
questionnaire = model.SensitiveQuestionnaire(user_id=1)
db.session.add(questionnaire)
db.session.commit()
# WHEN updating the questionnaire and saving it to the database
questionnaire.sensitive_question= True
db.session.commit()
# THEN we get the questionnaire from the database
db_questionnaire = model.SensitiveQuestionnaire.query\
.filter(model.SensitiveQuestionnaire.user_id == 1).first()
# THEN the sensitive_question value is persisted
assert db_questionnaire.sensitive_question is True
The value of db_questionnaire.sensitive_question is None when it should be True.
After spending the better part of the day figuring this out, the cause of the issue is how SQLAlchemy knows when there is a change. The short version is that SQLAlchemy hooks Python's __setitem__ to call its changed() method, letting it know there was a change. More info can be found in SQLAlchemy's docs.
The answer is to wrap the StringEncryptedType in a MutableDict type.
Mutation Tracking
Provide support for tracking of in-place changes to scalar values, which are propagated into ORM change events on owning parent objects.
From SqlAlchemy's docs: https://docs.sqlalchemy.org/en/13/orm/extensions/mutable.html
Solution
Condensed version... wrapping the StringEncryptedType in a MutableDict
_data = data: dict = sa.Column(
MutableDict.as_mutable(StringEncryptedType(JSONType, 'secret', AesEngine, 'pkcs5')),
nullable=False, default=lambda: {'_salt': salt_shaker()})
Full version from the question above
import random
from typing import Optional

import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.mutable import MutableDict
from sqlalchemy_utils.types import JSONType
from sqlalchemy_utils.types.encrypted.encrypted_type import StringEncryptedType, AesEngine
Base = declarative_base()
class SensitiveQuestionnaire(Base):
user_id: int = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
# The MutableDict.as_mutable below is what changed!
_data: dict = sa.Column(
MutableDict.as_mutable(StringEncryptedType(JSONType, 'secret', AesEngine, 'pkcs5')),
nullable=False, default=lambda: {'_salt': salt_shaker()})
@property
def sensitive_question(self) -> Optional[float]:
return self._data.get('sensitive_question')
# values are set into the `_data` dict
@sensitive_question.setter
def sensitive_question(self, value: bool) -> None:
self._data['sensitive_question'] = value
# in a real example there would be 20+ properties that map to questions
def __init__(self, **kwargs):
self._data = {'_salt': salt_shaker()}
for key in kwargs:
setattr(self, key, kwargs[key])
@property
def _salt(self) -> str:
return self._data['_salt']
def salt_shaker():
return ''.join([random.choice('hldjs..') for i in range(50)])

Flask Postgresql array not permanently updating

I'm working on a project using Flask and a PostgreSQL database, with SQLAlchemy.
I have Group objects which have a list of User IDs who are members of the group. For some reason, when I try to add an ID to a group, it will not save properly.
If I try members.append(user_id), it doesn't seem to work at all. However, if I try members += [user_id], the id will show up in the view listing all the groups, but if I restart the server, the added value(s) is (are) not there. The initial values, however, are.
Related code:
Adding group to the database initially:
db = SQLAlchemy(app)
# ...
g = Group(request.form['name'], user_id)
db.session.add(g)
db.session.commit()
The Group class:
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.dialects.postgresql import ARRAY
class Group(db.Model):
__tablename__ = "groups"
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(128))
leader = db.Column(db.Integer)
# list of the members in the group based on user id
members = db.Column(ARRAY(db.Integer))
def __init__(self, name, leader):
self.name = name
self.leader = leader
self.members = [leader]
def __repr__(self):
return "Name: {}, Leader: {}, Members: {}".format(self.name, self.leader, self.members)
def add_user(self, user_id):
self.members += [user_id]
My test function for updating the Group:
def add_2_to_group():
g = Group.query.all()[0]
g.add_user(2)
db.session.commit()
return redirect(url_for('show_groups'))
Thanks for any help!
As you have mentioned, the ARRAY datatype in sqlalchemy is not mutation-tracked: in-place changes such as append() are not detected, so they are never flushed once the array has been initialised.
To solve this, create a MutableList class:
from sqlalchemy.ext.mutable import Mutable
class MutableList(Mutable, list):
def append(self, value):
list.append(self, value)
self.changed()
@classmethod
def coerce(cls, key, value):
if not isinstance(value, MutableList):
if isinstance(value, list):
return MutableList(value)
return Mutable.coerce(key, value)
else:
return value
This snippet allows you to extend a list to add mutability to it. So, now you can use the class above to create a mutable array type like:
class Group(db.Model):
...
members = db.Column(MutableList.as_mutable(ARRAY(db.Integer)))
...
You can use the flag_modified function to mark the property as having changed. In this example, you could change your add_user method to:
from sqlalchemy.orm.attributes import flag_modified
# ~~~
def add_user(self, user_id):
self.members += [user_id]
flag_modified(self, 'members')
To anyone in the future: so it turns out that in-place changes to arrays through SQLAlchemy aren't tracked. So, once they're initialized in the database, appending to them has no lasting effect. There's probably a way to do this, but there are better ways to do what we're trying to do.
This is a hacky solution, but what you can do is:
Store the existing array temporarily
Set the column value to None
Set the column value to the existing temporary array
For example:
g = Group.query.all()[0]
temp_array = g.members
g.members = None
db.session.commit()
db.session.refresh(g)
g.members = temp_array
db.session.commit()
In my case it was solved by creating a new list and assigning it to the object's attribute, instead of updating the existing list in place. Assigning a newly created object gives the attribute a new reference, which is what makes SQLAlchemy pick up the change.
Here in Model,
Table: question
optional_id = sa.Column(sa.ARRAY(sa.Integer), nullable=True)
In views,
option_list=list(question.optional_id if question.optional_id else [])
if option_list:
question.optional_id.clear()
option_list.append(obj.id)
question.optional_id=option_list
else:
question.optional_id=[obj.id]

method of iterating over sqlalchemy model's defined columns?

I've been trying to figure out how to iterate over the list of columns defined in a SQLAlchemy model. I want it for writing some serialization and copy methods to a couple of models. I can't just iterate over the obj.__dict__ since it contains a lot of SA specific items.
Anyone know of a way to just get the id and desc names from the following?
class JobStatus(Base):
__tablename__ = 'jobstatus'
id = Column(Integer, primary_key=True)
desc = Column(Unicode(20))
In this small case I could easily create a:
def logme(self):
return {'id': self.id, 'desc': self.desc}
but I'd prefer something that auto-generates the dict (for larger objects).
You could use the following function:
def __unicode__(self):
return "[%s(%s)]" % (self.__class__.__name__, ', '.join('%s=%s' % (k, self.__dict__[k]) for k in sorted(self.__dict__) if '_sa_' != k[:4]))
It will exclude SA magic attributes, but will not exclude the relations. So basically it might load the dependencies, parents, children etc, which is definitely not desirable.
But it is actually much easier because if you inherit from Base, you have a __table__ attribute, so that you can do:
for c in JobStatus.__table__.columns:
print c
for c in JobStatus.__table__.foreign_keys:
print c
See How to discover table properties from SQLAlchemy mapped object - similar question.
Edit by Mike: Please see functions such as Mapper.c and Mapper.mapped_table. If using 0.8 and higher also see Mapper.attrs and related functions.
Example for Mapper.attrs:
from sqlalchemy import inspect
mapper = inspect(JobStatus)
for column in mapper.attrs:
print column.key
You can get the list of defined properties from the mapper. For your case you're interested in only ColumnProperty objects.
from sqlalchemy.orm import class_mapper
import sqlalchemy
def attribute_names(cls):
return [prop.key for prop in class_mapper(cls).iterate_properties
if isinstance(prop, sqlalchemy.orm.ColumnProperty)]
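For the JobStatus model above, this returns just the mapped attribute names:
attribute_names(JobStatus)  # ['id', 'desc']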
I realise that this is an old question, but I've just come across the same requirement and would like to offer an alternative solution to future readers.
As Josh notes, full SQL field names will be returned by JobStatus.__table__.columns, so rather than the original field name id, you will get jobstatus.id. Not as useful as it could be.
The solution to obtaining a list of field names as they were originally defined is to look at the _data attribute on the column collection, which contains the full data. If we look at JobStatus.__table__.columns._data, it looks like this:
{'desc': Column('desc', Unicode(length=20), table=<jobstatus>),
'id': Column('id', Integer(), table=<jobstatus>, primary_key=True, nullable=False)}
From here you can simply call JobStatus.__table__.columns._data.keys() which gives you a nice, clean list:
['id', 'desc']
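Note that _data is a private attribute; the same clean list is available through the public keys() method of the column collection, so a safer equivalent is:
JobStatus.__table__.columns.keys()  # ['id', 'desc']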
Assuming you're using SQLAlchemy's declarative mapping, you can use __mapper__ attribute to get at the class mapper. To get all mapped attributes (including relationships):
obj.__mapper__.attrs.keys()
If you want strictly column names, use obj.__mapper__.column_attrs.keys(). See the documentation for other views.
https://docs.sqlalchemy.org/en/latest/orm/mapping_api.html#sqlalchemy.orm.mapper.Mapper.attrs
self.__table__.columns will "only" give you the columns defined in that particular class, i.e. without inherited ones. If you need all of them, use self.__mapper__.columns. In your example I'd probably use something like this:
class JobStatus(Base):
...
def __iter__(self):
values = vars(self)
for attr in self.__mapper__.columns.keys():
if attr in values:
yield attr, values[attr]
def logme(self):
return dict(self)
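Because __iter__ yields (name, value) pairs, an instance can also be fed straight to dict() from outside the class (the keyword constructor is the declarative default):
status = JobStatus(id=1, desc=u'running')
dict(status)  # {'id': 1, 'desc': u'running'}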
To get an as_dict method on all of my classes I used a Mixin class which uses the technique described by Ants Aasma.
from sqlalchemy.orm import class_mapper, ColumnProperty

class BaseMixin(object):
def as_dict(self):
result = {}
for prop in class_mapper(self.__class__).iterate_properties:
if isinstance(prop, ColumnProperty):
result[prop.key] = getattr(self, prop.key)
return result
And then use it like this in your classes
class MyClass(BaseMixin, Base):
pass
That way you can invoke the following on an instance of MyClass.
> myclass = MyClass()
> myclass.as_dict()
Hope this helps.
I've played around with this a bit further. I actually needed to render my instances as dicts in the form of a HAL object, with its links to related objects. So I've added this little magic down here, which will crawl over all properties of the class, same as the above, with the difference that I will crawl deeper into Relationship properties and generate links for these automatically.
Please note that this will only work for relationships that have a single primary key.
from functools import reduce

from sqlalchemy.orm import class_mapper, ColumnProperty, RelationshipProperty
from sqlalchemy.orm.collections import InstrumentedDict, InstrumentedList, InstrumentedSet
def deepgetattr(obj, attr):
"""Recurses through an attribute chain to get the ultimate value."""
return reduce(getattr, attr.split('.'), obj)
class BaseMixin(object):
def as_dict(self):
IgnoreInstrumented = (
InstrumentedList, InstrumentedDict, InstrumentedSet
)
result = {}
for prop in class_mapper(self.__class__).iterate_properties:
if isinstance(getattr(self, prop.key), IgnoreInstrumented):
# All reverse relations are assigned to each related instances
# we don't need to link these, so we skip
continue
if isinstance(prop, ColumnProperty):
# Add simple property to the dictionary with its value
result[prop.key] = getattr(self, prop.key)
if isinstance(prop, RelationshipProperty):
# Construct links for relations
if 'links' not in result:
result['links'] = {}
# Get value using nested class keys
value = (
deepgetattr(
self, prop.key + "." + prop.mapper.primary_key[0].key
)
)
result['links'][prop.key] = {}
result['links'][prop.key]['href'] = (
"/{}/{}".format(prop.key, value)
)
return result
self.__dict__
returns a dict where keys are attribute names and values the values of the object.
/!\ there is a supplementary attribute: '_sa_instance_state'
but you can handle it :)
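If you want a plain dict without that key, copy before popping so the live instance is not mutated:
d = dict(u.__dict__)              # shallow copy, so the instance stays intact
d.pop('_sa_instance_state', None)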
While row._asdict() worked for most of the cases, I needed an approach that also works after the object creation process (db.session.add etc.). The idea is to create a to_dict method accessing columns on the table object and using standard getattr.
class Inventory(db.Model):
__tablename__ = 'inventory'
id = db.Column('id', db.Integer(), primary_key=True)
date = db.Column('date', db.DateTime, nullable=False, default=datetime.utcnow)
item = db.Column('item', db.String(100))
def to_dict(self):
return {
column.name: getattr(self, column.name, None)
for column in Inventory.__table__.columns
}
record = Inventory(item="gloves")
db.session.add(record)
db.session.commit()
# print(record._asdict()) # << that doesn't work
print(record.to_dict()) # << that works as intended
This solution will produce a dict with columns only - no meta attributes or anything that you will have to manually clean after the next major update (if any).
PS. I use flask-sqlalchemy but it does not change the idea
I wanted to get the data of a particular instance of a Model dynamically, so I used this code:
def to_json(instance):
# get columns data
data = {}
columns = list(instance.__table__.columns)
for column in columns:
data[column.name] = instance.__dict__[column.name]
return data
To map a model from SQLAlchemy to JSON, taking relationships into account, I use this code:
from sqlalchemy.orm import class_mapper
from sqlalchemy.ext.declarative import DeclarativeMeta
from sqlalchemy.orm import ColumnProperty
from sqlalchemy.orm import RelationshipProperty
class BaseMixin(object):
"""BaseMixin"""
__repr_hide = ["created_at", "updated_at"]
__insert_hide = []
@property
def _repr_hide(self):
return self.__repr_hide
@_repr_hide.setter
def _repr_hide(self, k):
self.__repr_hide.append(k)
@property
def _insert_hide(self):
return self.__insert_hide
@_insert_hide.setter
def _insert_hide(self, k):
self.__insert_hide.append(k)
def serialize(self, obj):
"""serialize from json"""
for k, v in obj.items():
if k in self.__repr_hide:
continue
if k in self.__insert_hide:
continue
if k in self.__table__.c.keys():
setattr(self, k, v)
return self
def deserialize(self, backref=None):
"""deserialize to json"""
res = dict()
for prop in class_mapper(self.__class__).iterate_properties:
if prop.key in self.__repr_hide:
continue
if isinstance(prop, ColumnProperty):
res[prop.key] = getattr(self, prop.key)
for prop in class_mapper(self.__class__).iterate_properties:
if prop.key in self.__repr_hide:
continue
if isinstance(prop, RelationshipProperty):
if prop.key == str(backref):
continue
key, value = prop.key, getattr(self, prop.key)
if value is None:
res[key] = None
elif isinstance(value.__class__, DeclarativeMeta):
res[key] = value.deserialize(backref=self.__table__)
else:
res[key] = [i.deserialize(backref=self.__table__) for i in value]
return res
def __iter__(self):
return iter(self.deserialize().items())
def __repr__(self):
vals = ", ".join(
"%s=%r" % (n, getattr(self, n))
for n in self.__table__.c.keys()
if n not in self._repr_hide
)
return "<%s={%s}>" % (self.__class__.__name__, vals)
I know this is an old question, but what about:
class JobStatus(Base):
...
def columns(self):
return list(self.__table__.columns.keys())
Then, to get column names: jobStatus.columns()
That would return ['id', 'desc']
Then you can loop through, and do stuff with the columns and values:
for col in jobStatus.columns():
doStuff(getattr(jobStatus, col))
