How do I make compound columns with SQLAlchemy declarative?

I frequently make models with Text columns that hold Markdown-formatted rich text. My models look like this:
class Document(Base):
    id = Column(Integer, primary_key=True)
    title = Column(Unicode(250))
    description = Column(Text)
    description_html = Column(Text)
My edit forms (a) read from and write to description and then (b) write the Markdown-formatted version to description_html. My (Jinja2) view templates (c) load the HTML version with {{ doc.description_html|safe }}.
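Concretely, the recurring write step looks something like this sketch on every form save (markdown.markdown() from the third-party markdown package stands in for whatever renderer is actually used; save_document() is a hypothetical helper):

import markdown  # assumption: the third-party "markdown" package as the renderer

def save_document(doc, raw_text):
    doc.description = raw_text                           # (a) store the raw Markdown
    doc.description_html = markdown.markdown(raw_text)   # (b) store the rendered HTML
# (c) the template then renders {{ doc.description_html|safe }}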
I'd like to cut down these three recurring operations into one Column definition, like this:
class Document(Base):
    id = Column(Integer, primary_key=True)
    title = Column(Unicode(250))
    description = Column(MarkdownText)
Where MarkdownText is a new column type that:
1. Makes two columns in the database table (description and description_html),
2. Upon writes to the column, also writes a Markdown-formatted version to the html column, and
3. Provides an __html__() method that returns the contents of the html column. This will allow it to be used from a Jinja2 template as {{ doc.description }} without the safe filter.
Question: Is #1 possible? Can I define a column that makes two columns?

Here we go - now with composite columns:
from sqlalchemy import create_engine
from sqlalchemy import Column, Integer, Text
from sqlalchemy.orm import composite, sessionmaker
from sqlalchemy.ext.declarative import declarative_base

engine = create_engine('sqlite:///')
session = sessionmaker(bind=engine)()
Base = declarative_base()

class MarkdownText(object):
    def __init__(self, text):
        self._text = text
        self._html = "<html>%s</html>" % text  # stand-in for a real Markdown renderer

    @classmethod
    def _from_db(cls, text, html):
        mt = MarkdownText(text)
        mt._html = html
        return mt

    def __composite_values__(self):
        return (self._text, self._html)

    def __str__(self):
        return self._text

    @property
    def __html__(self):
        return self._html

class Foo(Base):
    __tablename__ = 'foo'
    id = Column(Integer, primary_key=True)
    a = composite(MarkdownText._from_db,
                  Column('_text', Text),
                  Column('_html', Text))

    def __init__(self, a):
        self.a = MarkdownText(a)

    def __repr__(self):
        return '(%s)' % (self.a)

Base.metadata.create_all(engine)
session.add_all([Foo('test'), Foo('nips')])
session.commit()
x = session.query(Foo).all()
print(x)
print(x[0].a.__html__)
print(x[0].a)
And this gives us nicely:
[(test), (nips)]
<html>test</html>
test

Instead of answering your bullet points one by one, let me ask this: "Do you really want to store both the plain text and the HTML in the database?" Here's how I would do it:
def text2html(text):
    # TODO: Implement me!
    pass

class Document(Base):
    id = Column(Integer, primary_key=True)
    title = Column(Unicode(250))
    description = Column(Text)

    @property
    def description_html(self):
        return text2html(self.description)
And in the view, the HTML description can be accessed simply as document.description_html...
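For completeness, one way to fill in text2html is the third-party markdown package (an assumption; any renderer works), memoized so repeated template accesses don't re-render the same text:

from functools import lru_cache
import markdown  # assumption: any Markdown renderer could go here

@lru_cache(maxsize=1024)
def text2html(text):
    """Render Markdown to HTML, cached per unique source string."""
    return markdown.markdown(text)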

How to convert Class object into json string [duplicate]

This question already has answers here:
How to serialize SqlAlchemy result to JSON?
(37 answers)
Closed 4 years ago.
I'm trying to jsonify a SQLAlchemy result set in Flask/Python.
The Flask mailing list suggested the following method http://librelist.com/browser//flask/2011/2/16/jsonify-sqlalchemy-pagination-collection-result/#04a0754b63387f87e59dda564bde426e :
return jsonify(json_list = qryresult)
However I'm getting the following error back:
TypeError: <flaskext.sqlalchemy.BaseQuery object at 0x102c2df90>
is not JSON serializable
What am I overlooking here?
I have found this question: How to serialize SqlAlchemy result to JSON? which seems very similar however I didn't know whether Flask had some magic to make it easier as the mailing list post suggested.
Edit: for clarification, this is what my model looks like:
class Rating(db.Model):
    __tablename__ = 'rating'

    id = db.Column(db.Integer, primary_key=True)
    fullurl = db.Column(db.String())
    url = db.Column(db.String())
    comments = db.Column(db.Text)
    overall = db.Column(db.Integer)
    shipping = db.Column(db.Integer)
    cost = db.Column(db.Integer)
    honesty = db.Column(db.Integer)
    communication = db.Column(db.Integer)
    name = db.Column(db.String())
    ipaddr = db.Column(db.String())
    date = db.Column(db.String())

    def __init__(self, fullurl, url, comments, overall, shipping, cost, honesty, communication, name, ipaddr, date):
        self.fullurl = fullurl
        self.url = url
        self.comments = comments
        self.overall = overall
        self.shipping = shipping
        self.cost = cost
        self.honesty = honesty
        self.communication = communication
        self.name = name
        self.ipaddr = ipaddr
        self.date = date
It seems that you actually haven't executed your query. Try the following:
return jsonify(json_list = qryresult.all())
[Edit]: The problem with jsonify is that objects usually cannot be jsonified automatically. Even Python's datetime fails ;)
What I have done in the past is add an extra property (like serialize) to classes that need to be serialized.
def dump_datetime(value):
    """Serialize a datetime object into string form for JSON processing."""
    if value is None:
        return None
    return [value.strftime("%Y-%m-%d"), value.strftime("%H:%M:%S")]

class Foo(db.Model):
    # ... SQLAlchemy defs here..
    def __init__(self, ...):
        # self.foo = ...
        pass

    @property
    def serialize(self):
        """Return object data in easily serializable format"""
        return {
            'id': self.id,
            'modified_at': dump_datetime(self.modified_at),
            # This is an example how to deal with Many2Many relations
            'many2many': self.serialize_many2many
        }

    @property
    def serialize_many2many(self):
        """
        Return object's relations in easily serializable format.
        NB! Calls many2many's serialize property.
        """
        return [item.serialize for item in self.many2many]
And now for views I can just do:
return jsonify(json_list=[i.serialize for i in qryresult.all()])
[Edit 2019]:
In case you have more complex objects or circular references, use a library like marshmallow.
Here's what's usually sufficient for me:
I create a serialization mixin which I use with my models. The serialization function basically fetches whatever attributes the SQLAlchemy inspector exposes and puts it in a dict.
from sqlalchemy.inspection import inspect

class Serializer(object):
    def serialize(self):
        return {c: getattr(self, c) for c in inspect(self).attrs.keys()}

    @staticmethod
    def serialize_list(l):
        return [m.serialize() for m in l]
All that's needed now is to extend the SQLAlchemy model with the Serializer mixin class.
If there are fields you do not wish to expose, or that need special formatting, simply override the serialize() function in the model subclass.
class User(db.Model, Serializer):
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String)
    password = db.Column(db.String)

    # ...

    def serialize(self):
        d = Serializer.serialize(self)
        del d['password']
        return d
In your controllers, all you have to do is to call the serialize() function (or serialize_list(l) if the query results in a list) on the results:
def get_user(id):
    user = User.query.get(id)
    return json.dumps(user.serialize())

def get_users():
    users = User.query.all()
    return json.dumps(User.serialize_list(users))
I had the same need, to serialize into json. Take a look at this question. It shows how to discover columns programmatically. So, from that I created the code below. It works for me, and I'll be using it in my web app. Happy coding!
def to_json(inst, cls):
    """
    Jsonify the sql alchemy query result.
    """
    convert = dict()
    # add your conversions for things like datetimes
    # and what-not that aren't serializable.
    d = dict()
    for c in cls.__table__.columns:
        v = getattr(inst, c.name)
        if c.type in convert.keys() and v is not None:
            try:
                d[c.name] = convert[c.type](v)
            except Exception:
                d[c.name] = "Error: Failed to convert using " + str(convert[c.type])
        elif v is None:
            d[c.name] = str()
        else:
            d[c.name] = v
    return json.dumps(d)

class Person(base):
    __tablename__ = 'person'
    id = Column(Integer, Sequence('person_id_seq'), primary_key=True)
    first_name = Column(Text)
    last_name = Column(Text)
    email = Column(Text)

    @property
    def json(self):
        return to_json(self, self.__class__)
Here's my approach:
https://github.com/n0nSmoker/SQLAlchemy-serializer
pip install SQLAlchemy-serializer
You can easily add the mixin to your model and then just call the .to_dict() method on an instance.
You can also write your own mixin on the base of SerializerMixin.
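A minimal usage sketch (names are illustrative; see the project's README for the current API):

from sqlalchemy_serializer import SerializerMixin

class User(db.Model, SerializerMixin):
    serialize_rules = ('-password',)  # exclude a column from the output

    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String)
    password = db.Column(db.String)

user = User.query.first()
user.to_dict()  # -> {'id': 1, 'username': '...'} (password excluded)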
For a flat query (no joins) you can do this
@app.route('/results/')
def results():
    data = Table.query.all()
    result = [d.__dict__ for d in data]
    return jsonify(result=result)
and if you only want to return certain columns from the database you can do this
@app.route('/results/')
def results():
    cols = ['id', 'url', 'shipping']
    data = Table.query.all()
    result = [{col: getattr(d, col) for col in cols} for d in data]
    return jsonify(result=result)
Ok, I've been working on this for a few hours, and I've developed what I believe to be the most pythonic solution yet. The following code snippets are Python 3 but shouldn't be too horribly painful to backport if you need to.
The first thing we're gonna do is start with a mixin that makes your db models act kinda like dicts:
from sqlalchemy.inspection import inspect

class ModelMixin:
    """Provide dict-like interface to db.Model subclasses."""

    def __getitem__(self, key):
        """Expose object attributes like dict values."""
        return getattr(self, key)

    def keys(self):
        """Identify what db columns we have."""
        return inspect(self).attrs.keys()
Now we're going to define our model, inheriting the mixin:
class MyModel(db.Model, ModelMixin):
    id = db.Column(db.Integer, primary_key=True)
    foo = db.Column(...)
    bar = db.Column(...)
    # etc ...
That's all it takes to be able to pass an instance of MyModel() to dict() and get a real live dict instance out of it, which gets us quite a long way towards making jsonify() understand it. Next, we need to extend JSONEncoder to get us the rest of the way:
from flask.json import JSONEncoder
from contextlib import suppress

class MyJSONEncoder(JSONEncoder):
    def default(self, obj):
        # Optional: convert datetime objects to ISO format
        with suppress(AttributeError):
            return obj.isoformat()
        return dict(obj)

app.json_encoder = MyJSONEncoder
Bonus points: if your model contains computed fields (that is, you want your JSON output to contain fields that aren't actually stored in the database), that's easy too. Just define your computed fields as @property methods, and extend the keys() method like so:
class MyModel(db.Model, ModelMixin):
    id = db.Column(db.Integer, primary_key=True)
    foo = db.Column(...)
    bar = db.Column(...)

    @property
    def computed_field(self):
        return 'this value did not come from the db'

    def keys(self):
        return super().keys() + ['computed_field']
Now it's trivial to jsonify:
@app.route('/whatever', methods=['GET'])
def whatever():
    return jsonify(dict(results=MyModel.query.all()))
If you are using flask-restful you can use marshal:
from flask_restful import Resource, fields, marshal  # flask.ext.restful in older Flask

topic_fields = {
    'title': fields.String,
    'content': fields.String,
    'uri': fields.Url('topic'),
    'creator': fields.String,
    'created': fields.DateTime(dt_format='rfc822')
}

class TopicListApi(Resource):
    def get(self):
        return {'topics': [marshal(topic, topic_fields) for topic in DbTopic.query.all()]}
You need to explicitly list what you are returning and what type it is, which I prefer anyway for an API. Serialization is easily taken care of (no need for jsonify), and dates are also not a problem. Note that the content for the uri field is automatically generated based on the topic endpoint and the id.
Here's my answer if you're using the declarative base (with help from some of the answers already posted):
# in your models definition where you define and extend declarative_base()
from sqlalchemy.ext.declarative import declarative_base
...
Base = declarative_base()
Base.query = db_session.query_property()
...

# define a new class (call it "Model" or whatever) with an as_dict() method defined
class Model():
    def as_dict(self):
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}

# and extend both the Base and Model class in your model definition, e.g.
class Rating(Base, Model):
    __tablename__ = 'rating'
    id = db.Column(db.Integer, primary_key=True)
    fullurl = db.Column(db.String())
    url = db.Column(db.String())
    comments = db.Column(db.Text)
    ...

# then after you query and have a resultset (rs) of ratings
rs = Rating.query.all()

# you can jsonify it with
s = json.dumps([r.as_dict() for r in rs], default=alchemyencoder)
print(s)

# or if you have a single row
r = Rating.query.first()

# you can jsonify it with
s = json.dumps(r.as_dict(), default=alchemyencoder)

# you will need this alchemyencoder wherever you are calling json.dumps,
# to handle datetime and decimal formats
# credit to Joonas @ http://codeandlife.com/2014/12/07/sqlalchemy-results-to-json-the-easy-way/
def alchemyencoder(obj):
    """JSON encoder function for SQLAlchemy special classes."""
    if isinstance(obj, datetime.date):
        return obj.isoformat()
    elif isinstance(obj, decimal.Decimal):
        return float(obj)
As of Flask-Restful 0.3.6, the Request Parsing docs recommend marshmallow:
marshmallow is an ORM/ODM/framework-agnostic library for converting
complex datatypes, such as objects, to and from native Python
datatypes.
A simple marshmallow example is shown below.
from marshmallow import Schema, fields

class UserSchema(Schema):
    name = fields.Str()
    email = fields.Email()
    created_at = fields.DateTime()

from marshmallow import pprint

user = User(name="Monty", email="monty@python.org")
schema = UserSchema()
result = schema.dump(user)
pprint(result)
# {"name": "Monty",
#  "email": "monty@python.org",
#  "created_at": "2014-08-17T14:54:16.049594+00:00"}
The core features include:
Declaring Schemas
Serializing Objects (“Dumping”)
Deserializing Objects (“Loading”)
Handling Collections of Objects
Validation
Specifying Attribute Names
Specifying Serialization/Deserialization Keys
Refactoring: Implicit Field Creation
Ordering Output
“Read-only” and “Write-only” Fields
Specify Default Serialization/Deserialization Values
Nesting Schemas (see the sketch after this list)
Custom Fields
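As a sketch of the nesting feature from that list, a schema can embed another schema for a related object (BlogSchema and the author field are hypothetical names, reusing UserSchema from above):

from marshmallow import Schema, fields

class BlogSchema(Schema):
    title = fields.Str()
    # fields.Nested serializes the related object with its own schema
    author = fields.Nested(UserSchema)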
Here is a way to add an as_dict() method on every class, as well as any other method you want to have on every single class.
Not sure if this is the desired way or not, but it works...
class Base(object):
    def as_dict(self):
        return dict((c.name,
                     getattr(self, c.name))
                    for c in self.__table__.columns)

Base = declarative_base(cls=Base)
I've been looking at this problem for the better part of a day, and here's what I've come up with (credit to https://stackoverflow.com/a/5249214/196358 for pointing me in this direction).
(Note: I'm using flask-sqlalchemy, so my model declaration format is a bit different from straight sqlalchemy).
In my models.py file:
import json
from datetime import datetime
from flask import current_app, request

class Serializer(object):
    __public__ = None  # must be overridden by implementors

    def to_serializable_dict(self):
        d = {}
        for public_key in self.__public__:
            value = getattr(self, public_key)
            if value:
                d[public_key] = value
        return d

class SWEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, Serializer):
            return obj.to_serializable_dict()
        if isinstance(obj, datetime):
            return obj.isoformat()
        return json.JSONEncoder.default(self, obj)

def SWJsonify(*args, **kwargs):
    return current_app.response_class(json.dumps(dict(*args, **kwargs), cls=SWEncoder, indent=None if request.is_xhr else 2), mimetype='application/json')
    # stolen from https://github.com/mitsuhiko/flask/blob/master/flask/helpers.py
and all my model objects look like this:
class User(db.Model, Serializer):
    __public__ = ['id', 'username']

    ... field definitions ...
In my views I call SWJsonify wherever I would have called Jsonify, like so:
@app.route('/posts')
def posts():
    posts = Post.query.limit(PER_PAGE).all()
    return SWJsonify({'posts': posts})
Seems to work pretty well. Even on relationships. I haven't gotten far with it, so YMMV, but so far it feels pretty "right" to me.
Suggestions welcome.
I was looking for something like the Rails approach used in ActiveRecord's to_json and implemented something similar using this mixin after being unsatisfied with other suggestions. It handles nested models, and including or excluding attributes of the top level or nested models.
from sqlalchemy.inspection import inspect
from flask_sqlalchemy import BaseQuery

class Serializer(object):
    def serialize(self, include={}, exclude=[], only=[]):
        serialized = {}
        for key in inspect(self).attrs.keys():
            to_be_serialized = True
            value = getattr(self, key)
            if key in exclude or (only and key not in only):
                to_be_serialized = False
            elif isinstance(value, BaseQuery):
                to_be_serialized = False
                if key in include:
                    to_be_serialized = True
                    nested_params = include.get(key, {})
                    value = [i.serialize(**nested_params) for i in value]
            if to_be_serialized:
                serialized[key] = value
        return serialized
Then, to get the BaseQuery serializable I extended BaseQuery
class SerializableBaseQuery(BaseQuery):
    def serialize(self, include={}, exclude=[], only=[]):
        return [m.serialize(include, exclude, only) for m in self]
For the following models
class ContactInfo(db.Model, Serializer):
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    full_name = db.Column(db.String())
    source = db.Column(db.String())
    source_id = db.Column(db.String())
    email_addresses = db.relationship('EmailAddress', backref='contact_info', lazy='dynamic')
    phone_numbers = db.relationship('PhoneNumber', backref='contact_info', lazy='dynamic')

class EmailAddress(db.Model, Serializer):
    id = db.Column(db.Integer, primary_key=True)
    email_address = db.Column(db.String())
    type = db.Column(db.String())
    contact_info_id = db.Column(db.Integer, db.ForeignKey('contact_info.id'))

class PhoneNumber(db.Model, Serializer):
    id = db.Column(db.Integer, primary_key=True)
    phone_number = db.Column(db.String())
    type = db.Column(db.String())
    contact_info_id = db.Column(db.Integer, db.ForeignKey('contact_info.id'))
    phone_numbers = db.relationship('Invite', backref='phone_number', lazy='dynamic')
You could do something like
#app.route("/contact/search", methods=['GET'])
def contact_search():
contact_name = request.args.get("name")
matching_contacts = ContactInfo.query.filter(ContactInfo.full_name.like("%{}%".format(contact_name)))
serialized_contact_info = matching_contacts.serialize(
include={
"phone_numbers" : {
"exclude" : ["contact_info", "contact_info_id"]
},
"email_addresses" : {
"exclude" : ["contact_info", "contact_info_id"]
}
}
)
return jsonify(serialized_contact_info)
I was working with a defaultdict of lists of RowProxy objects (from a raw SQL query) named jobDict.
It took me a while to figure out what type the objects were.
This was a really simple, quick way to get clean JSON encoding: just typecast the row to a list, and define the dict with list as its default value.
import json
from collections import defaultdict
from sqlalchemy.engine.result import RowProxy  # the "tricky" import; location varies by SQLAlchemy version

jobDict = defaultdict(list)

def set_default(obj):
    # trickyness needed here via import to know type
    if isinstance(obj, RowProxy):
        return list(obj)
    raise TypeError

jsonEncoded = json.dumps(jobDict, default=set_default)
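Note that in SQLAlchemy 1.4+ RowProxy was renamed Row, and rows expose a dict-like _mapping view, so an updated sketch of the same idea would be:

from sqlalchemy.engine import Row  # SQLAlchemy 1.4+

def set_default(obj):
    if isinstance(obj, Row):
        return dict(obj._mapping)  # dict-like view of the row
    raise TypeError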
I just want to add my method to do this.
Just define a custom JSON encoder to serialize your db models:
class ParentEncoder(json.JSONEncoder):
    def default(self, obj):
        # convert object to a dict
        d = {}
        if isinstance(obj, Parent):
            return {"id": obj.id, "name": obj.name, 'children': list(obj.child)}
        if isinstance(obj, Child):
            return {"id": obj.id, "name": obj.name}
        d.update(obj.__dict__)
        return d
then in your view function
parents = Parent.query.all()
dat = json.dumps({"data": parents}, cls=ParentEncoder)
resp = Response(response=dat, status=200, mimetype="application/json")
return resp
It works well even though the parent has relationships.
It's been a long time and there are already lots of valid answers, but the following code block also seems to work:
my_object = SqlAlchemyModel()
my_serializable_obj = my_object.__dict__
del my_serializable_obj["_sa_instance_state"]
print(jsonify(my_serializable_obj))
I'm aware that this is not a perfect solution, nor as elegant as the others, but for those who want a quick fix, it might be worth a try.

SQLAlchemy Declarative: Adding a static text attribute to a column

I am using SQLAlchemy 0.7.9 and Python 2.7.3 with Bottle 0.11.4. I am an amateur at Python.
I have a class (with many columns) derived from declarative base like this:
class Base(object):
    @declared_attr
    def __tablename__(cls):
        return cls.__name__.lower()

    id = Column(Integer, primary_key=True)

    def to_dict(self):
        serialized = dict((column_name, getattr(self, column_name))
                          for column_name in self.__table__.c.keys())
        return serialized

Base = declarative_base(cls=Base)
class Case(Base):
    version = Column(Integer)
    title = Column(String(32))
    plausible_dd = Column(Text)
    frame = Column(Text)
    primary_task = Column(Text)
    secondary_task = Column(Text)
    eval_objectives = Column(Text)
    ...
I am currently using this 'route' in Bottle to dump out a row/class in JSON like this:
@app.route('/<name>/:record')
def default(name, record, db):
    myClass = getattr(sys.modules[__name__], name)
    parms = db.query(myClass).filter(myClass.id == record)
    result = json.dumps(([parm.to_dict() for parm in parms]))
    return result
My first question is: How can I have each column have some static text that I can use as a proper name such that I can iterate over the columns and get their values AND proper names? For example:
class Case(Base):
    version = Column(Integer)
    version.pn = "Version Number"
My second question is: Does the following do what I am looking for? I have seen examples of this, but I don't understand the explanation.
Example from sqlalchemy.org:
id = Column("some_table_id", Integer)
My interpretation of the example:
version = Column("Version Number", Integer)
Obviously I don't want a table column to be created. I just want the column to have an "attribute" in the generic sense. Thank you in advance.
The info dictionary could be used for that. Define it in your model class like this:
class Case(Base):
    version = Column(Integer, info={'description': 'Version Number'})
Then it can be accessed as a property of the table column:
desc = Case.__table__.c.version.info.get('description', '<no description>')
Update
Here's one way to iterate through all the columns in the table and get their names, values and descriptions. This example uses dict comprehension, which is available since Python 2.7.
class Case(Base):
    # Column definitions go here...

    def as_dict(self):
        return {c.name: (getattr(self, c.name), c.info.get('description'))
                for c in self.__table__.c}
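A quick usage sketch of the method above (values are hypothetical): each key maps to a (value, description) tuple, with None for columns that have no info description.

case = session.query(Case).first()
case.as_dict()
# -> {'id': (1, None), 'version': (3, 'Version Number'), 'title': (u'My case', None), ...}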

Flask-SQLalchemy update a row's information

How can I update a row's information?
For example I'd like to alter the name column of the row that has the id 5.
Retrieve an object using the tutorial shown in the Flask-SQLAlchemy documentation. Once you have the entity that you want to change, change the entity itself. Then, db.session.commit().
For example:
admin = User.query.filter_by(username='admin').first()
admin.email = 'my_new_email@example.com'
db.session.commit()
user = User.query.get(5)
user.name = 'New Name'
db.session.commit()
Flask-SQLAlchemy is based on SQLAlchemy, so be sure to check out the SQLAlchemy Docs as well.
There is an update method on the BaseQuery object in SQLAlchemy, which is returned by filter_by.
num_rows_updated = User.query.filter_by(username='admin').update(dict(email='my_new_email@example.com'))
db.session.commit()
The advantage of using update over changing the entity comes when there are many objects to be updated.
If you want to give add_user permission to all the admins,
rows_changed = User.query.filter_by(role='admin').update(dict(permission='add_user'))
db.session.commit()
Notice that filter_by takes keyword arguments (use only one =) as opposed to filter which takes an expression.
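For example, these two queries are equivalent:

User.query.filter_by(username='admin')       # keyword argument, one '='
User.query.filter(User.username == 'admin')  # column expression, two '='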
This does not work if you modify a pickled attribute of the model. Pickled attributes should be replaced in order to trigger updates:
from flask import Flask
from flask_sqlalchemy import SQLAlchemy  # flask.ext.sqlalchemy in older Flask
from pprint import pprint

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/users.db'
db = SQLAlchemy(app)

class User(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80), unique=True)
    data = db.Column(db.PickleType())

    def __init__(self, name, data):
        self.name = name
        self.data = data

    def __repr__(self):
        return '<User %r>' % self.name

db.create_all()

# Create a user.
bob = User('Bob', {})
db.session.add(bob)
db.session.commit()

# Retrieve the row by its name.
bob = User.query.filter_by(name='Bob').first()
pprint(bob.data)  # {}

# Modifying data is ignored.
bob.data['foo'] = 123
db.session.commit()
bob = User.query.filter_by(name='Bob').first()
pprint(bob.data)  # {}

# Replacing data is respected.
bob.data = {'bar': 321}
db.session.commit()
bob = User.query.filter_by(name='Bob').first()
pprint(bob.data)  # {'bar': 321}

# Modifying data is ignored.
bob.data['moo'] = 789
db.session.commit()
bob = User.query.filter_by(name='Bob').first()
pprint(bob.data)  # {'bar': 321}
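An alternative the answer doesn't mention: SQLAlchemy's mutation-tracking extension can wrap PickleType so that in-place changes are detected. A sketch (the model name is hypothetical):

from sqlalchemy.ext.mutable import MutableDict

class TrackedUser(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80), unique=True)
    # MutableDict emits change events when keys are set or deleted,
    # so bob.data['foo'] = 123 is picked up at the next commit.
    data = db.Column(MutableDict.as_mutable(db.PickleType()))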
Just assigning the value and committing them will work for all the data types but JSON and Pickled attributes. Since pickled type is explained above I'll note down a slightly different but easy way to update JSONs.
class User(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80), unique=True)
    data = db.Column(db.JSON)

    def __init__(self, name, data):
        self.name = name
        self.data = data
Let's say the model is like above.
user = User("Jon Dove", {"country":"Sri Lanka"})
db.session.add(user)
db.session.flush()
db.session.commit()
This will add the user into the MySQL database with data {"country":"Sri Lanka"}
Modifying data will be ignored. My code that didn't work is as follows.
user = User.query.filter(User.name == 'Jon Dove').first()
data = user.data
data["province"] = "south"
user.data = data
db.session.merge(user)
db.session.flush()
db.session.commit()
Instead of going through the painful work of copying the JSON to a new dict (not just assigning it to a new variable, as above), which should have worked, I found a simpler way: there is a way to flag the system that the JSON has changed.
Following is the working code.
from sqlalchemy.orm.attributes import flag_modified

user = User.query.filter(User.name == 'Jon Dove').first()
data = user.data
data["province"] = "south"
user.data = data
flag_modified(user, "data")
db.session.merge(user)
db.session.flush()
db.session.commit()
This worked like a charm.
There is another method proposed along with this method here
Hope I've helped someone.
In models.py, define the serializers:
import json
from datetime import date, datetime

def default(o):
    if isinstance(o, (date, datetime)):
        return o.isoformat()

def get_model_columns(instance, exclude=[]):
    columns = instance.__table__.columns.keys()
    columns = list(set(columns) - set(exclude))
    return columns

class User(db.Model):
    __tablename__ = 'user'

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    .......

    ####
    def serializers(self):
        cols = get_model_columns(self)
        dict_val = {}
        for c in cols:
            dict_val[c] = getattr(self, c)
        return json.loads(json.dumps(dict_val, default=default))
In the REST API, we can update the record dynamically by passing the JSON data into the update query:
class UpdateUserDetails(Resource):
    @auth_token_required
    def post(self):
        json_data = request.get_json()
        user_id = current_user.id
        try:
            instance = User.query.filter(User.id == user_id)
            data = instance.update(dict(json_data))
            db.session.commit()
            updateddata = instance.first()
            msg = {"msg": "User details updated successfully", "data": updateddata.serializers()}
            code = 200
        except Exception as e:
            print(e)
            msg = {"msg": "Failed to update the user details! Please contact your administrator."}
            code = 500
        return msg, code  # return the payload along with the status code
I was looking for something a little less intrusive than @Ramesh's answer (which was good) but still dynamic. Here is a solution attaching an update method to a db.Model object.
You pass in a dictionary and it will update only the columns that you pass in.
class SampleObject(db.Model):
    id = db.Column(db.BigInteger, primary_key=True)
    name = db.Column(db.String(128), nullable=False)
    notes = db.Column(db.Text, nullable=False)

    def update(self, update_dictionary: dict):
        for col_name in self.__table__.columns.keys():
            if col_name in update_dictionary:
                setattr(self, col_name, update_dictionary[col_name])
        db.session.add(self)
        db.session.commit()
Then in a route you can do
object = SampleObject.query.where(SampleObject.id == id).first()
object.update(update_dictionary=request.get_json())
Update the columns in Flask:
admin = User.query.filter_by(username='admin').first()
admin.email = 'my_new_email@example.com'
admin.save()
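Note that save() is not part of Flask-SQLAlchemy; the snippet above assumes a custom helper on the model, along these lines (a sketch):

class User(db.Model):
    # ... column definitions ...

    def save(self):
        db.session.add(self)
        db.session.commit()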
To use the update method (which updates the entry outside of the session) you have to query the object in steps like this:
query = db.session.query(UserModel)
query = query.filter(UserModel.id == user_id)
query.update(user_dumped)
db.session.commit()

Reverse mapping from a table to a model in SQLAlchemy

To provide an activity log in my SQLAlchemy-based app, I have a model like this:
class ActivityLog(Base):
    __tablename__ = 'activitylog'
    id = Column(Integer, primary_key=True)
    activity_by_id = Column(Integer, ForeignKey('users.id'), nullable=False)
    activity_by = relation(User, primaryjoin=activity_by_id == User.id)
    activity_at = Column(DateTime, default=datetime.utcnow, nullable=False)
    activity_type = Column(SmallInteger, nullable=False)
    target_table = Column(Unicode(20), nullable=False)
    target_id = Column(Integer, nullable=False)
    target_title = Column(Unicode(255), nullable=False)
The log contains entries for multiple tables, so I can't use ForeignKey relations. Log entries are made like this:
doc = Document(name=u'mydoc', title=u'My Test Document',
               created_by=user, edited_by=user)
session.add(doc)
session.flush()  # See note below

log = ActivityLog(activity_by=user, activity_type=ACTIVITY_ADD,
                  target_table=Document.__table__.name, target_id=doc.id,
                  target_title=doc.title)
session.add(log)
This leaves me with three problems:
1. I have to flush the session before my doc object gets an id. If I had used a ForeignKey column and a relation mapper, I could have simply called ActivityLog(target=doc) and let SQLAlchemy do the work. Is there any way to work around needing to flush by hand?
2. The target_table parameter is too verbose. I suppose I could solve this with a target property setter in ActivityLog that automatically retrieves the table name and id from a given instance.
3. Biggest of all, I'm not sure how to retrieve a model instance from the database. Given an ActivityLog instance log, calling self.session.query(log.target_table).get(log.target_id) does not work, as query() expects a model as parameter (see the sketch after this list).
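(Aside: the lookup in problem 3 can be done by mapping the table name back to its class via the declarative registry; a sketch for SQLAlchemy 1.4+, where older versions expose a similar _decl_class_registry:)

def model_for_table(table_name):
    """Return the mapped class whose table matches table_name."""
    for mapper in Base.registry.mappers:
        if mapper.local_table.name == table_name:
            return mapper.class_
    raise LookupError(table_name)

target = session.query(model_for_table(log.target_table)).get(log.target_id)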
One workaround appears to be to use polymorphism and derive all my models from a base model which ActivityLog recognises. Something like this:
class Entity(Base):
    __tablename__ = 'entities'
    id = Column(Integer, primary_key=True)
    title = Column(Unicode(255), nullable=False)
    edited_at = Column(DateTime, onupdate=datetime.utcnow, nullable=False)
    entity_type = Column(Unicode(20), nullable=False)
    __mapper_args__ = {'polymorphic_on': entity_type}

class Document(Entity):
    __tablename__ = 'documents'
    __mapper_args__ = {'polymorphic_identity': 'document'}
    body = Column(UnicodeText, nullable=False)

class ActivityLog(Base):
    __tablename__ = 'activitylog'
    id = Column(Integer, primary_key=True)
    ...
    target_id = Column(Integer, ForeignKey('entities.id'), nullable=False)
    target = relation(Entity)
If I do this, ActivityLog(...).target will give me a Document instance when it refers to a Document, but I'm not sure it's worth the overhead of having two tables for everything. Should I go ahead and do it this way?
One way to solve this is polymorphic associations. It should solve all 3 of your issues and also make database foreign key constraints work. See the polymorphic association example in SQLAlchemy source. Mike Bayer has an old blogpost that discusses this in greater detail.
Definitely go through the blog post and examples Ants linked to. I did not find the explanation confusing, but it does assume some more experience on the topic.
Few things I can suggest are:
ForeignKeys: in general I agree they are a good thing to have, but I am not sure they are conceptually important in your case: you seem to be using this ActivityLog as an orthogonal cross-cutting concern (AOP); a version with foreign keys would effectively make your business objects aware of the ActivityLog. Another problem with having an FK for audit purposes, given the schema setup you have, is that if you allow object deletion, the FK requirement will delete all the ActivityLog entries for that object.
Automatic logging: you are doing all this logging manually whenever you create/modify(/delete) the object. With SA you could implement a SessionExtension with before_commit which would do the job for you automatically.
In this way you can completely avoid writing parts like below:
log = ActivityLog(activity_by=user, activity_type=ACTIVITY_ADD,
                  target_table=Document.__table__.name, target_id=doc.id,
                  target_title=doc.title)
session.add(log)
EDIT-1: complete sample code added
The code is based on the first non-FK version from http://techspot.zzzeek.org/?p=13.
The choice not to use FK is based on the fact that for audit purposes when the
main object is deleted, it should not cascade to delete all the audit log entries.
Also this keeps auditable objects unaware of the fact they are being audited.
The implementation uses a SA one-to-many relationship. It is possible that some
objects are modified many times, which will result in many audit log entries.
By default SA will load the relationship objects when adding a new entry to the
list. Assuming that during "normal" usage we only want to add a new audit
log entry, we use the lazy='noload' flag so that the relation from the main object
will never be loaded. It is loaded when navigated from the other side though,
and it can also be loaded from the main object using a custom query, which is shown
in the example as well via the activitylog_readonly read-only property.
Code (runnable with some tests):
from datetime import datetime
from sqlalchemy import create_engine, Column, Integer, SmallInteger, String, DateTime, ForeignKey, Table, UnicodeText, Unicode, and_
from sqlalchemy.orm import relationship, dynamic_loader, scoped_session, sessionmaker, class_mapper, backref
from sqlalchemy.orm.session import Session
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm.interfaces import SessionExtension
import logging

logging.basicConfig(level=logging.INFO)
_logger = logging.getLogger()

ACTIVITY_ADD = 1
ACTIVITY_MOD = 2
ACTIVITY_DEL = 3
class ActivityLogSessionExtension(SessionExtension):
    _logger = logging.getLogger('ActivityLogSessionExtension')

    def before_commit(self, session):
        self._logger.debug("before_commit: %s", session)
        for d in session.new:
            self._logger.info("before_commit >> add: %s", d)
            if hasattr(d, 'create_activitylog'):
                log = d.create_activitylog(ACTIVITY_ADD)
        for d in session.dirty:
            self._logger.info("before_commit >> mod: %s", d)
            if hasattr(d, 'create_activitylog'):
                log = d.create_activitylog(ACTIVITY_MOD)
        for d in session.deleted:
            self._logger.info("before_commit >> del: %s", d)
            if hasattr(d, 'create_activitylog'):
                log = d.create_activitylog(ACTIVITY_DEL)
# Configure test data SA
engine = create_engine('sqlite:///:memory:', echo=False)
session = scoped_session(sessionmaker(bind=engine, autoflush=False, extension=ActivityLogSessionExtension()))
Base = declarative_base()
Base.query = session.query_property()
class _BaseMixin(object):
    """ Just a helper mixin class to set properties on object creation.
    Also provides a convenient default __repr__() function, but be aware that
    also relationships are printed, which might result in loading relations.
    """
    def __init__(self, **kwargs):
        for k, v in kwargs.items():
            setattr(self, k, v)

    def __repr__(self):
        return "<%s(%s)>" % (self.__class__.__name__,
                             ', '.join('%s=%r' % (k, self.__dict__[k])
                                       for k in sorted(self.__dict__) if '_sa_' != k[:4] and '_backref_' != k[:9])
                             )
class User(Base, _BaseMixin):
    __tablename__ = u'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)

class Document(Base, _BaseMixin):
    __tablename__ = u'documents'
    id = Column(Integer, primary_key=True)
    title = Column(Unicode(255), nullable=False)
    body = Column(UnicodeText, nullable=False)

class Folder(Base, _BaseMixin):
    __tablename__ = u'folders'
    id = Column(Integer, primary_key=True)
    title = Column(Unicode(255), nullable=False)
    comment = Column(UnicodeText, nullable=False)

class ActivityLog(Base, _BaseMixin):
    __tablename__ = u'activitylog'
    id = Column(Integer, primary_key=True)
    activity_by_id = Column(Integer, ForeignKey('users.id'), nullable=False)
    activity_by = relationship(User)  # @note: no need to specify the primaryjoin
    activity_at = Column(DateTime, default=datetime.utcnow, nullable=False)
    activity_type = Column(SmallInteger, nullable=False)
    target_table = Column(Unicode(20), nullable=False)
    target_id = Column(Integer, nullable=False)
    target_title = Column(Unicode(255), nullable=False)

    # backref relation for auditable
    target = property(lambda self: getattr(self, '_backref_%s' % self.target_table))
def _get_user():
    """ This method returns the User object for the current user.
    @todo: proper implementation required
    @hack: currently returns the 'user2'
    """
    return session.query(User).filter_by(name='user2').one()
# auditable support function
# based on first non-FK version from http://techspot.zzzeek.org/?p=13
def auditable(cls, name):
    def create_activitylog(self, activity_type):
        log = ActivityLog(activity_by=_get_user(),
                          activity_type=activity_type,
                          target_table=table.name,
                          target_title=self.title,
                          )
        getattr(self, name).append(log)
        return log

    mapper = class_mapper(cls)
    table = mapper.local_table
    cls.create_activitylog = create_activitylog

    def _get_activitylog(self):
        return Session.object_session(self).query(ActivityLog).with_parent(self).all()
    setattr(cls, '%s_readonly' % (name,), property(_get_activitylog))

    # no constraints, therefore define constraints in an ad-hoc fashion.
    primaryjoin = and_(
        list(table.primary_key)[0] == ActivityLog.__table__.c.target_id,
        ActivityLog.__table__.c.target_table == table.name
    )
    foreign_keys = [ActivityLog.__table__.c.target_id]
    mapper.add_property(name,
        # @note: because we use the relationship, by default all previous
        # ActivityLog items will be loaded for an object when a new one is
        # added. To avoid this, use either dynamic_loader (http://www.sqlalchemy.org/docs/reference/orm/mapping.html#sqlalchemy.orm.dynamic_loader)
        # or lazy='noload'. This is the trade-off decision to be made.
        # Additional benefit of using lazy='noload' is that one can also
        # record DEL operations in the same way as ADD, MOD
        relationship(
            ActivityLog,
            lazy='noload',  # important for relationship
            primaryjoin=primaryjoin,
            foreign_keys=foreign_keys,
            backref=backref('_backref_%s' % table.name,
                            primaryjoin=list(table.primary_key)[0] == ActivityLog.__table__.c.target_id,
                            foreign_keys=foreign_keys)
        )
    )
# this will define which classes support the ActivityLog interface
auditable(Document, 'activitylogs')
auditable(Folder, 'activitylogs')

# create db schema
Base.metadata.create_all(engine)
## >>>>> TESTS >>>>>>
# create some basic data first
u1 = User(name='user1')
u2 = User(name='user2')
session.add(u1)
session.add(u2)
session.commit()
session.expunge_all()
# --check--
assert not(_get_user() is None)
##############################
## ADD
##############################
_logger.info('-' * 80)
d1 = Document(title=u'Document-1', body=u'Doc1 some body skipped the body')
# when not using SessionExtension for any reason, this can be called manually
#d1.create_activitylog(ACTIVITY_ADD)
session.add(d1)
session.commit()
f1 = Folder(title=u'Folder-1', comment=u'This folder is empty')
# when not using SessionExtension for any reason, this can be called manually
#f1.create_activitylog(ACTIVITY_ADD)
session.add(f1)
session.commit()
# --check--
session.expunge_all()
logs = session.query(ActivityLog).all()
_logger.debug(logs)
assert len(logs) == 2
assert logs[0].activity_type == ACTIVITY_ADD
assert logs[0].target.title == u'Document-1'
assert logs[0].target.title == logs[0].target_title
assert logs[1].activity_type == ACTIVITY_ADD
assert logs[1].target.title == u'Folder-1'
assert logs[1].target.title == logs[1].target_title
##############################
## MOD(ify)
##############################
_logger.info('-' * 80)
session.expunge_all()
d1 = session.query(Document).filter_by(id=1).one()
assert d1.title == u'Document-1'
assert d1.body == u'Doc1 some body skipped the body'
assert d1.activitylogs == []
d1.title = u'Modified: Document-1'
d1.body = u'Modified: body'
# when not using SessionExtension (or it does not work, this can be called manually)
#d1.create_activitylog(ACTIVITY_MOD)
session.commit()
_logger.debug(d1.activitylogs_readonly)
# --check--
session.expunge_all()
logs = session.query(ActivityLog).all()
assert len(logs)==3
assert logs[2].activity_type == ACTIVITY_MOD
assert logs[2].target.title == u'Modified: Document-1'
assert logs[2].target.title == logs[2].target_title
##############################
## DEL(ete)
##############################
_logger.info('-' * 80)
session.expunge_all()
d1 = session.query(Document).filter_by(id=1).one()
# when not using SessionExtension for any reason, this can be called manually,
#d1.create_activitylog(ACTIVITY_DEL)
session.delete(d1)
session.commit()
session.expunge_all()
# --check--
session.expunge_all()
logs = session.query(ActivityLog).all()
assert len(logs)==4
assert logs[0].target is None
assert logs[2].target is None
assert logs[3].activity_type == ACTIVITY_DEL
assert logs[3].target is None
##############################
## print all activity logs
##############################
_logger.info('=' * 80)
logs = session.query(ActivityLog).all()
for log in logs:
_ = log.target
_logger.info("%s -> %s", log, log.target)
##############################
## navigate from main object
##############################
_logger.info('=' * 80)
session.expunge_all()
f1 = session.query(Folder).filter_by(id=1).one()
_logger.info(f1.activitylogs_readonly)
