SQLAlchemy: NoForeignKeysError - python

I'm creating two tables and linking them using SQLAlchemy relationships (using SQLite as my DB)
class Album(Base):
    __tablename__ = 'albums'
    id = Column(INTEGER, primary_key=True, unique=True, autoincrement=True, nullable=False)
    name = Column(TEXT)
    tracks = relationship('Track', back_populates='albums')

class Track(Base):
    __tablename__ = 'tracks'
    id = Column(INTEGER, primary_key=True, unique=True, autoincrement=True, nullable=False)
    name = Column(TEXT)
    albums = relationship('Album', back_populates='tracks')
def insert_data(metadata, path, ext):
    session = get_session()  # returns SQLAlchemy session
    tracks = get_or_create(session,
                           Track,
                           name=metadata['title']
                           )
    _ = get_or_create(session,
                      Album,
                      name=metadata['album'],
                      tracks=tracks
                      )
from itertools import product
from sqlalchemy.orm.exc import NoResultFound

def get_instance(session, model, permutation):
    try:
        return session.query(model).filter_by(**permutation).first()
    except NoResultFound:
        return None

def create_instance(session, model, permutation):
    try:
        instance = model(**permutation)
        session.add(instance)
        session.flush()
    except Exception as msg:
        log.error(f'model:{model}, args:{permutation} -> msg:{msg}')
        session.rollback()
        raise msg
    return instance

def get_or_create(session, model, **metadata):
    data_permutations = [dict(zip(metadata, value)) for value in product(*metadata.values())]
    ret = []
    for permutation in data_permutations:
        instance = get_instance(session, model, permutation)
        if instance is None:
            instance = create_instance(session, model, permutation)
        ret.append(instance)
    session.commit()
    return ret
insert_data(metadata, path, ext)
metadata looks like this:
{
    'name': ['foo'],
    'data': ['bar', 'baz'],
    ...
}
It can have an unlimited number of keys, each with a list of any length as its value. Therefore I create all possible combinations (permutations) of this data and save them as a list of unique dicts, like this:
[{'name': 'foo', 'data': 'bar'}, {'name': 'foo', 'data': 'baz'}]
Now, when I call insert_data, I'm getting the following message:
sqlalchemy.exc.NoForeignKeysError: Can't find any foreign key relationships between 'albums' and 'tracks'.
The traceback shows that the function throwing the exception is get_instance. I suspect it has something to do with my query, since I double-checked the table creation against both the documentation and other Stack Overflow questions, and the syntax seems correct.
How do I need to alter my query (see the try block in get_instance) so the program doesn't crash? Or is the error elsewhere?

The problem was that the table 'tracks' was missing the column album_id:
album_id = Column(INTEGER, ForeignKey('albums.id'))
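
For reference, here is a minimal sketch of the corrected models; the column definitions follow the question's code, and only album_id is new:

class Album(Base):
    __tablename__ = 'albums'
    id = Column(INTEGER, primary_key=True, unique=True, autoincrement=True, nullable=False)
    name = Column(TEXT)
    tracks = relationship('Track', back_populates='albums')

class Track(Base):
    __tablename__ = 'tracks'
    id = Column(INTEGER, primary_key=True, unique=True, autoincrement=True, nullable=False)
    name = Column(TEXT)
    # Without this ForeignKey, SQLAlchemy cannot infer how 'albums' and
    # 'tracks' join, which is exactly what NoForeignKeysError complains about.
    album_id = Column(INTEGER, ForeignKey('albums.id'))
    albums = relationship('Album', back_populates='tracks')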

Related

Editing a SQLAlchemy entry & committing the update changes the output of Model method

In a Flask application that uses SQLAlchemy, I have declared a Deliverymen model with some methods, properties and class methods. One of them, get_own_data(self) builds and returns a dictionary with only the relevant information that the end user needs (the full Model declaration is down below):
def get_own_data(self):
    own_data = {
        key: value
        for (key, value) in vars(self).items()
        # Exclude the following keys
        if key != "_sa_instance_state" and key != "moto_placa"
    }
    # Add this key, which isn't found in 'vars(self)' as it is a class property
    own_data["moto_placa_upper"] = self.moto_placa
    return own_data
The key pair defined at the end has to be defined like that due to form handling and a property setter that takes user input and turns the placa (or license plate) into upper case. That property and its setter are shown below.
When I edit an existing entry and then commit the changes, the get_own_data method only returns the last key pair defined, as below:
Python 3.10.4 (main, Mar 23 2022, 23:05:40) [GCC 11.2.0] on linux
App: app [development]
Instance: /home/daniel/Documents/work/breakfast4you/reportes/backend/instance
>>> d = Deliveryman.query.first()
>>> d.get_own_data()
{'user_id': 9, 'nombre': 'James Bond', 'telefono': '1234567890', 'status': 1, 'arp': 'Protección', 'tipo_cuenta': 'Ahorros', 'direccion': 'Dirección de prueba', 'id': 1, 'gov_id': '1234789845', 'eps': 'Sanitas', 'moto_marca': 'AKT', 'nombre_banco': 'Davivienda', 'num_cuenta': '1234567890', 'moto_placa_upper': 'BND 007'}
>>> d.nombre = "Carlos"
>>> db.session.commit()
>>> d.get_own_data()
{'moto_placa_upper': 'BND 007'}
Confusingly, if after committing the changes I access any of the object properties and then run d.get_own_data()... it works again!
>>> d.nombre
'Carlos'
>>> d.get_own_data()
{'nombre': 'Carlos', 'tipo_cuenta': 'Ahorros', 'status': 1, 'user_id': 9, 'arp': 'Protección', 'telefono': '1234567890', 'direccion': 'Dirección de prueba', 'num_cuenta': '1234567890', 'eps': 'Sanitas', 'nombre_banco': 'Davivienda', 'id': 1, 'gov_id': '1234789845', 'moto_marca': 'AKT', 'moto_placa_upper': 'BND 007'}
This is the model declaration:
class Deliveryman(db.Model):
    __tablename__ = "delivery_men"
    id = db.Column(db.Integer, primary_key=True, unique=True)
    user_id = db.Column(db.ForeignKey("users.id"), index=True, unique=True)
    nombre = db.Column(db.String(40), nullable=False, index=True)
    direccion = db.Column(db.String(120), nullable=False)
    telefono = db.Column(db.String(15), nullable=False)
    gov_id = db.Column(db.String(20), nullable=False, unique=True)
    status = db.Column(db.Integer)
    eps = db.Column(db.String(150))
    arp = db.Column(db.String(150))
    moto_marca = db.Column(db.String(20))
    moto_placa = db.Column(db.String(7))
    nombre_banco = db.Column(db.String(20))
    tipo_cuenta = db.Column(db.String(20))
    num_cuenta = db.Column(db.String(30))

    @property
    def moto_placa_upper(self):
        return self.moto_placa

    @moto_placa_upper.setter
    def moto_placa_upper(self, placa):
        if placa is None:
            pass
        else:
            self.moto_placa = placa.upper()

    # Another method that is irrelevant to this question

    def get_own_data(self):
        own_data = {
            key: value
            for (key, value) in vars(self).items()
            if key != "_sa_instance_state" and key != "moto_placa"
        }
        own_data["moto_placa_upper"] = self.moto_placa
        return own_data

    # Class methods irrelevant to this question
What could cause this issue? I'm not an expert on OOP and I have the feeling that there's a bug in my method and property declarations, but I cannot point it out.
The problem here is that SQLAlchemy expires objects in the session on commit. Expiry removes all the persisted attributes from a persisted object's __dict__, so there is nothing to iterate over when get_own_data is called.
There are a few ways around this:
Refresh the object by calling db.session.refresh(obj) before calling get_own_data; this will trigger a SELECT to fetch the data.
Disable expiry on commit by passing session_options={'expire_on_commit': False} when creating the SQLAlchemy (db) object. This is a global setting, and you risk your instances containing stale data, as in this case a SELECT is not emitted.
Rather than iterating over vars, iterate over the column names and get the values using getattr; as with refreshing, this will trigger a SELECT.
from sqlalchemy import inspect
...
def get_own_data(self):
    insp = inspect(self)
    own_data = {
        key: getattr(self, key)
        for key in insp.mapper.columns.keys()
        # Exclude the following keys
        if key != "moto_placa"
    }
    # Add this key, which isn't found in 'vars(self)' as it is a class property
    own_data["moto_placa_upper"] = self.moto_placa
    return own_data
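
For completeness, minimal sketches of the first two options; the names mirror the question, and the constructor call in option 2 is an assumption about where the db object is created:

# Option 1: refresh after commit; emits a SELECT and repopulates __dict__.
d = Deliveryman.query.first()
d.nombre = "Carlos"
db.session.commit()
db.session.refresh(d)
d.get_own_data()  # full dict again

# Option 2: disable expire-on-commit globally (risk: stale instances).
db = SQLAlchemy(session_options={"expire_on_commit": False})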

What's the best practice to enforce a foreign key constraint through a REST API?

I am making a to-do list webapp as a hobby project using Flask for the backend and a PostgreSQL database to store the data. The database model is as follows:
models.py
class Group(db.Model):
    __tablename__ = "groups"
    group_id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
    name = db.Column(db.String(20), unique=True)

class Collection(db.Model):
    __tablename__ = "collections"
    collection_id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
    name = db.Column(db.String(20), unique=True)
    group_name = db.Column(db.String(20), db.ForeignKey("groups.name"), nullable=True)

    def to_dict(self):
        return {
            "collection_id": self.collection_id,
            "name": self.name,
            "group_name": self.group_name,
        }

class Task(db.Model):
    __tablename__ = "tasks"
    task_id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
    text = db.Column(db.String(200))
    completed = db.Column(db.Boolean, default=False)
    collection_name = db.Column(
        db.String(20), db.ForeignKey("collections.name"), nullable=False
    )

    def to_dict(self):
        return {
            "task_id": self.task_id,
            "text": self.text,
            "completed": self.completed,
            "collection_name": self.collection_name,
        }
While writing the REST API logic for adding tasks to the database, I was unsure whether:
I should check that the collection_name value already exists in the collections table before trying to insert the row into the database.
I should try inserting the row anyway and catch the sqlalchemy.exc.IntegrityError exception if it happens.
The problem I see with the first solution is that I need to query the collections table for the list of valid collection_names each time I want to add a task, which I'm not sure is good practice performance-wise.
The problem I see with the second solution is that the sqlalchemy.exc.IntegrityError exception is pretty vague, and in a more sophisticated table with several foreign keys I would need to parse the exception's message to know which foreign key was violated.
For now, I have implemented the second solution because I have a very simple table with only one foreign key constraint.
In the following you can see the code for the controller.py that handles the API call and service.py that talks with the database.
controller.py
@tasks_api.route(COMMON_API_ENDPOINT + "/tasks", methods=["POST"])
def add_task():
    request_body = request.get_json()
    # Check for missing fields in the call
    mandatory_fields = set(["text", "collection_name"])
    try:
        missing_fields = mandatory_fields - set(request_body.keys())
        assert len(missing_fields) == 0
    except AssertionError:
        return (
            jsonify(
                {
                    "error": "The following mandatory fields are missing: "
                    + str(missing_fields)
                }
            ),
            400,
        )
    # Try to call the add task service function
    try:
        task = TaskService.add_task(
            text=request_body["text"], collection_name=request_body["collection_name"]
        )
    except CollectionNotFoundError as e:
        return jsonify({"error_message": str(e)}), 400
    else:
        return (
            jsonify(
                {
                    "result": "A new task was created successfully.",
                    "description": task.to_dict(),
                }
            ),
            201,
        )
service.py
def add_task(text: str, collection_name: str) -> Task:
    try:
        with get_session() as session:
            task = Task(text=text, collection_name=collection_name)
            session.add(task)
            return task
    except sqlalchemy.exc.IntegrityError:
        raise CollectionNotFoundError(
            "Foreign key violation: There is no collection with the name "
            + collection_name
        )
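
For comparison, a minimal sketch of what the check-first variant could look like; the exists() lookup is my assumption, not code from the project:

from sqlalchemy import exists

def add_task_check_first(text: str, collection_name: str) -> Task:
    with get_session() as session:
        # One indexed lookup per insert. Note this is racy without the
        # database-level foreign key constraint backing it up.
        found = session.query(
            exists().where(Collection.name == collection_name)
        ).scalar()
        if not found:
            raise CollectionNotFoundError(
                "There is no collection with the name " + collection_name
            )
        task = Task(text=text, collection_name=collection_name)
        session.add(task)
        return task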
While writing this post, I wondered if this is an XY problem where both solutions are not the best. I am open to other suggestions too.
Thanks!

Flask-Restless dump_to of primary key field

I am running into an issue that may be a bug, but I want to verify it with the community. I am basically trying to conform to camel case for transporting data and snake case for the database.
However, on the person_serializer, Flask-Restless will not allow an outbound "idPerson" as a result of the dump_to="idPerson". For some reason, it checks that the primary key exists and gets a KeyError, since the actual key is "id_person", not "idPerson".
Any help would be appreciated.
class Person(Base):
    __tablename__ = "person"
    id_person = Column(Integer, primary_key=True)
    first_name = Column(String(50))
    last_name = Column(String(50))

class PersonSchema(Schema):
    id_person = fields.Integer(load_from="idPerson", dump_to="idPerson")
    first_name = fields.String(load_from="firstName", dump_to="firstName")
    last_name = fields.String(load_from="lastName", dump_to="lastName")

    @post_load
    def make_user(self, data):
        return Person(**data)

person_schema = PersonSchema()

def person_serializer(instance):
    return person_schema.dump(instance).data

def person_deserializer(data):
    return person_schema.load(data).data
The KeyError is raised here:
try:
    # Convert the dictionary representation into an instance of the
    # model.
    instance = self.deserialize(data)
    # Add the created model to the session.
    self.session.add(instance)
    self.session.commit()
    # Get the dictionary representation of the new instance as it
    # appears in the database.
    result = self.serialize(instance)
except self.validation_exceptions as exception:
    return self._handle_validation_exception(exception)
# Determine the value of the primary key for this instance and
# URL-encode it (in case it is a Unicode string).
pk_name = self.primary_key or primary_key_name(instance)
>   primary_key = result[pk_name]
E   KeyError: 'idPerson'
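
The traceback hints at one workaround: pk_name = self.primary_key or primary_key_name(instance) consults the API's primary_key setting before falling back to the model's key. Assuming a Flask-Restless version whose create_api accepts the primary_key keyword, pointing it at the dumped name may avoid the KeyError; a hypothetical sketch:

# Hypothetical setup; 'manager' is an APIManager instance.
manager.create_api(
    Person,
    methods=['GET', 'POST'],
    primary_key='idPerson',  # match the dump_to name emitted by the schema
    serializer=person_serializer,
    deserializer=person_deserializer,
)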

Flask-SQLalchemy update a row's information

How can I update a row's information?
For example I'd like to alter the name column of the row that has the id 5.
Retrieve an object using the tutorial shown in the Flask-SQLAlchemy documentation. Once you have the entity that you want to change, change the entity itself. Then, db.session.commit().
For example:
admin = User.query.filter_by(username='admin').first()
admin.email = 'my_new_email@example.com'
db.session.commit()
user = User.query.get(5)
user.name = 'New Name'
db.session.commit()
Flask-SQLAlchemy is based on SQLAlchemy, so be sure to check out the SQLAlchemy Docs as well.
There is an update method on the BaseQuery object in SQLAlchemy, which is returned by filter_by.
num_rows_updated = User.query.filter_by(username='admin').update(dict(email='my_new_email@example.com'))
db.session.commit()
The advantage of using update over changing the entity comes when there are many objects to be updated.
If you want to give add_user permission to all the admins,
rows_changed = User.query.filter_by(role='admin').update(dict(permission='add_user'))
db.session.commit()
Notice that filter_by takes keyword arguments (use only one =) as opposed to filter which takes an expression.
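For example, these two queries are equivalent:

User.query.filter_by(username='admin')        # keyword arguments
User.query.filter(User.username == 'admin')   # SQL expression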
Assigning and committing does not work if you modify a pickled attribute of the model. Pickled attributes should be replaced in order to trigger updates:
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from pprint import pprint

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/users.db'
db = SQLAlchemy(app)

class User(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80), unique=True)
    data = db.Column(db.PickleType())

    def __init__(self, name, data):
        self.name = name
        self.data = data

    def __repr__(self):
        return '<User %r>' % self.name

db.create_all()

# Create a user.
bob = User('Bob', {})
db.session.add(bob)
db.session.commit()

# Retrieve the row by its name.
bob = User.query.filter_by(name='Bob').first()
pprint(bob.data)  # {}

# Modifying data is ignored.
bob.data['foo'] = 123
db.session.commit()
bob = User.query.filter_by(name='Bob').first()
pprint(bob.data)  # {}

# Replacing data is respected.
bob.data = {'bar': 321}
db.session.commit()
bob = User.query.filter_by(name='Bob').first()
pprint(bob.data)  # {'bar': 321}

# Modifying data is ignored.
bob.data['moo'] = 789
db.session.commit()
bob = User.query.filter_by(name='Bob').first()
pprint(bob.data)  # {'bar': 321}
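
If you do want in-place modifications to be detected, SQLAlchemy's mutation-tracking extension can wrap the column type; a sketch of the same model with MutableDict (the column change is my suggestion, not part of the original answer):

from sqlalchemy.ext.mutable import MutableDict

class User(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80), unique=True)
    # MutableDict intercepts dict mutations and flags the attribute as
    # changed, so bob.data['foo'] = 123 followed by commit() persists.
    data = db.Column(MutableDict.as_mutable(db.PickleType()))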
Just assigning the value and committing will work for all data types except JSON and pickled attributes. Since the pickled type is explained above, I'll note down a slightly different but easy way to update JSON.
class User(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80), unique=True)
    data = db.Column(db.JSON)

    def __init__(self, name, data):
        self.name = name
        self.data = data
Let's say the model is like above.
user = User("Jon Dove", {"country":"Sri Lanka"})
db.session.add(user)
db.session.flush()
db.session.commit()
This will add the user into the MySQL database with data {"country":"Sri Lanka"}
Modifying data will be ignored. My code that didn't work is as follows.
user = User.query.filter(User.name == 'Jon Dove').first()
data = user.data
data["province"] = "south"
user.data = data
db.session.merge(user)
db.session.flush()
db.session.commit()
Instead of going through the painful work of copying the JSON to a new dict (not just assigning it to a new variable as above, which still points at the same object), I found a simpler way: there is a way to flag to the system that the JSON has changed.
Following is the working code.
from sqlalchemy.orm.attributes import flag_modified

user = User.query.filter(User.name == 'Jon Dove').first()
data = user.data
data["province"] = "south"
user.data = data
flag_modified(user, "data")
db.session.merge(user)
db.session.flush()
db.session.commit()
This worked like a charm.
There is another method proposed along with this method here
Hope I've helped some one.
models.py defines the serializers:
from datetime import date, datetime
import json

def default(o):
    if isinstance(o, (date, datetime)):
        return o.isoformat()

def get_model_columns(instance, exclude=[]):
    columns = instance.__table__.columns.keys()
    columns = list(set(columns) - set(exclude))
    return columns

class User(db.Model):
    __tablename__ = 'user'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    .......

    def serializers(self):
        cols = get_model_columns(self)
        dict_val = {}
        for c in cols:
            dict_val[c] = getattr(self, c)
        return json.loads(json.dumps(dict_val, default=default))
In the REST API, we can update the record dynamically by passing the JSON data into the update query:
class UpdateUserDetails(Resource):
    @auth_token_required
    def post(self):
        json_data = request.get_json()
        user_id = current_user.id
        try:
            instance = User.query.filter(User.id == user_id)
            data = instance.update(dict(json_data))
            db.session.commit()
            updateddata = instance.first()
            msg = {"msg": "User details updated successfully", "data": updateddata.serializers()}
            code = 200
        except Exception as e:
            print(e)
            msg = {"msg": "Failed to update the user details! Please contact your administrator."}
            code = 500
        return msg, code
I was looking for something a little less intrusive than @Ramesh's answer (which was good) but still dynamic. Here is a solution that attaches an update method to a db.Model object.
You pass in a dictionary and it will update only the columns that you pass in.
class SampleObject(db.Model):
    id = db.Column(db.BigInteger, primary_key=True)
    name = db.Column(db.String(128), nullable=False)
    notes = db.Column(db.Text, nullable=False)

    def update(self, update_dictionary: dict):
        for col_name in self.__table__.columns.keys():
            if col_name in update_dictionary:
                setattr(self, col_name, update_dictionary[col_name])
        db.session.add(self)
        db.session.commit()
Then in a route you can do
object = SampleObject.query.where(SampleObject.id == id).first()
object.update(update_dictionary=request.get_json())
Update the columns in Flask (note that .save() here assumes a custom helper method on the model; plain Flask-SQLAlchemy would end with db.session.commit()):
admin = User.query.filter_by(username='admin').first()
admin.email = 'my_new_email@example.com'
admin.save()
To use the update method (which updates the entry outside of the session) you have to query the object in steps, like this:
query = db.session.query(UserModel)
query = query.filter(UserModel.id == user_id)
query.update(user_dumped)
db.session.commit()

Reverse mapping from a table to a model in SQLAlchemy

To provide an activity log in my SQLAlchemy-based app, I have a model like this:
class ActivityLog(Base):
    __tablename__ = 'activitylog'
    id = Column(Integer, primary_key=True)
    activity_by_id = Column(Integer, ForeignKey('users.id'), nullable=False)
    activity_by = relation(User, primaryjoin=activity_by_id == User.id)
    activity_at = Column(DateTime, default=datetime.utcnow, nullable=False)
    activity_type = Column(SmallInteger, nullable=False)
    target_table = Column(Unicode(20), nullable=False)
    target_id = Column(Integer, nullable=False)
    target_title = Column(Unicode(255), nullable=False)
The log contains entries for multiple tables, so I can't use ForeignKey relations. Log entries are made like this:
doc = Document(name=u'mydoc', title=u'My Test Document',
               created_by=user, edited_by=user)
session.add(doc)
session.flush()  # See note below
log = ActivityLog(activity_by=user, activity_type=ACTIVITY_ADD,
                  target_table=Document.__table__.name, target_id=doc.id,
                  target_title=doc.title)
session.add(log)
This leaves me with three problems:
I have to flush the session before my doc object gets an id. If I had used a ForeignKey column and a relation mapper, I could have simply called ActivityLog(target=doc) and let SQLAlchemy do the work. Is there any way to work around needing to flush by hand?
The target_table parameter is too verbose. I suppose I could solve this with a target property setter in ActivityLog that automatically retrieves the table name and id from a given instance.
Biggest of all, I'm not sure how to retrieve a model instance from the database. Given an ActivityLog instance log, calling self.session.query(log.target_table).get(log.target_id) does not work, as query() expects a model as parameter.
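One way to make that last query work is a small helper that maps target_table back to a mapped class; a sketch of mine, assuming SQLAlchemy 1.4+ where the declarative Base exposes registry.mappers (older versions kept a similar private _decl_class_registry):

def model_for_table(base, table_name):
    # Scan the mappers registered on the declarative base and return
    # the class whose table name matches.
    for mapper in base.registry.mappers:
        table = mapper.local_table
        if table is not None and table.name == table_name:
            return mapper.class_
    return None

# target = session.query(model_for_table(Base, log.target_table)).get(log.target_id)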
One workaround appears to be to use polymorphism and derive all my models from a base model which ActivityLog recognises. Something like this:
class Entity(Base):
    __tablename__ = 'entities'
    id = Column(Integer, primary_key=True)
    title = Column(Unicode(255), nullable=False)
    edited_at = Column(DateTime, onupdate=datetime.utcnow, nullable=False)
    entity_type = Column(Unicode(20), nullable=False)
    __mapper_args__ = {'polymorphic_on': entity_type}

class Document(Entity):
    __tablename__ = 'documents'
    __mapper_args__ = {'polymorphic_identity': 'document'}
    body = Column(UnicodeText, nullable=False)

class ActivityLog(Base):
    __tablename__ = 'activitylog'
    id = Column(Integer, primary_key=True)
    ...
    target_id = Column(Integer, ForeignKey('entities.id'), nullable=False)
    target = relation(Entity)
If I do this, ActivityLog(...).target will give me a Document instance when it refers to a Document, but I'm not sure it's worth the overhead of having two tables for everything. Should I go ahead and do it this way?
One way to solve this is polymorphic associations. It should solve all three of your issues and also make database foreign key constraints work. See the polymorphic association example in the SQLAlchemy source. Mike Bayer has an old blog post that discusses this in greater detail.
Definitely go through the blog post and examples Ants linked to. I did not find the explanation confusing, but it does assume some more experience with the topic.
A few things I can suggest:
ForeignKeys: in general I agree they are a good thing to have, but I am not sure they are conceptually important in your case: you seem to be using this ActivityLog as an orthogonal cross-cutting concern (AOP); a version with foreign keys would effectively make your business objects aware of the ActivityLog. Another problem with having an FK for audit purposes with your schema setup is that if you allow object deletion, the FK requirement will delete all the ActivityLog entries for that object.
Automatic logging: you are doing all this logging manually whenever you create/modify(/delete) an object. With SA you could implement a SessionExtension with before_commit which would do the job for you automatically.
This way you can completely avoid writing parts like the following:
log = ActivityLog(activity_by=user, activity_type=ACTIVITY_ADD,
                  target_table=Document.__table__.name, target_id=doc.id,
                  target_title=doc.title)
session.add(log)
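
As a side note not in the original answer: SessionExtension was deprecated and later removed in SQLAlchemy 1.4, so on modern versions the same hook is written with the event system. A minimal sketch, reusing the create_activitylog helper from the sample below:

from sqlalchemy import event
from sqlalchemy.orm import Session

@event.listens_for(Session, 'before_commit')
def log_activity(session):
    # Same idea as the SessionExtension in the sample below, expressed
    # with the modern event API.
    for obj in session.new:
        if hasattr(obj, 'create_activitylog'):
            obj.create_activitylog(ACTIVITY_ADD)
    for obj in session.dirty:
        if hasattr(obj, 'create_activitylog'):
            obj.create_activitylog(ACTIVITY_MOD)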
EDIT-1: complete sample code added
The code is based on the first non-FK version from http://techspot.zzzeek.org/?p=13.
The choice not to use FK is based on the fact that for audit purposes when the
main object is deleted, it should not cascade to delete all the audit log entries.
Also this keeps auditable objects unaware of the fact they are being audited.
The implementation uses a SA one-to-many relationship. It is possible that some objects are modified many times, which will result in many audit log entries. By default SA will load the relationship objects when adding a new entry to the list. Assuming that during "normal" usage we only want to add new audit log entries, we use the lazy='noload' flag so that the relation from the main object will never be loaded. It is loaded when navigated from the other side though, and can also be loaded from the main object using a custom query, which is shown in the example as well via the activitylog_readonly read-only property.
Code (runnable with some tests):
from datetime import datetime
from sqlalchemy import create_engine, Column, Integer, SmallInteger, String, DateTime, ForeignKey, Table, UnicodeText, Unicode, and_
from sqlalchemy.orm import relationship, dynamic_loader, scoped_session, sessionmaker, class_mapper, backref
from sqlalchemy.orm.session import Session
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm.interfaces import SessionExtension
import logging
logging.basicConfig(level=logging.INFO)
_logger = logging.getLogger()
ACTIVITY_ADD = 1
ACTIVITY_MOD = 2
ACTIVITY_DEL = 3
class ActivityLogSessionExtension(SessionExtension):
    _logger = logging.getLogger('ActivityLogSessionExtension')

    def before_commit(self, session):
        self._logger.debug("before_commit: %s", session)
        for d in session.new:
            self._logger.info("before_commit >> add: %s", d)
            if hasattr(d, 'create_activitylog'):
                log = d.create_activitylog(ACTIVITY_ADD)
        for d in session.dirty:
            self._logger.info("before_commit >> mod: %s", d)
            if hasattr(d, 'create_activitylog'):
                log = d.create_activitylog(ACTIVITY_MOD)
        for d in session.deleted:
            self._logger.info("before_commit >> del: %s", d)
            if hasattr(d, 'create_activitylog'):
                log = d.create_activitylog(ACTIVITY_DEL)
# Configure test data SA
engine = create_engine('sqlite:///:memory:', echo=False)
session = scoped_session(sessionmaker(bind=engine, autoflush=False, extension=ActivityLogSessionExtension()))
Base = declarative_base()
Base.query = session.query_property()
class _BaseMixin(object):
    """ Just a helper mixin class to set properties on object creation.
    Also provides a convenient default __repr__() function, but be aware that
    also relationships are printed, which might result in loading relations.
    """
    def __init__(self, **kwargs):
        for k, v in kwargs.items():
            setattr(self, k, v)

    def __repr__(self):
        return "<%s(%s)>" % (self.__class__.__name__,
            ', '.join('%s=%r' % (k, self.__dict__[k])
                      for k in sorted(self.__dict__) if '_sa_' != k[:4] and '_backref_' != k[:9])
        )
class User(Base, _BaseMixin):
    __tablename__ = u'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)

class Document(Base, _BaseMixin):
    __tablename__ = u'documents'
    id = Column(Integer, primary_key=True)
    title = Column(Unicode(255), nullable=False)
    body = Column(UnicodeText, nullable=False)

class Folder(Base, _BaseMixin):
    __tablename__ = u'folders'
    id = Column(Integer, primary_key=True)
    title = Column(Unicode(255), nullable=False)
    comment = Column(UnicodeText, nullable=False)

class ActivityLog(Base, _BaseMixin):
    __tablename__ = u'activitylog'
    id = Column(Integer, primary_key=True)
    activity_by_id = Column(Integer, ForeignKey('users.id'), nullable=False)
    activity_by = relationship(User)  # @note: no need to specify the primaryjoin
    activity_at = Column(DateTime, default=datetime.utcnow, nullable=False)
    activity_type = Column(SmallInteger, nullable=False)
    target_table = Column(Unicode(20), nullable=False)
    target_id = Column(Integer, nullable=False)
    target_title = Column(Unicode(255), nullable=False)
    # backref relation for auditable
    target = property(lambda self: getattr(self, '_backref_%s' % self.target_table))
def _get_user():
    """ This method returns the User object for the current user.
    @todo: proper implementation required
    @hack: currently returns the 'user2'
    """
    return session.query(User).filter_by(name='user2').one()
# auditable support function
# based on first non-FK version from http://techspot.zzzeek.org/?p=13
def auditable(cls, name):
    def create_activitylog(self, activity_type):
        log = ActivityLog(activity_by=_get_user(),
                          activity_type=activity_type,
                          target_table=table.name,
                          target_title=self.title,
                          )
        getattr(self, name).append(log)
        return log

    mapper = class_mapper(cls)
    table = mapper.local_table
    cls.create_activitylog = create_activitylog

    def _get_activitylog(self):
        return Session.object_session(self).query(ActivityLog).with_parent(self).all()
    setattr(cls, '%s_readonly' % (name,), property(_get_activitylog))

    # no constraints, therefore define constraints in an ad-hoc fashion.
    primaryjoin = and_(
        list(table.primary_key)[0] == ActivityLog.__table__.c.target_id,
        ActivityLog.__table__.c.target_table == table.name
    )
    foreign_keys = [ActivityLog.__table__.c.target_id]
    mapper.add_property(name,
        # @note: because we use the relationship, by default all previous
        # ActivityLog items will be loaded for an object when a new one is
        # added. To avoid this, use either dynamic_loader (http://www.sqlalchemy.org/docs/reference/orm/mapping.html#sqlalchemy.orm.dynamic_loader)
        # or lazy='noload'. This is the trade-off decision to be made.
        # An additional benefit of using lazy='noload' is that one can also
        # record DEL operations in the same way as ADD, MOD
        relationship(
            ActivityLog,
            lazy='noload',  # important for the relationship
            primaryjoin=primaryjoin,
            foreign_keys=foreign_keys,
            backref=backref('_backref_%s' % table.name,
                            primaryjoin=list(table.primary_key)[0] == ActivityLog.__table__.c.target_id,
                            foreign_keys=foreign_keys)
        )
    )
# this will define which classes support the ActivityLog interface
auditable(Document, 'activitylogs')
auditable(Folder, 'activitylogs')
# create db schema
Base.metadata.create_all(engine)
## >>>>> TESTS >>>>>>
# create some basic data first
u1 = User(name='user1')
u2 = User(name='user2')
session.add(u1)
session.add(u2)
session.commit()
session.expunge_all()
# --check--
assert not(_get_user() is None)
##############################
## ADD
##############################
_logger.info('-' * 80)
d1 = Document(title=u'Document-1', body=u'Doc1 some body skipped the body')
# when not using SessionExtension for any reason, this can be called manually
#d1.create_activitylog(ACTIVITY_ADD)
session.add(d1)
session.commit()
f1 = Folder(title=u'Folder-1', comment=u'This folder is empty')
# when not using SessionExtension for any reason, this can be called manually
#f1.create_activitylog(ACTIVITY_ADD)
session.add(f1)
session.commit()
# --check--
session.expunge_all()
logs = session.query(ActivityLog).all()
_logger.debug(logs)
assert len(logs) == 2
assert logs[0].activity_type == ACTIVITY_ADD
assert logs[0].target.title == u'Document-1'
assert logs[0].target.title == logs[0].target_title
assert logs[1].activity_type == ACTIVITY_ADD
assert logs[1].target.title == u'Folder-1'
assert logs[1].target.title == logs[1].target_title
##############################
## MOD(ify)
##############################
_logger.info('-' * 80)
session.expunge_all()
d1 = session.query(Document).filter_by(id=1).one()
assert d1.title == u'Document-1'
assert d1.body == u'Doc1 some body skipped the body'
assert d1.activitylogs == []
d1.title = u'Modified: Document-1'
d1.body = u'Modified: body'
# when not using SessionExtension (or it does not work, this can be called manually)
#d1.create_activitylog(ACTIVITY_MOD)
session.commit()
_logger.debug(d1.activitylogs_readonly)
# --check--
session.expunge_all()
logs = session.query(ActivityLog).all()
assert len(logs)==3
assert logs[2].activity_type == ACTIVITY_MOD
assert logs[2].target.title == u'Modified: Document-1'
assert logs[2].target.title == logs[2].target_title
##############################
## DEL(ete)
##############################
_logger.info('-' * 80)
session.expunge_all()
d1 = session.query(Document).filter_by(id=1).one()
# when not using SessionExtension for any reason, this can be called manually,
#d1.create_activitylog(ACTIVITY_DEL)
session.delete(d1)
session.commit()
session.expunge_all()
# --check--
session.expunge_all()
logs = session.query(ActivityLog).all()
assert len(logs)==4
assert logs[0].target is None
assert logs[2].target is None
assert logs[3].activity_type == ACTIVITY_DEL
assert logs[3].target is None
##############################
## print all activity logs
##############################
_logger.info('=' * 80)
logs = session.query(ActivityLog).all()
for log in logs:
_ = log.target
_logger.info("%s -> %s", log, log.target)
##############################
## navigate from main object
##############################
_logger.info('=' * 80)
session.expunge_all()
f1 = session.query(Folder).filter_by(id=1).one()
_logger.info(f1.activitylogs_readonly)
