Get the relationship metadata from sqlalchemy - python

I have a very simple User class definition:
class User(Base):
    implements(interfaces.IUser)
    __tablename__ = 'users'

    # Fields description
    id = Column(Integer, primary_key=True)
    client_id = Column(Integer, ForeignKey('w2_client.id'))
    client = relationship("Client", backref=backref('users', order_by=id))
I want to automatically generate a GUI to edit User objects (and other kinds of classes). So I need all the metadata of the table; for example, I can do:

for c in User.__table__.columns:
    print(c.name, c.type, c.nullable, c.primary_key, c.foreign_keys)

But I cannot get any information about the relationship "client": c.foreign_keys only shows me the table the foreign key refers to, not the "client" attribute I've defined.
Please let me know if my question is not clear.

It's true that this is not readily available. I had to come up with my own function after some reverse engineering.
Here is the metadata that I use. It's a little different from what you are looking for, but perhaps you can use it.
# Imports needed for this snippet.
import collections
from sqlalchemy.orm import class_mapper, ColumnProperty, RelationshipProperty

# structure returned by the get_metadata function.
MetaDataTuple = collections.namedtuple("MetaDataTuple",
        "coltype, colname, default, m2m, nullable, uselist, collection")

def get_metadata_iterator(class_):
    for prop in class_mapper(class_).iterate_properties:
        name = prop.key
        if name.startswith("_") or name == "id" or name.endswith("_id"):
            continue
        md = _get_column_metadata(prop)
        if md is None:
            continue
        yield md

def get_column_metadata(class_, colname):
    prop = class_mapper(class_).get_property(colname)
    md = _get_column_metadata(prop)
    if md is None:
        raise ValueError("Not a column name: %r." % (colname,))
    return md

def _get_column_metadata(prop):
    name = prop.key
    m2m = False
    default = None
    nullable = None
    uselist = False
    collection = None
    proptype = type(prop)
    if proptype is ColumnProperty:
        coltype = type(prop.columns[0].type).__name__
        try:
            default = prop.columns[0].default
        except AttributeError:
            default = None
        else:
            if default is not None:
                default = default.arg(None)
        nullable = prop.columns[0].nullable
    elif proptype is RelationshipProperty:
        coltype = RelationshipProperty.__name__
        m2m = prop.secondary is not None
        nullable = prop.local_side[0].nullable
        uselist = prop.uselist
        if prop.collection_class is not None:
            collection = type(prop.collection_class()).__name__
        else:
            collection = "list"
    else:
        return None
    return MetaDataTuple(coltype, str(name), default, m2m, nullable, uselist, collection)

def get_metadata(class_):
    """Return a list of MetaDataTuple structures."""
    return list(get_metadata_iterator(class_))

def get_metadata_map(class_):
    rv = {}
    for metadata in get_metadata_iterator(class_):
        rv[metadata.colname] = metadata
    return rv
But it doesn't have the primary key. I use a separate function for that:

mapper = class_mapper(ORMClass)
pkname = str(mapper.primary_key[0].name)

Perhaps I should put the primary key name in the metadata.
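For the GUI use case in the question, a minimal usage sketch (my addition, assuming the User class and the functions above):

for md in get_metadata(User):
    if md.coltype == "RelationshipProperty":
        # Relationship attributes such as User.client end up here.
        print("relation:", md.colname, "multi-valued:", md.uselist, "collection:", md.collection)
    else:
        # Plain columns; enough to pick a widget and a validation rule.
        print("column:", md.colname, md.coltype, "nullable:", md.nullable)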


sqlalchemy: how to return list of objects joining 3 tables?

@hybrid_method
# @paginate
def investors(self, **kwargs):
    """All investors for a given Custodian."""
    ind_inv_type_id = InvestorType.where(description="Individual").first().id
    inv_query = Investor.with_joined(InvestorAddress, InvestmentAddress, CustodianAddress) \
        .filter_by(custodians_id=self.id) \
        .with_joined(Investment) \
        .filter_by(investor_types_id=ind_inv_type_id)
    investors = Investor.where(None, False, inv_query, **kwargs)
    temp_inv_query = Investor.with_joined(CustodianInvestor, Custodian) \
        .filter_by(Custodian.id == self.id)
    temp_investors = Investor.where(None, False, temp_inv_query, **kwargs)
    return list(set(investors + temp_investors))
# end def investors
# @auth.access_controlled
class InvestorAddress(db.Model, EntityAddressMixin):
    # Metadata
    __tablename__ = 'investor_addresses'

    # Database Columns
    investors_id = db.Column(db.ForeignKey("investors.investors_id"),
                             nullable=False)
    investor = db.relationship("Investor", foreign_keys=[investors_id],
                               backref=db.backref("InvestorAddress"))
# end class InvestorAddress

class InvestmentAddress(db.Model):
    """This model differs from other EntityAddress models because it links to
    either an investor_address or a custodian_address."""
    # Metadata
    __tablename__ = 'investment_addresses'

    # Database Columns
    address_types_id = db.Column(
        db.ForeignKey("address_types.address_types_id"),
        nullable=False)
    address_type = db.relationship("AddressType",
                                   foreign_keys=[address_types_id],
                                   backref=db.backref("InvestmentAddress"))
    investments_id = db.Column(db.ForeignKey("investments.investments_id"),
                               nullable=False)
    investment = db.relationship("Investment",
                                 foreign_keys=[investments_id],
                                 backref=db.backref("InvestmentAddress"))
    investor_addresses_id = db.Column(db.ForeignKey(
        "investor_addresses.investor_addresses_id"))
    investor_address = db.relationship("InvestorAddress",
                                       foreign_keys=[investor_addresses_id],
                                       backref=db.backref("InvestmentAddress"))
    custodian_addresses_id = db.Column(db.ForeignKey(
        "custodian_addresses.custodian_addresses_id"))
    custodian_address = db.relationship("CustodianAddress",
                                        foreign_keys=[custodian_addresses_id],
                                        backref=db.backref("InvestmentAddress"))
# end class InvestmentAddress

class CustodianAddress(db.Model, EntityAddressMixin):
    """Defines the relationship between a Custodian and their addresses."""
    # Metadata
    __tablename__ = 'custodian_addresses'

    # Database Columns
    custodians_id = db.Column(db.ForeignKey(
        "custodians.custodians_id"), nullable=False)
    custodian = db.relationship("Custodian", foreign_keys=[custodians_id],
                                backref=db.backref("CustodianAddress"))
# end CustodianAddress
I have an application, and this function is supposed to return a list of investors for a given Custodian. When it executes I get the error: "sqlalchemy.exc.ArgumentError: mapper option expects string key or list of attributes". The error comes from the join in inv_query.
I have included the three models that I'm using for the join.
As described in the documentation you linked to here, you should pass string arguments (the relationship names) to with_joined. Given you have defined the relationships:

Investor.with_joined('investorAddressTable', 'investmentAddressTable', 'custodianAddressTable')

If you can use a session, then you can query the ORM classes directly instead:

session.query(Investor).join(InvestorAddress).join(InvestmentAddress).join(CustodianAddress).all()  # assumes the foreign keys are set up properly
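For instance, going by the backref names actually declared in the models above (each one uses backref=db.backref("InvestorAddress") and so on), a string-based version of the first query might look like this sketch; whether these are the exact relationship attribute names on Investor depends on your models:

inv_query = (Investor
             .with_joined('InvestorAddress')  # backref created on Investor by the InvestorAddress model
             .filter_by(custodians_id=self.id)
             .filter_by(investor_types_id=ind_inv_type_id))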

Flask-Restless dump_to of primary key field

I am running into an issue that may be a bug, but I want to verify it with the community. I am basically trying to conform to camelCase for transporting data and underscores for the database.
However, on the person_serializer, Flask-Restless will not allow an outbound "idPerson" as a result of dump_to="idPerson". For some reason, it checks that the primary key exists and gets a KeyError, since the actual key is "id_person", not "idPerson".
Any help would be appreciated.
from marshmallow import Schema, fields, post_load  # assumed imports

class Person(Base):
    __tablename__ = "person"
    id_person = Column(Integer, primary_key=True)
    first_name = Column(String(50))
    last_name = Column(String(50))

class PersonSchema(Schema):
    id_person = fields.Integer(load_from="idPerson", dump_to="idPerson")
    first_name = fields.String(load_from="firstName", dump_to="firstName")
    last_name = fields.String(load_from="lastName", dump_to="lastName")

    @post_load
    def make_user(self, data):
        return Person(**data)

person_schema = PersonSchema()

def person_serializer(instance):
    return person_schema.dump(instance).data

def person_deserializer(data):
    return person_schema.load(data).data
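To make the mismatch concrete, a small sketch (my addition, using marshmallow 2.x semantics, where dump() returns a result with a .data attribute, matching the code above):

person = Person(id_person=1, first_name='Jane', last_name='Doe')  # hypothetical instance
print(person_serializer(person))
# -> {'idPerson': 1, 'firstName': 'Jane', 'lastName': 'Doe'}
# Flask-Restless then indexes this serialized dict with the primary key name
# it resolved from the model/configuration; since that name and the dumped
# key disagree ('id_person' vs 'idPerson'), the lookup raises a KeyError.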
The KeyError is raised below:
try:
    # Convert the dictionary representation into an instance of the
    # model.
    instance = self.deserialize(data)
    # Add the created model to the session.
    self.session.add(instance)
    self.session.commit()
    # Get the dictionary representation of the new instance as it
    # appears in the database.
    result = self.serialize(instance)
except self.validation_exceptions as exception:
    return self._handle_validation_exception(exception)
# Determine the value of the primary key for this instance and
# URL-encode it (in case it is a Unicode string).
pk_name = self.primary_key or primary_key_name(instance)
>       primary_key = result[pk_name]
E       KeyError: 'idPerson'

SQLAlchemy Linear One-to-One Relationship

I have two classes mapped to two tables respectively.
Ex:

Obj 1: ID (PK), KEY (String(20))
Obj 2: ID (PK), obj_1_id (FK), value (String(20))

I would like to be able to perform obj_1.value = val, whereby val is stored in the second table's respective column, instead of obj_1.value.value = val.
How can I create such a relationship, spread/mapped across two tables' columns?
What I want is not one-to-one (object HAS object) but rather to map a column of an object to a different table.
Following is what I have tried (following the docs); it does not work, as it gives me obj1.value.value = .. instead of a direct column mapping.
What I have tried:
class Obj1(Base):
    __tablename__ = ...
    id = ..
    key = ..
    value = relationship("Obj2", uselist=False, backref="obj1")

class Obj2(Base):
    __tablename__ = ...
    id = ..       # PK
    obj_1_id = .. # FK
    value = ...
Why not just wrap it in a Python property:

class Obj1(Base):
    __tablename__ = 'obj1'
    id = Column(Integer, primary_key=True)
    key = Column(String(20))
    _value_rel = relationship("Obj2", uselist=False, backref="obj1")

    @property
    def value(self):
        return self._value_rel and self._value_rel.value

    @value.setter
    def value(self, value):
        if value is None:
            self._value_rel = None
        elif self._value_rel is None:
            self._value_rel = Obj2(value=value)
        else:
            self._value_rel.value = value
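Alternatively, SQLAlchemy ships a helper that does this wrapping for you: the association proxy. A minimal sketch under the same models (my addition; the creator argument is supplied because declarative classes take keyword arguments):

from sqlalchemy.ext.associationproxy import association_proxy

class Obj1(Base):
    __tablename__ = 'obj1'
    id = Column(Integer, primary_key=True)
    key = Column(String(20))
    _value_rel = relationship("Obj2", uselist=False, backref="obj1")
    # obj1.value now reads/writes obj1._value_rel.value; on assignment with
    # no related row yet, creator builds the Obj2 instance.
    value = association_proxy('_value_rel', 'value',
                              creator=lambda v: Obj2(value=v))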

Flask-SQLalchemy update a row's information

How can I update a row's information?
For example, I'd like to alter the name column of the row that has the id 5.
Retrieve an object using the tutorial shown in the Flask-SQLAlchemy documentation. Once you have the entity that you want to change, change it directly, then call db.session.commit().
For example:
admin = User.query.filter_by(username='admin').first()
admin.email = 'my_new_email@example.com'
db.session.commit()

user = User.query.get(5)
user.name = 'New Name'
db.session.commit()
Flask-SQLAlchemy is based on SQLAlchemy, so be sure to check out the SQLAlchemy Docs as well.
There is an update method on the BaseQuery object (which is returned by filter_by):

num_rows_updated = User.query.filter_by(username='admin').update(dict(email='my_new_email@example.com'))
db.session.commit()

The advantage of using update over changing the entity directly comes when there are many objects to be updated.
If you want to give add_user permission to all the admins,
rows_changed = User.query.filter_by(role='admin').update(dict(permission='add_user'))
db.session.commit()
Notice that filter_by takes keyword arguments (use only one =) as opposed to filter which takes an expression.
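For example, these two are equivalent ways to target the same rows:

# filter_by: keyword arguments, implicit equality on the queried model
User.query.filter_by(role='admin')
# filter: a full SQL expression, so comparisons use Python operators
User.query.filter(User.role == 'admin')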
This does not work if you modify a pickled attribute of the model. Pickled attributes should be replaced in order to trigger updates:
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from pprint import pprint

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/users.db'
db = SQLAlchemy(app)

class User(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80), unique=True)
    data = db.Column(db.PickleType())

    def __init__(self, name, data):
        self.name = name
        self.data = data

    def __repr__(self):
        return '<User %r>' % self.name

db.create_all()

# Create a user.
bob = User('Bob', {})
db.session.add(bob)
db.session.commit()

# Retrieve the row by its name.
bob = User.query.filter_by(name='Bob').first()
pprint(bob.data)  # {}

# Modifying data in place is ignored.
bob.data['foo'] = 123
db.session.commit()
bob = User.query.filter_by(name='Bob').first()
pprint(bob.data)  # {}

# Replacing data is respected.
bob.data = {'bar': 321}
db.session.commit()
bob = User.query.filter_by(name='Bob').first()
pprint(bob.data)  # {'bar': 321}

# Modifying data in place is ignored.
bob.data['moo'] = 789
db.session.commit()
bob = User.query.filter_by(name='Bob').first()
pprint(bob.data)  # {'bar': 321}
Just assigning the value and committing will work for all data types except JSON and pickled attributes. Since the pickled type is explained above, I'll note down a slightly different but easy way to update JSONs.
class User(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80), unique=True)
    data = db.Column(db.JSON)

    def __init__(self, name, data):
        self.name = name
        self.data = data
Let's say the model is like above.
user = User("Jon Dove", {"country":"Sri Lanka"})
db.session.add(user)
db.session.flush()
db.session.commit()
This will add the user into the MySQL database with data {"country": "Sri Lanka"}.
Modifying the data was ignored. My code that didn't work is as follows:
user = User.query.filter(User.name == 'Jon Dove').first()
data = user.data
data["province"] = "south"
user.data = data
db.session.merge(user)
db.session.flush()
db.session.commit()
Instead of going through the painful work of copying the JSON to a new dict (not just assigning it to a new variable as above), which should have worked, I found a simple way to do it: there is a way to flag the system that the JSON has changed.
Following is the working code.
from sqlalchemy.orm.attributes import flag_modified

user = User.query.filter(User.name == 'Jon Dove').first()
data = user.data
data["province"] = "south"
user.data = data
flag_modified(user, "data")
db.session.merge(user)
db.session.flush()
db.session.commit()
This worked like a charm.
Another method is proposed alongside this one here; a sketch of it follows.
Hope I've helped someone.
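That other method is, to the best of my knowledge, SQLAlchemy's mutation tracking extension. A sketch (my addition), assuming the same model; MutableDict.as_mutable wraps the column type so in-place changes mark the attribute dirty:

from sqlalchemy.ext.mutable import MutableDict

class User(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80), unique=True)
    # In-place changes like user.data['k'] = 'v' are now detected at commit,
    # so neither copying the dict nor flag_modified is needed.
    data = db.Column(MutableDict.as_mutable(db.JSON))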
models.py: define the serializers.
from datetime import date, datetime  # assumed imports
import json

def default(o):
    if isinstance(o, (date, datetime)):
        return o.isoformat()

def get_model_columns(instance, exclude=[]):
    columns = instance.__table__.columns.keys()
    columns = list(set(columns) - set(exclude))
    return columns

class User(db.Model):
    __tablename__ = 'user'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    .......

    def serializers(self):
        cols = get_model_columns(self)
        dict_val = {}
        for c in cols:
            dict_val[c] = getattr(self, c)
        return json.loads(json.dumps(dict_val, default=default))
In the REST API, we can update the record dynamically by passing the JSON data into the update query:
class UpdateUserDetails(Resource):
    @auth_token_required
    def post(self):
        json_data = request.get_json()
        user_id = current_user.id
        try:
            instance = User.query.filter(User.id == user_id)
            data = instance.update(dict(json_data))
            db.session.commit()
            updateddata = instance.first()
            msg = {"msg": "User details updated successfully",
                   "data": updateddata.serializers()}
            code = 200
        except Exception as e:
            print(e)
            msg = {"msg": "Failed to update the user details! Please contact your administrator."}
            code = 500
        return msg, code
I was looking for something a little less intrusive than @Ramesh's answer (which was good) but still dynamic. Here is a solution attaching an update method to a db.Model object.
You pass in a dictionary and it will update only the columns that you pass in.
class SampleObject(db.Model):
    id = db.Column(db.BigInteger, primary_key=True)
    name = db.Column(db.String(128), nullable=False)
    notes = db.Column(db.Text, nullable=False)

    def update(self, update_dictionary: dict):
        # Only set attributes for columns present in the dictionary.
        for col_name in self.__table__.columns.keys():
            if col_name in update_dictionary:
                setattr(self, col_name, update_dictionary[col_name])
        db.session.add(self)
        db.session.commit()

Then in a route you can do:

object = SampleObject.query.where(SampleObject.id == id).first()
object.update(update_dictionary=request.get_json())
Update the columns in Flask (note: admin.save() assumes your model defines a custom save() helper that commits the session; plain Flask-SQLAlchemy models do not have one):

admin = User.query.filter_by(username='admin').first()
admin.email = 'my_new_email@example.com'
admin.save()
To use the update method (which updates the entry outside of the session), you have to build the query in steps like this:

query = db.session.query(UserModel)
query = query.filter(UserModel.id == user_id)
query.update(user_dumped)
db.session.commit()
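One detail worth knowing about Query.update: by default it does not reliably refresh objects already loaded in the session. Passing synchronize_session='fetch' (a standard SQLAlchemy option) keeps them consistent; a sketch (my addition):

query = db.session.query(UserModel).filter(UserModel.id == user_id)
# 'fetch' selects the affected rows first and updates in-session objects
# to match; the default 'evaluate' applies the criteria in Python and can
# raise for expressions it cannot evaluate.
query.update(user_dumped, synchronize_session='fetch')
db.session.commit()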

Reverse mapping from a table to a model in SQLAlchemy

To provide an activity log in my SQLAlchemy-based app, I have a model like this:
class ActivityLog(Base):
    __tablename__ = 'activitylog'

    id = Column(Integer, primary_key=True)
    activity_by_id = Column(Integer, ForeignKey('users.id'), nullable=False)
    activity_by = relation(User, primaryjoin=activity_by_id == User.id)
    activity_at = Column(DateTime, default=datetime.utcnow, nullable=False)
    activity_type = Column(SmallInteger, nullable=False)
    target_table = Column(Unicode(20), nullable=False)
    target_id = Column(Integer, nullable=False)
    target_title = Column(Unicode(255), nullable=False)
The log contains entries for multiple tables, so I can't use ForeignKey relations. Log entries are made like this:
doc = Document(name=u'mydoc', title=u'My Test Document',
               created_by=user, edited_by=user)
session.add(doc)
session.flush()  # See note below
log = ActivityLog(activity_by=user, activity_type=ACTIVITY_ADD,
                  target_table=Document.__table__.name, target_id=doc.id,
                  target_title=doc.title)
session.add(log)
This leaves me with three problems:
1. I have to flush the session before my doc object gets an id. If I had used a ForeignKey column and a relation mapper, I could have simply called ActivityLog(target=doc) and let SQLAlchemy do the work. Is there any way to work around needing to flush by hand?
2. The target_table parameter is too verbose. I suppose I could solve this with a target property setter in ActivityLog that automatically retrieves the table name and id from a given instance.
3. Biggest of all, I'm not sure how to retrieve a model instance from the database. Given an ActivityLog instance log, calling self.session.query(log.target_table).get(log.target_id) does not work, as query() expects a model as parameter (see the sketch after this list).
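For the third problem, one hedged workaround (my addition, not part of the original question) is to resolve the mapped class from the table name via the mapper registry; the registry attribute shown is SQLAlchemy 1.4+, older versions kept a similar private registry on Base:

def class_for_table(tablename):
    # Scan all mappers registered on the declarative Base for a matching table.
    for mapper in Base.registry.mappers:
        if mapper.local_table is not None and mapper.local_table.name == tablename:
            return mapper.class_
    return None

target_cls = class_for_table(log.target_table)
if target_cls is not None:
    target = session.query(target_cls).get(log.target_id)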
One workaround appears to be to use polymorphism and derive all my models from a base model which ActivityLog recognises. Something like this:
class Entity(Base):
    __tablename__ = 'entities'
    id = Column(Integer, primary_key=True)
    title = Column(Unicode(255), nullable=False)
    edited_at = Column(DateTime, onupdate=datetime.utcnow, nullable=False)
    entity_type = Column(Unicode(20), nullable=False)
    __mapper_args__ = {'polymorphic_on': entity_type}

class Document(Entity):
    __tablename__ = 'documents'
    __mapper_args__ = {'polymorphic_identity': 'document'}
    body = Column(UnicodeText, nullable=False)

class ActivityLog(Base):
    __tablename__ = 'activitylog'
    id = Column(Integer, primary_key=True)
    ...
    target_id = Column(Integer, ForeignKey('entities.id'), nullable=False)
    target = relation(Entity)
If I do this, ActivityLog(...).target will give me a Document instance when it refers to a Document, but I'm not sure it's worth the overhead of having two tables for everything. Should I go ahead and do it this way?
One way to solve this is polymorphic associations. It should solve all 3 of your issues and also make database foreign key constraints work. See the polymorphic association example in SQLAlchemy source. Mike Bayer has an old blogpost that discusses this in greater detail.
Definitely go through the blog post and examples Ants linked to. I did not find the explanation confusing, though it does assume some more experience with the topic.
A few things I can suggest:
ForeignKeys: in general I agree they are a good thing to have, but I am not sure they are conceptually important in your case: you seem to be using this ActivityLog as an orthogonal cross-cutting concern (AOP), and the version with foreign keys would effectively make your business objects aware of the ActivityLog. Another problem with having an FK for audit purposes with your schema setup is that if you allow object deletion, the FK requirement will delete all the ActivityLog entries for that object.
Automatic logging: you are doing all this logging manually whenever you create/modify(/delete) an object. With SA you could implement a SessionExtension with before_commit that does the job for you automatically.
In this way you can completely avoid writing parts like the below:

log = ActivityLog(activity_by=user, activity_type=ACTIVITY_ADD,
                  target_table=Document.__table__.name, target_id=doc.id,
                  target_title=doc.title)
session.add(log)
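Note that SessionExtension is a legacy API that has been removed in modern SQLAlchemy; on 1.0+ the same before_commit hook is available through the event system. A rough sketch of the equivalent (my addition, not the original answer's code):

from sqlalchemy import event
from sqlalchemy.orm import Session

@event.listens_for(Session, 'before_commit')
def create_activity_logs(session):
    # Mirror the SessionExtension below: one log entry per new/dirty/deleted
    # auditable object in this session.
    for obj in session.new:
        if hasattr(obj, 'create_activitylog'):
            obj.create_activitylog(ACTIVITY_ADD)
    for obj in session.dirty:
        if hasattr(obj, 'create_activitylog'):
            obj.create_activitylog(ACTIVITY_MOD)
    for obj in session.deleted:
        if hasattr(obj, 'create_activitylog'):
            obj.create_activitylog(ACTIVITY_DEL)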
EDIT-1: complete sample code added
The code is based on the first non-FK version from http://techspot.zzzeek.org/?p=13.
The choice not to use FKs is based on the fact that, for audit purposes, when the main object is deleted it should not cascade to delete all the audit log entries. This also keeps auditable objects unaware of the fact that they are being audited.
The implementation uses an SA one-to-many relationship. It is possible that some objects are modified many times, which will result in many audit log entries. By default SA will load the relationship objects when adding a new entry to the list. Assuming that during "normal" usage we only want to add a new audit log entry, we use the lazy='noload' flag so that the relation from the main object will never be loaded. It is loaded when navigated from the other side though, and it can also be loaded from the main object using a custom query, which is shown in the example as well using the activitylogs_readonly read-only property.
Code (runnable with some tests):
from datetime import datetime
from sqlalchemy import create_engine, Column, Integer, SmallInteger, String, DateTime, ForeignKey, Table, UnicodeText, Unicode, and_
from sqlalchemy.orm import relationship, dynamic_loader, scoped_session, sessionmaker, class_mapper, backref
from sqlalchemy.orm.session import Session
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm.interfaces import SessionExtension
import logging

logging.basicConfig(level=logging.INFO)
_logger = logging.getLogger()

ACTIVITY_ADD = 1
ACTIVITY_MOD = 2
ACTIVITY_DEL = 3

class ActivityLogSessionExtension(SessionExtension):
    _logger = logging.getLogger('ActivityLogSessionExtension')

    def before_commit(self, session):
        self._logger.debug("before_commit: %s", session)
        for d in session.new:
            self._logger.info("before_commit >> add: %s", d)
            if hasattr(d, 'create_activitylog'):
                log = d.create_activitylog(ACTIVITY_ADD)
        for d in session.dirty:
            self._logger.info("before_commit >> mod: %s", d)
            if hasattr(d, 'create_activitylog'):
                log = d.create_activitylog(ACTIVITY_MOD)
        for d in session.deleted:
            self._logger.info("before_commit >> del: %s", d)
            if hasattr(d, 'create_activitylog'):
                log = d.create_activitylog(ACTIVITY_DEL)

# Configure test data SA
engine = create_engine('sqlite:///:memory:', echo=False)
session = scoped_session(sessionmaker(bind=engine, autoflush=False, extension=ActivityLogSessionExtension()))
Base = declarative_base()
Base.query = session.query_property()

class _BaseMixin(object):
    """ Just a helper mixin class to set properties on object creation.
    Also provides a convenient default __repr__() function, but be aware that
    also relationships are printed, which might result in loading relations.
    """
    def __init__(self, **kwargs):
        for k, v in kwargs.items():
            setattr(self, k, v)

    def __repr__(self):
        return "<%s(%s)>" % (self.__class__.__name__,
                ', '.join('%s=%r' % (k, self.__dict__[k])
                    for k in sorted(self.__dict__) if '_sa_' != k[:4] and '_backref_' != k[:9])
                )

class User(Base, _BaseMixin):
    __tablename__ = u'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)

class Document(Base, _BaseMixin):
    __tablename__ = u'documents'
    id = Column(Integer, primary_key=True)
    title = Column(Unicode(255), nullable=False)
    body = Column(UnicodeText, nullable=False)

class Folder(Base, _BaseMixin):
    __tablename__ = u'folders'
    id = Column(Integer, primary_key=True)
    title = Column(Unicode(255), nullable=False)
    comment = Column(UnicodeText, nullable=False)

class ActivityLog(Base, _BaseMixin):
    __tablename__ = u'activitylog'
    id = Column(Integer, primary_key=True)
    activity_by_id = Column(Integer, ForeignKey('users.id'), nullable=False)
    activity_by = relationship(User)  # @note: no need to specify the primaryjoin
    activity_at = Column(DateTime, default=datetime.utcnow, nullable=False)
    activity_type = Column(SmallInteger, nullable=False)
    target_table = Column(Unicode(20), nullable=False)
    target_id = Column(Integer, nullable=False)
    target_title = Column(Unicode(255), nullable=False)
    # backref relation for auditable
    target = property(lambda self: getattr(self, '_backref_%s' % self.target_table))

def _get_user():
    """ This method returns the User object for the current user.
    @todo: proper implementation required
    @hack: currently returns the 'user2'
    """
    return session.query(User).filter_by(name='user2').one()

# auditable support function
# based on the first non-FK version from http://techspot.zzzeek.org/?p=13
def auditable(cls, name):
    def create_activitylog(self, activity_type):
        log = ActivityLog(activity_by=_get_user(),
                          activity_type=activity_type,
                          target_table=table.name,
                          target_title=self.title,
                          )
        getattr(self, name).append(log)
        return log

    mapper = class_mapper(cls)
    table = mapper.local_table
    cls.create_activitylog = create_activitylog

    def _get_activitylog(self):
        return Session.object_session(self).query(ActivityLog).with_parent(self).all()
    setattr(cls, '%s_readonly' % (name,), property(_get_activitylog))

    # no constraints, therefore define constraints in an ad-hoc fashion.
    primaryjoin = and_(
        list(table.primary_key)[0] == ActivityLog.__table__.c.target_id,
        ActivityLog.__table__.c.target_table == table.name
    )
    foreign_keys = [ActivityLog.__table__.c.target_id]
    mapper.add_property(name,
        # @note: because we use the relationship, by default all previous
        # ActivityLog items will be loaded for an object when a new one is
        # added. To avoid this, use either dynamic_loader (http://www.sqlalchemy.org/docs/reference/orm/mapping.html#sqlalchemy.orm.dynamic_loader)
        # or lazy='noload'. This is the trade-off decision to be made.
        # An additional benefit of using lazy='noload' is that one can also
        # record DEL operations in the same way as ADD, MOD
        relationship(
            ActivityLog,
            lazy='noload',  # important for the relationship
            primaryjoin=primaryjoin,
            foreign_keys=foreign_keys,
            backref=backref('_backref_%s' % table.name,
                            primaryjoin=list(table.primary_key)[0] == ActivityLog.__table__.c.target_id,
                            foreign_keys=foreign_keys)
        )
    )

# this will define which classes support the ActivityLog interface
auditable(Document, 'activitylogs')
auditable(Folder, 'activitylogs')

# create db schema
Base.metadata.create_all(engine)

## >>>>> TESTS >>>>>>

# create some basic data first
u1 = User(name='user1')
u2 = User(name='user2')
session.add(u1)
session.add(u2)
session.commit()
session.expunge_all()
# --check--
assert not (_get_user() is None)

##############################
## ADD
##############################
_logger.info('-' * 80)
d1 = Document(title=u'Document-1', body=u'Doc1 some body skipped the body')
# when not using SessionExtension for any reason, this can be called manually
# d1.create_activitylog(ACTIVITY_ADD)
session.add(d1)
session.commit()
f1 = Folder(title=u'Folder-1', comment=u'This folder is empty')
# when not using SessionExtension for any reason, this can be called manually
# f1.create_activitylog(ACTIVITY_ADD)
session.add(f1)
session.commit()
# --check--
session.expunge_all()
logs = session.query(ActivityLog).all()
_logger.debug(logs)
assert len(logs) == 2
assert logs[0].activity_type == ACTIVITY_ADD
assert logs[0].target.title == u'Document-1'
assert logs[0].target.title == logs[0].target_title
assert logs[1].activity_type == ACTIVITY_ADD
assert logs[1].target.title == u'Folder-1'
assert logs[1].target.title == logs[1].target_title

##############################
## MOD(ify)
##############################
_logger.info('-' * 80)
session.expunge_all()
d1 = session.query(Document).filter_by(id=1).one()
assert d1.title == u'Document-1'
assert d1.body == u'Doc1 some body skipped the body'
assert d1.activitylogs == []
d1.title = u'Modified: Document-1'
d1.body = u'Modified: body'
# when not using SessionExtension (or it does not work), this can be called manually
# d1.create_activitylog(ACTIVITY_MOD)
session.commit()
_logger.debug(d1.activitylogs_readonly)
# --check--
session.expunge_all()
logs = session.query(ActivityLog).all()
assert len(logs) == 3
assert logs[2].activity_type == ACTIVITY_MOD
assert logs[2].target.title == u'Modified: Document-1'
assert logs[2].target.title == logs[2].target_title

##############################
## DEL(ete)
##############################
_logger.info('-' * 80)
session.expunge_all()
d1 = session.query(Document).filter_by(id=1).one()
# when not using SessionExtension for any reason, this can be called manually
# d1.create_activitylog(ACTIVITY_DEL)
session.delete(d1)
session.commit()
session.expunge_all()
# --check--
session.expunge_all()
logs = session.query(ActivityLog).all()
assert len(logs) == 4
assert logs[0].target is None
assert logs[2].target is None
assert logs[3].activity_type == ACTIVITY_DEL
assert logs[3].target is None

##############################
## print all activity logs
##############################
_logger.info('=' * 80)
logs = session.query(ActivityLog).all()
for log in logs:
    _ = log.target
    _logger.info("%s -> %s", log, log.target)

##############################
## navigate from main object
##############################
_logger.info('=' * 80)
session.expunge_all()
f1 = session.query(Folder).filter_by(id=1).one()
_logger.info(f1.activitylogs_readonly)
