sqlalchemy with dynamic mapping and complex object querying - python

I have the following situation:
class MyBaseClass(object):
    def __init__(self, name):
        self.name = name
        self.period = None
        self.foo = None

    def __getitem__(self, item):
        return getattr(self, item)

    def __setitem__(self, item, value):
        return setattr(self, item, value)
If at run time I need to add some additional columns, I can do:
my_base_class_table = Table("MyBaseClass", metadata,
    Column('name', String, primary_key=True),
    Column('period', DateTime),
    Column('foo', Float),
)

my_base_class_table = Table("MyBaseClass", metadata, extend_existing=True)
column_list = ["value_one", "other_name", "random_XARS123"]
for col in column_list:
    my_base_class_table.append_column(Column(col, Float))

metadata.create_all()
mapper(MyBaseClass, my_base_class_table)
Up to here we have a fully functional dynamic table mapping with extended columns.
Now, using SQLAlchemy's ORM, you can easily instantiate a MyBaseClass and modify it to reflect changes in the database:
base_class = MyBaseClass(name="Something")
base_class.period = "2002-10-01"
And using the dynamic columns with unknown column names:
for col in column_list:
    base_class[col] = 10
session.add(base_class)
But querying only works if you know the column names:
t_query = session.query(func.strftime('%Y-%m-%d', MyBaseClass.period),
                        func.sum(MyBaseClass.foo),
                        func.sum(MyBaseClass.other_name * MyBaseClass.value_one))
Is it possible to repeat the last query (t_query) without knowing the column names? I've already tried different variants with no luck:
func.sum(MyBaseClass[column_list[0]]*MyBaseClass.[column_list[1]])
The only thing that actually works is falling back to textual SQL:

text_query = text("SELECT strftime('%Y-%m-%d', period) as period, sum(foo) as foo, sum({0}*{1}) as bar FROM {2}".format(column_list[0], column_list[1], "MyBaseClass"))

A simple getattr will do the trick:
t_query = session.query(func.strftime('%Y-%m-%d', getattr(MyBaseClass, "period")),
                        func.sum(getattr(MyBaseClass, "foo")),
                        func.sum(getattr(MyBaseClass, "other_name") * getattr(MyBaseClass, "value_one")))
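For the fully dynamic case you can combine this with the runtime column list. A minimal sketch, assuming the same column_list, session, and mapping as in the question:

first_col = getattr(MyBaseClass, column_list[0])   # e.g. "value_one"
second_col = getattr(MyBaseClass, column_list[1])  # e.g. "other_name"

# Same shape as t_query above, but no column name is hard-coded.
t_query = session.query(
    func.strftime('%Y-%m-%d', MyBaseClass.period),
    func.sum(MyBaseClass.foo),
    func.sum(first_col * second_col),
)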

Related

Not able to handle "on_conflict_do_nothing" in SQLAlchemy in MySQL [duplicate]

Is there an elegant way to do an INSERT ... ON DUPLICATE KEY UPDATE in SQLAlchemy? I mean something with a syntax similar to inserter.insert().execute(list_of_dictionaries)?
ON DUPLICATE KEY UPDATE post version-1.2 for MySQL
This functionality is now built into SQLAlchemy for MySQL only. somada141's answer below has the best solution:
https://stackoverflow.com/a/48373874/319066
ON DUPLICATE KEY UPDATE in the SQL statement
If you want the generated SQL to actually include ON DUPLICATE KEY UPDATE, the simplest way involves using a @compiles decorator.
Example code (linked from a good reddit thread on the subject) can be found on GitHub:
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.expression import Insert

@compiles(Insert)
def append_string(insert, compiler, **kw):
    s = compiler.visit_insert(insert, **kw)
    if 'append_string' in insert.kwargs:
        return s + " " + insert.kwargs['append_string']
    return s

my_connection.execute(my_table.insert(append_string='ON DUPLICATE KEY UPDATE foo=foo'), my_values)
But note that in this approach, you have to manually create the append_string. You could probably change the append_string function so that it automatically changes the insert string into an insert with 'ON DUPLICATE KEY UPDATE' string, but I'm not going to do that here due to laziness.
ON DUPLICATE KEY UPDATE functionality within the ORM
SQLAlchemy does not provide an interface to ON DUPLICATE KEY UPDATE or MERGE or any other similar functionality in its ORM layer. Nevertheless, it has the session.merge() function that can replicate the functionality only if the key in question is a primary key.
session.merge(ModelObject) first checks if a row with the same primary key value exists by sending a SELECT query (or by looking it up locally). If it does, it sets a flag somewhere indicating that ModelObject is in the database already, and that SQLAlchemy should use an UPDATE query. Note that merge is quite a bit more complicated than this, but it replicates the functionality well with primary keys.
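A minimal sketch of that merge-based approach (the User model and its columns are hypothetical):

# merge() looks the object up by primary key (here id); if a row already
# exists it issues an UPDATE, otherwise an INSERT.
obj = User(id=42, name='updated name')
merged = session.merge(obj)
session.commit()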
But what if you want ON DUPLICATE KEY UPDATE functionality with a non-primary key (for example, another unique key)? Unfortunately, SQLAlchemy doesn't have any such function. Instead, you have to create something that resembles Django's get_or_create(). Another StackOverflow answer covers it, and I'll just paste a modified, working version of it here for convenience.
from sqlalchemy.sql.expression import ClauseElement

def get_or_create(session, model, defaults=None, **kwargs):
    instance = session.query(model).filter_by(**kwargs).first()
    if instance:
        return instance
    else:
        # filter out clause elements so only plain values reach the constructor
        params = dict((k, v) for k, v in kwargs.items() if not isinstance(v, ClauseElement))
        if defaults:
            params.update(defaults)
        instance = model(**params)
        return instance
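Usage could look like this (the User model and its columns are hypothetical):

# Look the row up by a unique, non-primary key; build it if missing.
user = get_or_create(session, User, email='foo@example.com',
                     defaults={'name': 'Foo'})
session.add(user)  # no-op if the instance is already persistent
session.commit()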
I should mention that ever since the v1.2 release, SQLAlchemy core has a built-in solution for this, documented here (copied snippet below):
from sqlalchemy.dialects.mysql import insert

insert_stmt = insert(my_table).values(
    id='some_existing_id',
    data='inserted value')

on_duplicate_key_stmt = insert_stmt.on_duplicate_key_update(
    data=insert_stmt.inserted.data,
    status='U'
)

conn.execute(on_duplicate_key_stmt)
Based on phsource's answer, and for the specific use-case of using MySQL and completely overriding the data for the same key without performing a DELETE statement, one can use the following @compiles-decorated insert expression:
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.expression import Insert

@compiles(Insert)
def append_string(insert, compiler, **kw):
    s = compiler.visit_insert(insert, **kw)
    if insert.kwargs.get('on_duplicate_key_update'):
        fields = s[s.find("(") + 1:s.find(")")].replace(" ", "").split(",")
        generated_directive = ["{0}=VALUES({0})".format(field) for field in fields]
        return s + " ON DUPLICATE KEY UPDATE " + ",".join(generated_directive)
    return s
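Usage might then mirror the earlier append_string example (a sketch assuming the same my_table and my_values as in that snippet, and a SQLAlchemy version old enough to stash unknown insert() keyword arguments in insert.kwargs):

# Any truthy value enables the generated directive; the column list is
# parsed out of the compiled INSERT statement itself.
my_connection.execute(
    my_table.insert(on_duplicate_key_update=True),
    my_values)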
It depends on what you want. If you want to replace, pass OR REPLACE in prefixes:
def bulk_insert(self, objects, table):
    # table: your table class; objects is a list of dictionaries [{col1: val1, col2: val2}]
    for counter, row in enumerate(objects):
        inserter = table.__table__.insert(prefixes=['OR IGNORE'], values=row)
        try:
            self.db.execute(inserter)
        except Exception as e:
            print(e)
        if counter % 100 == 0:
            self.db.commit()
    self.db.commit()
The commit interval here can be tuned to speed things up or slow them down.
My way
import typing
from datetime import datetime

from sqlalchemy.dialects import mysql

class MyRepository:
    def model(self):
        return MySqlAlchemyModel

    def upsert(self, data: typing.List[typing.Dict]):
        if not data:
            return

        model = self.model()
        if hasattr(model, 'created_at'):
            for item in data:
                item['created_at'] = datetime.now()

        stmt = mysql.insert(getattr(model, '__table__')).values(data)

        for_update = []
        for k, v in data[0].items():
            for_update.append(k)

        dup = {k: getattr(stmt.inserted, k) for k in for_update}
        stmt = stmt.on_duplicate_key_update(**dup)

        self.db.session.execute(stmt)
        self.db.session.commit()
Usage:
myrepo.upsert([
    {
        "field11": "value11",
        "field21": "value21",
        "field31": "value31",
    },
    {
        "field12": "value12",
        "field22": "value22",
        "field32": "value32",
    },
])
The other answers have this covered, but I figured I'd reference another good example for MySQL that I found in this gist. This also includes the use of LAST_INSERT_ID, which may be useful depending on your InnoDB auto-increment settings and whether your table has a unique key. Lifting the code here for easy reference, but please give the author a star if you find it useful.
from app import db
from sqlalchemy import func
from sqlalchemy.dialects.mysql import insert

def upsert(model, insert_dict):
    """model can be a db.Model or a table(); insert_dict should contain a primary or unique key."""
    inserted = insert(model).values(**insert_dict)
    upserted = inserted.on_duplicate_key_update(
        id=func.LAST_INSERT_ID(model.id),
        **{k: inserted.inserted[k] for k, v in insert_dict.items()})
    res = db.engine.execute(upserted)
    return res.lastrowid
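Usage might look like this (the model and column names are hypothetical):

# Returns the id of the row, whether it was freshly inserted or updated,
# thanks to the LAST_INSERT_ID(model.id) trick above.
row_id = upsert(User, {'email': 'foo@example.com', 'name': 'Foo'})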
ORM
Use an upsert method based on on_duplicate_key_update:
from sqlalchemy.dialects.mysql import insert
from sqlalchemy.orm import Session

class Model():
    __input_data__ = dict()

    def __init__(self, **kwargs) -> None:
        self.__input_data__ = kwargs
        self.session = Session(engine)

    def save(self):
        self.session.add(self)
        self.session.commit()

    def upsert(self, *, ignore_keys=[]):
        column_keys = self.__table__.columns.keys()
        update_data = dict()
        for key in self.__input_data__.keys():
            if key not in column_keys:
                continue
            else:
                update_data[key] = self.__input_data__[key]

        insert_stmt = insert(self.__table__).values(**update_data)

        all_ignore_keys = ['id']
        if isinstance(ignore_keys, list):
            all_ignore_keys = [*all_ignore_keys, *ignore_keys]
        else:
            all_ignore_keys.append(ignore_keys)

        update_columns = dict()
        for key in self.__input_data__.keys():
            if key not in column_keys or key in all_ignore_keys:
                continue
            else:
                update_columns[key] = insert_stmt.inserted[key]

        on_duplicate_key_stmt = insert_stmt.on_duplicate_key_update(
            **update_columns
        )

        # self.session.add(self)
        self.session.execute(on_duplicate_key_stmt)
        self.session.commit()

class ManagerAssoc(ORM_Base, Model):
    def __init__(self, **kwargs):
        self.id = idWorker.get_id()
        column_keys = self.__table__.columns.keys()
        update_data = dict()
        for key in kwargs.keys():
            if key not in column_keys:
                continue
            else:
                update_data[key] = kwargs[key]
        ORM_Base.__init__(self, **update_data)
        Model.__init__(self, **kwargs, id=self.id)
    ....

# you can call it as follows:
manager_assoc.upsert()
manager.upsert(ignore_keys=['manager_id'])
Got a simpler solution:
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.expression import Insert

@compiles(Insert)
def replace_string(insert, compiler, **kw):
    s = compiler.visit_insert(insert, **kw)
    s = s.replace("INSERT INTO", "REPLACE INTO")
    return s

my_connection.execute(my_table.insert(replace_string=""), my_values)
I just used plain SQL:

insert_stmt = "REPLACE INTO tablename (column1, column2) VALUES (:column_1_bind, :column_2_bind)"
session.execute(insert_stmt, data)
As none of these solutions seem all that elegant, a brute-force way is to query to see whether the row exists; if it does, delete the row and then insert, otherwise just insert. Obviously there is some overhead involved, but it does not rely on modifying the raw SQL, and it works on non-ORM stuff too.
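A minimal sketch of that brute-force approach, assuming a mapped User model with a unique email column (all names hypothetical):

# Not atomic: run it inside a transaction (or accept the race) in real code.
existing = session.query(User).filter_by(email='foo@example.com').first()
if existing:
    session.delete(existing)
    session.flush()  # push the DELETE to the DB before the INSERT
session.add(User(email='foo@example.com', name='Foo'))
session.commit()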

SqlAlchemy TypeDecorator, column_expression and aggregate functions

I found the solution to my problem in the SQLAlchemy docs:
from sqlalchemy.dialects.postgresql import BYTEA

class PGPString(BYTEA):
    def __init__(self, passphrase, length=None):
        super(PGPString, self).__init__(length)
        self.passphrase = passphrase

    def bind_expression(self, bindvalue):
        # convert the bind's type from PGPString to
        # String, so that it's passed to psycopg2 as is without
        # a dbapi.Binary wrapper
        bindvalue = type_coerce(bindvalue, String)
        return func.pgp_sym_encrypt(bindvalue, self.passphrase)

    def column_expression(self, col):
        return func.pgp_sym_decrypt(col, self.passphrase)
It produces the query as expected:
SELECT pgp_sym_decrypt(employees.salary, $1) AS salary_crypt FROM employees
But when I wrap a PGPString column in any aggregate expression:
session.query(func.sum(Employee.salary))
the following query is produced:
SELECT pgp_sym_decrypt(sum(salary_rate_employee_link.salary_crypt), $1) AS sum_1
and what I expected
SELECT sum(pgp_sym_decrypt(salary_rate_employee_link.salary_crypt, $1)) AS sum_1
As you can see, the order of the DB functions is not the one I expected, so I would like to find a simple way to make this work properly.
Well, the only solution I have come up with so far is something of a workaround:
class PGPString(BYTEA):
    def __init__(self, passphrase, length=None):
        super(PGPString, self).__init__(length)
        self.passphrase = passphrase

    def bind_expression(self, bindvalue):
        # convert the bind's type from PGPString to
        # String, so that it's passed to psycopg2 as is without
        # a dbapi.Binary wrapper
        bindvalue = type_coerce(bindvalue, String)
        return func.pgp_sym_encrypt(bindvalue, self.passphrase)

    def column_expression(self, col):
        # decrypt each underlying clause, then re-apply any wrapping
        # function (e.g. sum()) outside the decryption
        clauses = getattr(col, 'clauses', [col])
        columns = [
            db.func.pgp_sym_decrypt(clause, self.passphrase)
            for clause in clauses
        ]
        if isinstance(col, FunctionElement):
            if col.base_columns:
                base_column, *base_columns = col.base_columns
                columns = type(base_column)(*columns)
                for base_column in base_columns:
                    columns = type(base_column)(columns)
        else:
            columns = columns[0]
        return columns

SQLAlchemy: how to extend hybrid attributes?

I'm working with an MSSQL database with no control over the DB setup or the (read-only) data in it. One table is represented in SQLAlchemy like this:
class pdAnlage(pdBase):
    __tablename__ = "Anlage"

    typ = Column(CHAR(4), primary_key=True)
    nummer = Column(CHAR(4), primary_key=True)
In accessing the database, I need a property "name" that is just a concatenation of "typ" and "nummer" with a dot between them. So I did this:
@hybrid_property
def name(self):
    return self.typ + '.' + self.nummer
Looks simple and works as expected. There are two caveats though, one general and one special. The general one: the table is quite big, and I'd like to make queries against Anlage.name, like this:
db.query(Anlage).filter(Anlage.name.like('A%.B'))
db.query(Anlage).filter(Anlage.name == 'X.Y')
This works, but it is inefficient, as the SQL server first has to concatenate the "typ" and "nummer" columns of the whole (large) table before doing the test. So I've defined classmethods like this one:
@classmethod
def name_like(cls, pattern):
    p = pattern.split('.', 2)
    if len(p) == 1 or not p[1]:
        return cls.typ.like(p[0])
    else:
        return and_(cls.typ.like(p[0]), cls.nummer.like(p[1]))
This isn't elegant, but it does the job just fine. It would be nicer to overload "==" and "like()"; is there a way to do that?
Now to the special case: both the typ and nummer columns can contain trailing spaces in the DB, but the name property must not have spaces, especially not before the dot. So I tried to rewrite the name hybrid property like this:
@hybrid_property
def name(self):
    return self.typ.rstrip() + '.' + self.nummer.rstrip()
This doesn't work because SQLAlchemy doesn't know how to translate the rstrip() python method to the MSSQL RTRIM() function. How can I accomplish that?
You could implement a custom comparator that handles string operands in a special way (and others as necessary):
from sqlalchemy.ext.hybrid import Comparator

_sep = '.'

def _partition(s):
    typ, sep, nummer = s.partition(_sep)
    return typ, nummer

class NameComparator(Comparator):
    def __init__(self, typ, nummer):
        self.typ = typ
        self.nummer = nummer
        super().__init__(func.rtrim(typ) + _sep + func.rtrim(nummer))

    def operate(self, op, other, **kwgs):
        if isinstance(other, str):
            typ, nummer = _partition(other)
            expr = op(self.typ, typ, **kwgs)
            if nummer:
                expr = and_(expr, op(self.nummer, nummer, **kwgs))
            return expr
        else:
            # Default to using the "slow" method of concatenating first, which
            # hides the columns from the index created for the primary key.
            return op(self.__clause_element__(), other, **kwgs)
and use it with your hybrid attribute:
class pdAnlage(Base):
    __tablename__ = "Anlage"

    typ = Column(CHAR(4), primary_key=True)
    nummer = Column(CHAR(4), primary_key=True)

    @hybrid_property
    def name(self):
        return self.typ.rstrip() + _sep + self.nummer.rstrip()

    @name.comparator
    def name(cls):
        return NameComparator(cls.typ, cls.nummer)
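With the comparator in place, the queries from the question can be written directly against the hybrid; operate() splits the string operand on the dot and compares typ and nummer separately, so the primary-key index stays usable:

# Both predicates are rewritten into per-column comparisons.
db.query(pdAnlage).filter(pdAnlage.name == 'X.Y')
db.query(pdAnlage).filter(pdAnlage.name.like('A%.B'))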

running transactions in db.Model subclassed put

I'm trying to create a Google App Engine data model with the following attributes:

- store a (string, value) pair in BigTable
- if the (string, value) pair DOES NOT exist, create the record
- if the (string, value) pair DOES exist, update the record, incrementing a counter
code:
class stringListRecord(db.Model):
    type = db.StringProperty()
    value = db.StringProperty()
    refs = db.IntegerProperty(default=1)

    def __init__(self, *args, **kw):
        key = db.GqlQuery("SELECT __key__ FROM stringListRecord WHERE type = :1 AND value = :2",
                          kw['type'], kw['value']).get()
        if key is not None:
            kw['key'] = key
        db.Model.__init__(self, *args, **kw)

    def increment_counter(self, key):
        obj = db.get(key)
        obj.refs += 1
        db.Model.put(obj)

    def put(self):
        if self.key() is not None:
            self.increment_counter(self.key())
            #db.run_in_transaction(self.increment_counter, self.key())
        else:
            db.Model.put(self)
When I run the commented-out code, i.e. db.run_in_transaction(), I get:
Only ancestor queries are allowed inside transactions.
Is there a better way to get this functionality out of GAE?
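One well-known datastore pattern (a sketch, not from this thread, and assuming the custom __init__/put overrides are dropped in favor of a helper): derive a key_name from the (type, value) pair so the lookup becomes a fetch by key rather than a query, since key fetches are allowed inside transactions:

from google.appengine.ext import db

def add_or_increment(type_, value):
    key_name = '%s|%s' % (type_, value)  # assumed encoding of the pair

    def txn():
        # get_by_key_name is a key fetch, so it is legal in a transaction
        obj = stringListRecord.get_by_key_name(key_name)
        if obj is None:
            obj = stringListRecord(key_name=key_name, type=type_, value=value)
        else:
            obj.refs += 1
        obj.put()
        return obj

    return db.run_in_transaction(txn)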

SQLAlchemy versioning cares about class import order

I was following the guide here:
http://www.sqlalchemy.org/docs/orm/examples.html?highlight=versioning#versioned-objects
and have come across an issue. I have defined my relationships like:
generic_ticker = relation('MyClass', backref=backref("stuffs"))
with strings so it doesn't care about the import order of my model modules. This all works fine normally, but when I use the versioning meta I get the following error:
sqlalchemy.exc.InvalidRequestError: When initializing mapper Mapper|MyClass|stuffs, expression 'Trader' failed to locate a name ("name 'MyClass' is not defined"). If this is a class name, consider adding this relationship() to the class after both dependent classes have been defined.
I tracked down the error to:
File "/home/nick/workspace/gm3/gm3/lib/history_meta.py", line 90, in __init__
mapper = class_mapper(cls)
File "/home/nick/venv/tg2env/lib/python2.6/site-packages/sqlalchemy/orm/util.py", line 622, in class_mapper
mapper = mapper.compile()
class VersionedMeta(DeclarativeMeta):
    def __init__(cls, classname, bases, dict_):
        DeclarativeMeta.__init__(cls, classname, bases, dict_)
        try:
            mapper = class_mapper(cls)
            _history_mapper(mapper)
        except UnmappedClassError:
            pass
I fixed the problem by putting the try/except stuff in a lambda and running them all after all the imports have happened. This works but seems a bit rubbish; any ideas of how to fix this in a better way?
Thanks!
Update
The problem is not actually about import order. The versioning example is designed such that the mapper requires compilation in the constructor of each versioned class, and compilation fails when related classes are not yet defined. In the case of circular relations there is no way to make it work by changing the definition order of the mapped classes.
Update 2
As the above update states (I didn't know you could edit other people's posts on here :)) this is likely due to circular references. In which case maybe someone will find my hack useful (I'm using it with TurboGears). (Replace VersionedMeta and add a create_mappers global in history_meta.)
create_mappers = []

class VersionedMeta(DeclarativeMeta):
    def __init__(cls, classname, bases, dict_):
        DeclarativeMeta.__init__(cls, classname, bases, dict_)

        # I added this code in as it was crashing otherwise
        def make_mapper():
            try:
                mapper = class_mapper(cls)
                _history_mapper(mapper)
            except UnmappedClassError:
                pass

        create_mappers.append(lambda: make_mapper())
Then you can do something like the following in your models' __init__.py:
# Import your model modules here.
from myproj.lib.history_meta import create_mappers

from myproj.model.misc import *
from myproj.model.actor import *
from myproj.model.stuff1 import *
from myproj.model.instrument import *
from myproj.model.stuff import *

# setup the history
[func() for func in create_mappers]
That way it creates the mappers only after all the classes have been defined.
Update 3
Slightly unrelated, but I came across a duplicate primary key error in some circumstances (committing two changes to the same object in one go). My workaround has been to add a new auto-incrementing primary key. Of course you can't have more than one with MySQL, so I had to de-primary-key the existing columns used to create the history table. Check out my overall code (including a hist_id and getting rid of the foreign key constraint):
"""Stolen from the offical sqlalchemy recpies
"""
from sqlalchemy.ext.declarative import DeclarativeMeta
from sqlalchemy.orm import mapper, class_mapper, attributes, object_mapper
from sqlalchemy.orm.exc import UnmappedClassError, UnmappedColumnError
from sqlalchemy import Table, Column, ForeignKeyConstraint, Integer
from sqlalchemy.orm.interfaces import SessionExtension
from sqlalchemy.orm.properties import RelationshipProperty
from sqlalchemy.types import DateTime
import datetime
from sqlalchemy.orm.session import Session
def col_references_table(col, table):
for fk in col.foreign_keys:
if fk.references(table):
return True
return False
def _history_mapper(local_mapper):
    cls = local_mapper.class_

    # set the "active_history" flag
    # on column-mapped attributes so that the old version
    # of the info is always loaded (currently sets it on all attributes)
    for prop in local_mapper.iterate_properties:
        getattr(local_mapper.class_, prop.key).impl.active_history = True

    super_mapper = local_mapper.inherits
    super_history_mapper = getattr(cls, '__history_mapper__', None)

    polymorphic_on = None
    super_fks = []

    if not super_mapper or local_mapper.local_table is not super_mapper.local_table:
        cols = []
        for column in local_mapper.local_table.c:
            if column.name == 'version':
                continue

            col = column.copy()
            col.unique = False

            # don't auto increment stuff from the normal db
            if col.autoincrement:
                col.autoincrement = False
            # sqlite falls over with auto incrementing keys if we have a composite key
            if col.primary_key:
                col.primary_key = False

            if super_mapper and col_references_table(column, super_mapper.local_table):
                super_fks.append((col.key, list(super_history_mapper.base_mapper.local_table.primary_key)[0]))

            cols.append(col)

            if column is local_mapper.polymorphic_on:
                polymorphic_on = col

        #if super_mapper:
        #    super_fks.append(('version', super_history_mapper.base_mapper.local_table.c.version))

        cols.append(Column('hist_id', Integer, primary_key=True, autoincrement=True))
        cols.append(Column('version', Integer))
        cols.append(Column('changed', DateTime, default=datetime.datetime.now))

        if super_fks:
            cols.append(ForeignKeyConstraint(*zip(*super_fks)))

        table = Table(local_mapper.local_table.name + '_history',
                      local_mapper.local_table.metadata,
                      *cols, mysql_engine='InnoDB')
    else:
        # single table inheritance. take any additional columns that may have
        # been added and add them to the history table.
        for column in local_mapper.local_table.c:
            if column.key not in super_history_mapper.local_table.c:
                col = column.copy()
                super_history_mapper.local_table.append_column(col)
        table = None

    if super_history_mapper:
        bases = (super_history_mapper.class_,)
    else:
        bases = local_mapper.base_mapper.class_.__bases__
    versioned_cls = type.__new__(type, "%sHistory" % cls.__name__, bases, {})

    m = mapper(
        versioned_cls,
        table,
        inherits=super_history_mapper,
        polymorphic_on=polymorphic_on,
        polymorphic_identity=local_mapper.polymorphic_identity
    )
    cls.__history_mapper__ = m

    if not super_history_mapper:
        cls.version = Column('version', Integer, default=1, nullable=False)
create_mappers = []

class VersionedMeta(DeclarativeMeta):
    def __init__(cls, classname, bases, dict_):
        DeclarativeMeta.__init__(cls, classname, bases, dict_)

        # I added this code in as it was crashing otherwise
        def make_mapper():
            try:
                mapper = class_mapper(cls)
                _history_mapper(mapper)
            except UnmappedClassError:
                pass

        create_mappers.append(lambda: make_mapper())
def versioned_objects(iter):
    for obj in iter:
        if hasattr(obj, '__history_mapper__'):
            yield obj
def create_version(obj, session, deleted=False):
    obj_mapper = object_mapper(obj)
    history_mapper = obj.__history_mapper__
    history_cls = history_mapper.class_

    obj_state = attributes.instance_state(obj)

    attr = {}

    obj_changed = False

    for om, hm in zip(obj_mapper.iterate_to_root(), history_mapper.iterate_to_root()):
        if hm.single:
            continue

        for hist_col in hm.local_table.c:
            if hist_col.key == 'version' or hist_col.key == 'changed' or hist_col.key == 'hist_id':
                continue

            obj_col = om.local_table.c[hist_col.key]

            # get the value of the
            # attribute based on the MapperProperty related to the
            # mapped column. this will allow usage of MapperProperties
            # that have a different keyname than that of the mapped column.
            try:
                prop = obj_mapper.get_property_by_column(obj_col)
            except UnmappedColumnError:
                # in the case of single table inheritance, there may be
                # columns on the mapped table intended for the subclass only.
                # the "unmapped" status of the subclass column on the
                # base class is a feature of the declarative module as of sqla 0.5.2.
                continue

            # expired object attributes and also deferred cols might not be in the
            # dict. force it to load no matter what by using getattr().
            if prop.key not in obj_state.dict:
                getattr(obj, prop.key)

            a, u, d = attributes.get_history(obj, prop.key)

            if d:
                attr[hist_col.key] = d[0]
                obj_changed = True
            elif u:
                attr[hist_col.key] = u[0]
            else:
                # if the attribute had no value.
                attr[hist_col.key] = a[0]
                obj_changed = True

    if not obj_changed:
        # not changed, but we have relationships. OK
        # check those too
        for prop in obj_mapper.iterate_properties:
            if isinstance(prop, RelationshipProperty) and \
                    attributes.get_history(obj, prop.key).has_changes():
                obj_changed = True
                break

    if not obj_changed and not deleted:
        return

    attr['version'] = obj.version
    hist = history_cls()
    for key, value in attr.iteritems():
        setattr(hist, key, value)

    obj.version += 1
    session.add(hist)
class VersionedListener(SessionExtension):
    def before_flush(self, session, flush_context, instances):
        for obj in versioned_objects(session.dirty):
            create_version(obj, session)
        for obj in versioned_objects(session.deleted):
            create_version(obj, session, deleted=True)
I fixed the problem by putting the try: except stuff in a lambda and running them all after all the imports have happened.

Great!
