PostgreSQL partitioning and SQLAlchemy - Python

The SQLAlchemy docs explain how to create a partitioned table, but they do not explain how to create the partitions themselves.
So if I have this:
# Skipping create_engine and metadata
Base = declarative_base()

class Measure(Base):
    __tablename__ = 'measures'
    __table_args__ = {
        'postgresql_partition_by': 'RANGE (log_date)'
    }
    # (a primary key would also be needed to map this declaratively)
    city_id = Column(Integer, nullable=False)
    log_date = Column(Date, nullable=False)
    peaktemp = Column(Integer)
    unitsales = Column(Integer)

class Measure2020(Base):
    """How am I supposed to declare this?"""

I know that most of the time I'll be doing SELECT * FROM measures WHERE log_date BETWEEN XX AND YY, but partitioning still seems interesting.

You can use a MeasureMixin that both classes inherit from, and then use an event to attach the table partition.
from sqlalchemy import Column, Date, Integer, DDL, event

class MeasureMixin:
    city_id = Column(Integer, nullable=False)
    log_date = Column(Date, nullable=False)
    peaktemp = Column(Integer)
    unitsales = Column(Integer)

class Measure(MeasureMixin, Base):
    __tablename__ = 'measures'
    __table_args__ = {
        'postgresql_partition_by': 'RANGE (log_date)'
    }

class Measure2020(MeasureMixin, Base):
    __tablename__ = 'measures2020'

Measure2020.__table__.add_is_dependent_on(Measure.__table__)

event.listen(
    Measure2020.__table__,
    "after_create",
    DDL("""ALTER TABLE measures ATTACH PARTITION measures2020
           FOR VALUES FROM ('2020-01-01') TO ('2021-01-01');""")
)
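To actually create both tables and fire the attach event above, emitting the metadata as usual is enough; a minimal sketch, assuming an engine pointed at a PostgreSQL database (the connection URL is a placeholder):

from sqlalchemy import create_engine

# Creating the parent and the child table triggers the "after_create" DDL
# above, which attaches measures2020 as a partition of measures.
engine = create_engine('postgresql://localhost/mydb')
Base.metadata.create_all(engine)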

I had a similar problem. I found @moshevi's answer quite useful, and ended up generalising it a bit (as I had many tables to partition).
First, create a metaclass such as this:
from sqlalchemy.ext.declarative import DeclarativeMeta
from sqlalchemy.sql.ddl import DDL
from sqlalchemy import event
class PartitionByYearMeta(DeclarativeMeta):
    def __new__(cls, clsname, bases, attrs, *, partition_by):

        @classmethod
        def get_partition_name(cls_, key):
            # 'measures' -> 'measures_2020' (customise as needed)
            return f'{cls_.__tablename__}_{key}'

        @classmethod
        def create_partition(cls_, key):
            if key not in cls_.partitions:
                Partition = type(
                    f'{clsname}{key}',  # class name, only used internally
                    bases,
                    {'__tablename__': cls_.get_partition_name(key)}
                )

                Partition.__table__.add_is_dependent_on(cls_.__table__)

                event.listen(
                    Partition.__table__,
                    'after_create',
                    DDL(
                        # For non-year ranges, modify the FROM and TO below
                        f"""
                        ALTER TABLE {cls_.__tablename__}
                        ATTACH PARTITION {Partition.__tablename__}
                        FOR VALUES FROM ('{key}-01-01') TO ('{key + 1}-01-01');
                        """
                    )
                )

                cls_.partitions[key] = Partition

            return cls_.partitions[key]

        attrs.update(
            {
                # For non-RANGE partitions, modify the `postgresql_partition_by` key below
                '__table_args__': attrs.get('__table_args__', ())
                + (dict(postgresql_partition_by=f'RANGE({partition_by})'),),
                'partitions': {},
                'partitioned_by': partition_by,
                'get_partition_name': get_partition_name,
                'create_partition': create_partition
            }
        )

        return super().__new__(cls, clsname, bases, attrs)
Next, for any table in your model that you want to partition:
class MeasureMixin:
    # The columns need to be pulled out into this mixin
    # Note: any foreign key columns will need to be wrapped like this:
    @declared_attr
    def city_id(self):
        return Column(ForeignKey('cities.id'), nullable=False)

    log_date = Column(Date, nullable=False)
    peaktemp = Column(Integer)
    unitsales = Column(Integer)

class Measure(MeasureMixin, Base, metaclass=PartitionByYearMeta, partition_by='log_date'):
    __tablename__ = 'measures'
This makes it easy to add more tables and partition by any number of values.
Creating a new partition on the fly works like this:
# Make sure you commit any session that is currently open, even for select queries:
session.commit()

Partition = Measure.create_partition(2020)
if not engine.dialect.has_table(engine, Partition.__table__.name):
    Partition.__table__.create(bind=engine)
Now the partition for key 2020 is created and values for that year can be inserted.
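Rows for that year can then be inserted through the parent table as usual; a small sketch, assuming an open session and the Measure model above (the values are hypothetical):

import datetime

# PostgreSQL routes this row into the measures_2020 partition.
session.add(Measure(city_id=1, log_date=datetime.date(2020, 6, 1),
                    peaktemp=31, unitsales=250))
session.commit()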

Maybe a bit late, but I would like to share what I built upon @moshevi's and @Seb's answers:
In my IoT use-case, I required actual sub-partitioning (first level year, second level nodeid). Also I wanted to generalize it slightly.
This is what I came up with:
from sqlalchemy.ext.declarative import DeclarativeMeta
from sqlalchemy.sql.ddl import DDL
from sqlalchemy import event
class PartitionByMeta(DeclarativeMeta):
    def __new__(cls, clsname, bases, attrs, *, partition_by, partition_type):

        @classmethod
        def get_partition_name(cls_, suffix):
            return f'{cls_.__tablename__}_{suffix}'

        @classmethod
        def create_partition(cls_, suffix, partition_stmt, subpartition_by=None, subpartition_type=None):
            if suffix not in cls_.partitions:
                partition = PartitionByMeta(
                    f'{clsname}{suffix}',
                    bases,
                    {'__tablename__': cls_.get_partition_name(suffix)},
                    partition_type=subpartition_type,
                    partition_by=subpartition_by,
                )

                partition.__table__.add_is_dependent_on(cls_.__table__)

                event.listen(
                    partition.__table__,
                    'after_create',
                    DDL(
                        # For non-year ranges, modify the FROM and TO below
                        # LIST: IN ('first', 'second');
                        # RANGE: FROM ('{key}-01-01') TO ('{key+1}-01-01')
                        f"""
                        ALTER TABLE {cls_.__tablename__}
                        ATTACH PARTITION {partition.__tablename__}
                        {partition_stmt};
                        """
                    )
                )

                cls_.partitions[suffix] = partition

            return cls_.partitions[suffix]

        if partition_by is not None:
            attrs.update(
                {
                    '__table_args__': attrs.get('__table_args__', ())
                    + (dict(postgresql_partition_by=f'{partition_type.upper()}({partition_by})'),),
                    'partitions': {},
                    'partitioned_by': partition_by,
                    'get_partition_name': get_partition_name,
                    'create_partition': create_partition
                }
            )

        return super().__new__(cls, clsname, bases, attrs)
This is used as follows, assuming the respective VehicleDataMixin class has been created as introduced by @moshevi:
class VehicleData(VehicleDataMixin, Project, metaclass=PartitionByMeta,
                  partition_by='timestamp', partition_type='RANGE'):
    __tablename__ = 'vehicle_data'
    __table_args__ = (
        Index('ts_ch_nod_idx', "timestamp", "nodeid", "channelid", postgresql_using='brin'),
        UniqueConstraint('timestamp', 'nodeid', 'channelid', name='ts_ch_nod_constr')
    )
This can then be subpartitioned iteratively like so (to be adapted):
for y in range(2017, 2021):
    # Create a partition for each year, sub-partitioned by nodeid
    tbl_vehid_y = VehicleData.create_partition(
        f"{y}", partition_stmt=f"""FOR VALUES FROM ('{y}-01-01') TO ('{y+1}-01-01')""",
        subpartition_by='nodeid', subpartition_type='LIST'
    )

    for i in {3, 4, 7, 9}:
        # Create a sub-partition for each known nodeid under this year
        tbl_vehid_y.create_partition(
            f"nid{i}", partition_stmt=f"""FOR VALUES IN ('{i}')"""
        )

    # Default sub-partition for any other nodeid in this year
    tbl_vehid_y.create_partition("def", partition_stmt="DEFAULT")

# Default partition for any other year than anticipated
VehicleData.create_partition("def", partition_stmt="DEFAULT")
partition_by='timestamp' <= This is the column to partition by
partition_type='RANGE' <= This is the (PSQL specific) partition type
partition_stmt=f"""FOR VALUES IN ('{i}')""" <= This is the (PSQL specific) partitioning statement.

For database partitioning (PostgreSQL or MySQL), you can try the architect package. It works with a range of ORM libraries, including SQLAlchemy. Here is an example for PostgreSQL: https://architect.readthedocs.io/features/partition/postgresql.html. Several partitioning types are supported, so hopefully it meets the requirements of your case.
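For reference, this is roughly how a model is wired up with architect according to its documentation; the model, column and interval below are placeholders rather than anything from the question, and Base is the declarative base from earlier:

import architect
from sqlalchemy import Column, Integer, DateTime

# Partition the table by month on the "created" column. After creating the
# table you run `architect partition --module your.models.module` once so
# architect can install its partitioning trigger/function machinery.
@architect.install('partition', type='range', subtype='date',
                   constraint='month', column='created')
class Event(Base):
    __tablename__ = 'events'
    id = Column(Integer, primary_key=True)
    created = Column(DateTime)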

Related

How to IGNORE duplicate keys in order to avoid errors when adding a new object to session [duplicate]

I have a record that I want to exist in the database if it is not there, and if it is there already (primary key exists) I want the fields to be updated to the current state. This is often called an upsert.
The following incomplete code snippet demonstrates what will work, but it seems excessively clunky (especially if there were a lot more columns). What is the better/best way?
Base = declarative_base()

class Template(Base):
    __tablename__ = 'templates'
    id = Column(Integer, primary_key=True)
    name = Column(String(80), unique=True, index=True)
    template = Column(String(80), unique=True)
    description = Column(String(200))

    def __init__(self, Name, Template, Desc):
        self.name = Name
        self.template = Template
        self.description = Desc

def UpsertDefaultTemplate():
    sess = Session()
    desired_default = Template("default", "AABBCC", "This is the default template")
    try:
        q = sess.query(Template).filter_by(name=desired_default.name)
        existing_default = q.one()
    except sqlalchemy.orm.exc.NoResultFound:
        # default does not exist yet, so add it...
        sess.add(desired_default)
    else:
        # default already exists. Make sure the values are what we want...
        assert isinstance(existing_default, Template)
        existing_default.name = desired_default.name
        existing_default.template = desired_default.template
        existing_default.description = desired_default.description
    sess.flush()
Is there a better or less verbose way of doing this? Something like this would be great:
sess.upsert_this(desired_default, unique_key = "name")
although the unique_key kwarg is obviously unnecessary (the ORM should be able to figure this out easily), I added it just because SQLAlchemy tends to only work with the primary key. For example, I've been looking at whether Session.merge would be applicable, but it works only on the primary key, which in this case is an autoincrementing id that is not terribly useful for this purpose.
A sample use case for this is simply when starting up a server application that may have upgraded its default expected data. ie: no concurrency concerns for this upsert.
SQLAlchemy supports ON CONFLICT with two methods on_conflict_do_update() and on_conflict_do_nothing().
Copying from the documentation:
from sqlalchemy.dialects.postgresql import insert

stmt = insert(my_table).values(user_email='a@b.com', data='inserted data')
stmt = stmt.on_conflict_do_update(
    index_elements=[my_table.c.user_email],
    index_where=my_table.c.user_email.like('%@gmail.com'),
    set_=dict(data=stmt.excluded.data)
)
conn.execute(stmt)
SQLAlchemy does have a "save-or-update" behavior, which in recent versions has been built into session.add, but previously was the separate session.save_or_update call. This is not an "upsert" but it may be good enough for your needs.
It is good that you are asking about a class with multiple unique keys; I believe this is precisely the reason there is no single correct way to do this. The primary key is also a unique key. If there were no unique constraints, only the primary key, it would be a simple enough problem: if nothing with the given ID exists, or if ID is None, create a new record; else update all other fields in the existing record with that primary key.
However, when there are additional unique constraints, there are logical issues with that simple approach. If you want to "upsert" an object, and the primary key of your object matches an existing record, but another unique column matches a different record, then what do you do? Similarly, if the primary key matches no existing record, but another unique column does match an existing record, then what? There may be a correct answer for your particular situation, but in general I would argue there is no single correct answer.
That would be the reason there is no built in "upsert" operation. The application must define what this means in each particular case.
Nowadays, SQLAlchemy provides two helpful functions, on_conflict_do_nothing and on_conflict_do_update. Those functions are useful but require you to switch from the ORM interface to the lower-level one, SQLAlchemy Core.
Although those two functions make upserting using SQLAlchemy's syntax not that difficult, these functions are far from providing a complete out-of-the-box solution to upserting.
My common use case is to upsert a big chunk of rows in a single SQL query/session execution. I usually encounter two problems with upserting:
For example, higher-level ORM functionality we've gotten used to is missing: you cannot use ORM objects, but instead have to provide ForeignKeys at insertion time.
I'm using the following function I wrote to handle both of those issues:
from sqlalchemy import UniqueConstraint, inspect
from sqlalchemy.dialects import postgresql

def upsert(session, model, rows):
    table = model.__table__
    stmt = postgresql.insert(table)
    primary_keys = [key.name for key in inspect(table).primary_key]
    update_dict = {c.name: c for c in stmt.excluded if not c.primary_key}

    if not update_dict:
        raise ValueError("insert_or_update resulted in an empty update_dict")

    stmt = stmt.on_conflict_do_update(index_elements=primary_keys,
                                      set_=update_dict)

    seen = set()
    foreign_keys = {col.name: list(col.foreign_keys)[0].column for col in table.columns if col.foreign_keys}
    unique_constraints = [c for c in table.constraints if isinstance(c, UniqueConstraint)]

    def handle_foreignkeys_constraints(row):
        for c_name, c_value in foreign_keys.items():
            foreign_obj = row.pop(c_value.table.name, None)
            row[c_name] = getattr(foreign_obj, c_value.name) if foreign_obj else None

        for const in unique_constraints:
            unique = tuple([const] + [row[col.name] for col in const.columns])
            if unique in seen:
                return None
            seen.add(unique)

        return row

    rows = list(filter(None, (handle_foreignkeys_constraints(row) for row in rows)))
    session.execute(stmt, rows)
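Usage then looks roughly like this (the model and rows are hypothetical; each row is a dict keyed by column name):

rows = [
    {'id': 1, 'name': 'first'},
    {'id': 2, 'name': 'second'},
]
upsert(session, MyModel, rows)
session.commit()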
I use a "look before you leap" approach:
# first get the object from the database if it exists
# we're guaranteed to only get one or zero results
# because we're filtering by primary key
switch_command = session.query(Switch_Command).\
    filter(Switch_Command.switch_id == switch.id).\
    filter(Switch_Command.command_id == command.id).first()

# If we didn't get anything, make one
if not switch_command:
    switch_command = Switch_Command(switch_id=switch.id, command_id=command.id)

# update the stuff we care about
switch_command.output = 'Hooray!'
switch_command.lastseen = datetime.datetime.utcnow()
session.add(switch_command)

# This will generate either an INSERT or UPDATE
# depending on whether we have a new object or not
session.commit()
The advantage is that this is db-neutral and I think it's clear to read. The disadvantage is that there's a potential race condition in a scenario like the following (a mitigation sketch follows the list):
we query the db for a switch_command and don't find one
we create a switch_command
another process or thread creates a switch_command with the same primary key as ours
we try to commit our switch_command
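One common mitigation for that race, sketched here under the assumption that the competing insert surfaces as an IntegrityError on commit, is to roll back and fall through to an update:

from sqlalchemy.exc import IntegrityError

try:
    session.commit()
except IntegrityError:
    # another process won the race; update the row it created instead
    session.rollback()
    switch_command = session.query(Switch_Command).\
        filter(Switch_Command.switch_id == switch.id).\
        filter(Switch_Command.command_id == command.id).one()
    switch_command.output = 'Hooray!'
    switch_command.lastseen = datetime.datetime.utcnow()
    session.commit()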
There are multiple answers and here comes yet another answer (YAA). Other answers are not that readable due to the metaprogramming involved. Here is an example that
Uses SQLAlchemy ORM
Shows how to create a row if there are zero rows using on_conflict_do_nothing
Shows how to update the existing row (if any) without creating a new row using on_conflict_do_update
Uses the table primary key as the constraint
A longer example can be found in the original question this code relates to.
import datetime

import sqlalchemy as sa
import sqlalchemy.orm as orm
from sqlalchemy import text
from sqlalchemy.dialects.postgresql import insert
from sqlalchemy.orm import Session

# Base and the Pair model are assumed to be defined elsewhere in the application.

class PairState(Base):
    __tablename__ = "pair_state"

    # This table has a 1-to-1 relationship with Pair
    pair_id = sa.Column(sa.ForeignKey("pair.id"), nullable=False, primary_key=True, unique=True)
    pair = orm.relationship(Pair,
                            backref=orm.backref("pair_state",
                                                lazy="dynamic",
                                                cascade="all, delete-orphan",
                                                single_parent=True, ), )

    # First raw event in data stream
    first_event_at = sa.Column(sa.TIMESTAMP(timezone=True), nullable=False, server_default=text("TO_TIMESTAMP(0)"))

    # Last raw event in data stream
    last_event_at = sa.Column(sa.TIMESTAMP(timezone=True), nullable=False, server_default=text("TO_TIMESTAMP(0)"))

    # The last hypertable entry added
    last_interval_at = sa.Column(sa.TIMESTAMP(timezone=True), nullable=False, server_default=text("TO_TIMESTAMP(0)"))

    @staticmethod
    def create_first_event_if_not_exist(dbsession: Session, pair_id: int, ts: datetime.datetime):
        """Sets the first event value if it does not exist yet."""
        dbsession.execute(
            insert(PairState).
            values(pair_id=pair_id, first_event_at=ts).
            on_conflict_do_nothing()
        )

    @staticmethod
    def update_last_event(dbsession: Session, pair_id: int, ts: datetime.datetime):
        """Replaces the column last_event_at for a named pair."""
        # Based on the original example of https://stackoverflow.com/a/49917004/315168
        dbsession.execute(
            insert(PairState).
            values(pair_id=pair_id, last_event_at=ts).
            on_conflict_do_update(constraint=PairState.__table__.primary_key, set_={"last_event_at": ts})
        )

    @staticmethod
    def update_last_interval(dbsession: Session, pair_id: int, ts: datetime.datetime):
        """Replaces the column last_interval_at for a named pair."""
        dbsession.execute(
            insert(PairState).
            values(pair_id=pair_id, last_interval_at=ts).
            on_conflict_do_update(constraint=PairState.__table__.primary_key, set_={"last_interval_at": ts})
        )
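Calling the helpers then looks roughly like this (the pair id is hypothetical; dbsession is assumed to be an open Session):

import datetime

now = datetime.datetime.now(datetime.timezone.utc)
PairState.create_first_event_if_not_exist(dbsession, pair_id=1, ts=now)
PairState.update_last_event(dbsession, pair_id=1, ts=now)
dbsession.commit()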
The below works fine for me with a Redshift database and will also work for a combined primary key constraint.
Source: this
Just a few modifications are required for creating the SQLAlchemy engine in the start_engine() function.
import os

from sqlalchemy import Column, Integer, Date, MetaData
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.dialects.postgresql import insert
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.dialects import postgresql

Base = declarative_base()

def start_engine():
    engine = create_engine(os.getenv('SQLALCHEMY_URI',
                                     'postgresql://localhost:5432/upsert'))
    connect = engine.connect()
    meta = MetaData(bind=engine)
    meta.reflect(bind=engine)
    return engine

class DigitalSpend(Base):
    __tablename__ = 'digital_spend'
    report_date = Column(Date, nullable=False)
    day = Column(Date, nullable=False, primary_key=True)
    impressions = Column(Integer)
    conversions = Column(Integer)

    def __repr__(self):
        return str([getattr(self, c.name, None) for c in self.__table__.c])

def compile_query(query):
    compiler = query.compile if not hasattr(query, 'statement') else query.statement.compile
    return compiler(dialect=postgresql.dialect())

def upsert(session, model, rows, as_of_date_col='report_date', no_update_cols=[]):
    table = model.__table__
    stmt = insert(table).values(rows)

    update_cols = [c.name for c in table.c
                   if c not in list(table.primary_key.columns)
                   and c.name not in no_update_cols]

    on_conflict_stmt = stmt.on_conflict_do_update(
        index_elements=table.primary_key.columns,
        set_={k: getattr(stmt.excluded, k) for k in update_cols},
        index_where=(getattr(model, as_of_date_col) < getattr(stmt.excluded, as_of_date_col))
    )

    print(compile_query(on_conflict_stmt))
    session.execute(on_conflict_stmt)

session = start_engine()
upsert(session, DigitalSpend, initial_rows, no_update_cols=['conversions'])
This allows access to the underlying model classes based on string table names:
def get_class_by_tablename(tablename):
    """Return class reference mapped to table.

    https://stackoverflow.com/questions/11668355/sqlalchemy-get-model-from-table-name-this-may-imply-appending-some-function-to

    :param tablename: String with name of table.
    :return: Class reference or None.
    """
    for c in Base._decl_class_registry.values():
        if hasattr(c, '__tablename__') and c.__tablename__ == tablename:
            return c

sqla_tbl = get_class_by_tablename(table_name)

def handle_upsert(record_dict, table):
    """
    handles updates when there are primary key conflicts
    """
    try:
        self.active_session().add(table(**record_dict))
    except:
        # Here we'll assume the error is caused by an integrity error.
        # We do this because the error classes are passed through from the
        # underlying package (pyodbc / sqlite); SQLAlchemy doesn't mask
        # them with its own code - this should be updated to have
        # explicit error handling for each new db engine
        active_session.rollback()

        # Query for the conflicting class, use the update method to change values based on the dict
        c_tbl_primary_keys = [i.name for i in table.__table__.primary_key]  # list of primary key col names
        c_tbl_cols = dict(sqla_tbl.__table__.columns)  # string : Column object crosswalk
        c_query_dict = {k: record_dict[k] for k in c_tbl_primary_keys if k in record_dict}  # sub-dict of primary key : value
        c_oo_query_dict = {c_tbl_cols[k]: v for (k, v) in c_query_dict.items()}  # Column object : query value for primary key cols

        c_target_record = session.query(sqla_tbl).filter(*[k == v for (k, v) in c_oo_query_dict.items()]).first()

        # apply new data values to the existing record
        for k, v in record_dict.items():
            setattr(c_target_record, k, v)
This works for me with sqlite3 and Postgres, although it might fail with combined primary key constraints and will most likely fail with additional unique constraints.
try:
    t = self._meta.tables[data['table']]
except KeyError:
    self._log.error('table "%s" unknown', data['table'])
    return

try:
    q = insert(t, values=data['values'])
    self._log.debug(q)
    self._db.execute(q)
except IntegrityError:
    self._log.warning('integrity error')
    where_clause = [c.__eq__(data['values'][c.name]) for c in t.c if c.primary_key]
    update_dict = {c.name: data['values'][c.name] for c in t.c if not c.primary_key}
    q = update(t, values=update_dict).where(*where_clause)
    self._log.debug(q)
    self._db.execute(q)
except Exception as e:
    self._log.error('%s: %s', t.name, e)
As we had problems with generated default ids and references, which led to ForeignKeyViolation errors like
update or delete on table "..." violates foreign key constraint
Key (id)=(...) is still referenced from table "...".
we had to exclude the id from the update dict, as otherwise it would always be regenerated as a new default value.
In addition, the method returns the created/updated entity.
from sqlalchemy import select
from sqlalchemy.dialects.postgresql import insert  # Important to use the postgresql insert

def upsert(session, data, key_columns, model):
    stmt = insert(model).values(data)

    # Important to exclude the ID for the update!
    exclude_for_update = [model.id.name, *key_columns]
    update_dict = {c.name: c for c in stmt.excluded if c.name not in exclude_for_update}

    stmt = stmt.on_conflict_do_update(
        index_elements=key_columns,
        set_=update_dict
    ).returning(model)

    orm_stmt = (
        select(model)
        .from_statement(stmt)
        .execution_options(populate_existing=True)
    )
    return session.execute(orm_stmt).scalar()
Example:
class UpsertUser(Base):
    __tablename__ = 'upsert_user'

    id = Column(Id, primary_key=True, default=uuid.uuid4)
    name: str = Column(sa.String, nullable=False)
    user_sid: str = Column(sa.String, nullable=False, unique=True)
    house_admin = relationship('UpsertHouse', back_populates='admin', uselist=False)

class UpsertHouse(Base):
    __tablename__ = 'upsert_house'

    id = Column(Id, primary_key=True, default=uuid.uuid4)
    admin_id: Id = Column(Id, ForeignKey('upsert_user.id'), nullable=False)
    admin: UpsertUser = relationship('UpsertUser', back_populates='house_admin', uselist=False)

# Usage
upserted_user = upsert(session, updated_user, [UpsertUser.user_sid.name], UpsertUser)
Note: only tested on PostgreSQL, but it could also work for other DBs that support ON DUPLICATE KEY UPDATE, e.g. MySQL.
In the case of SQLite, the sqlite_on_conflict='REPLACE' option can be used when defining a UniqueConstraint, and sqlite_on_conflict_unique for a unique constraint on a single column. Then session.add will work in a way just like upsert. See the official documentation.
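A minimal sketch of what that looks like in a model definition (the table and column names here are hypothetical, and Base is the declarative base from earlier):

from sqlalchemy import Column, Integer, String, UniqueConstraint

class Setting(Base):
    __tablename__ = 'settings'
    id = Column(Integer, primary_key=True)
    # single-column unique constraint that resolves conflicts with OR REPLACE
    key = Column(String, unique=True, sqlite_on_conflict_unique='REPLACE')
    value = Column(String)
    # multi-column variant via an explicit UniqueConstraint:
    # __table_args__ = (UniqueConstraint('key', 'value', sqlite_on_conflict='REPLACE'),)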
I use this code for upserts.
Before using this code, you should add primary keys to the table in the database.
from sqlalchemy import create_engine
from sqlalchemy import MetaData, Table
from sqlalchemy.inspection import inspect
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.dialects.postgresql import insert

def upsert(df, engine, table_name, schema=None, chunk_size=1000):
    metadata = MetaData(schema=schema)
    metadata.bind = engine

    table = Table(table_name, metadata, schema=schema, autoload=True)

    # only use columns common to both df and table
    table_columns = {column.name for column in table.columns}
    df_columns = set(df.columns)
    intersection_columns = table_columns.intersection(df_columns)

    df1 = df[list(intersection_columns)]
    records = df1.to_dict('records')

    # get list of fields making up primary key
    primary_keys = [key.name for key in inspect(table).primary_key]

    with engine.connect() as conn:
        chunks = [records[i:i + chunk_size] for i in range(0, len(records), chunk_size)]
        for chunk in chunks:
            stmt = insert(table).values(chunk)
            update_dict = {c.name: c for c in stmt.excluded if not c.primary_key}
            s = stmt.on_conflict_do_update(
                index_elements=primary_keys,
                set_=update_dict)
            conn.execute(s)
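Usage with a pandas DataFrame is then roughly (the table name and data are hypothetical):

import pandas as pd

df = pd.DataFrame([{'id': 1, 'name': 'first'},
                   {'id': 2, 'name': 'second'}])
upsert(df, engine, 'my_table', schema='public')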


Dynamically setting __tablename__ for sharding in SQLAlchemy?

In order to handle a growing database table, we are sharding on table name. So we could have database tables that are named like this:
table_md5one
table_md5two
table_md5three
All tables have the exact same schema.
How do we use SQLAlchemy and dynamically specify the table name for the class that corresponds to this? It looks like the declarative_base() classes need to have __tablename__ pre-specified.
There will eventually be too many tables to manually specify derived classes from a parent/base class. We want to be able to build a class that can have the tablename set up dynamically (maybe passed as a parameter to a function.)
OK, we went with the custom SQLAlchemy declaration rather than the declarative one.
So we create a dynamic table object like this:
from sqlalchemy import MetaData, Table, Column, DATE
from sqlalchemy.orm import mapper, clear_mappers

def get_table_object(self, md5hash):
    metadata = MetaData()
    table_name = 'table_' + md5hash
    table_object = Table(table_name, metadata,
                         Column('Column1', DATE, nullable=False),
                         Column('Column2', DATE, nullable=False)
                         )
    clear_mappers()
    mapper(ActualTableObject, table_object)
    return ActualTableObject
Where ActualTableObject is the class mapping to the table.
In Augmenting the Base you find a way of using a custom Base class that can, for example, calculate the __tablename__ attribute dynamically:
class Base(object):
    @declared_attr
    def __tablename__(cls):
        return cls.__name__.lower()
The only problem here is that I don't know where your hash comes from, but this should give a good starting point.
If you require this algorithm not for all your tables but only for one you could just use the declared_attr on the table you are interested in sharding.
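For a single sharded table, a minimal sketch of that idea, assuming the shard suffix is available as a module-level variable (the names here are hypothetical):

from sqlalchemy import Column, Integer
from sqlalchemy.ext.declarative import declared_attr

SHARD_SUFFIX = 'md5one'  # hypothetical; substitute your hash here

class ShardedRecord(Base):
    @declared_attr
    def __tablename__(cls):
        return 'table_' + SHARD_SUFFIX

    id = Column(Integer, primary_key=True)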
Because I insist on using declarative classes with their __tablename__ dynamically specified by a given parameter, after days of failing with other solutions and hours of studying SQLAlchemy internals, I came up with the following solution, which I believe is simple, elegant and race-condition free.
def get_model(suffix):
    DynamicBase = declarative_base(class_registry=dict())

    class MyModel(DynamicBase):
        __tablename__ = 'table_{suffix}'.format(suffix=suffix)

        id = Column(Integer, primary_key=True)
        name = Column(String)
        ...

    return MyModel
Since they have their own class_registry, you will not get that warning saying:
This declarative base already contains a class with the same class name and module name as mypackage.models.MyModel, and will be replaced in the string-lookup table.
Hence, you will not be able to reference them from other models with string lookup. However, it works perfectly fine to use these on-the-fly declared models for foreign keys as well:
ParentModel1 = get_model(123)
ParentModel2 = get_model(456)

class MyChildModel(BaseModel):
    __tablename__ = 'table_child'

    id = Column(Integer, primary_key=True)
    name = Column(String)

    parent_1_id = Column(Integer, ForeignKey(ParentModel1.id))
    parent_2_id = Column(Integer, ForeignKey(ParentModel2.id))

    parent_1 = relationship(ParentModel1)
    parent_2 = relationship(ParentModel2)
If you only use them to query/insert/update/delete without any reference left such as foreign key reference from another table, they, their base classes and also their class_registry will be garbage collected, so no trace will be left.
You can write a function with a table name parameter that returns the class with the appropriate attributes set.
def get_class(table_name):
    class GenericTable(Base):
        __tablename__ = table_name
        ID = Column(types.Integer, primary_key=True)

        def function(self):
            ...

    return GenericTable

Then you can create a table using:

get_class("test").__table__.create(bind=engine)  # see sqlalchemy.engine
Try this
import zlib

from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, BigInteger, DateTime, String
from datetime import datetime

BASE = declarative_base()
ENTITY_CLASS_DICT = {}

class AbsShardingClass(BASE):
    __abstract__ = True

def get_class_name_and_table_name(hashid):
    return 'ShardingClass%s' % hashid, 'sharding_class_%s' % hashid

def get_sharding_entity_class(hashid):
    """
    @param hashid: hashid
    @type hashid: int
    @rtype AbsClientUserAuth
    """
    if hashid not in ENTITY_CLASS_DICT:
        class_name, table_name = get_class_name_and_table_name(hashid)
        cls = type(class_name, (AbsShardingClass,),
                   {'__tablename__': table_name})
        ENTITY_CLASS_DICT[hashid] = cls
    return ENTITY_CLASS_DICT[hashid]

cls = get_sharding_entity_class(1)
print(session.query(cls).get(100))
Instead of imperatively creating a Table object, you can use the usual declarative_base and make a closure to set the table name, as follows:
def make_class(Base, table_name):
    class User(Base):
        __tablename__ = table_name
        id = Column(Integer, primary_key=True)
        name = Column(String)
    return User

Base = declarative_base()
engine = make_engine()
custom_named_usertable = make_class(Base, 'custom_name')
Base.metadata.create_all(engine)

session = make_session(engine)
new_user = custom_named_usertable(name='Adam')
session.add(new_user)
session.commit()
session.close()
engine.dispose()
You just need to pass a custom class object to declarative_base():
from sqlalchemy.ext.declarative import declarative_base, declared_attr

class Base(object):
    @declared_attr
    def __tablename__(cls):
        return cls.__name__.lower()

Base = declarative_base(cls=Base)
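A model declared against this Base then derives its table name from the class name, for example:

from sqlalchemy import Column, Integer

class Account(Base):
    # __tablename__ resolves to 'account' via the declared_attr above
    id = Column(Integer, primary_key=True)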

Can the same #property present both scalar and collection behavior in SQLAlchemy?

I'm converting a library to use SQLAlchemy as the datastore. I like the flexibility of the PickleType column, but it doesn't seem to work well when pickling SA objects (table rows). Even if I overload __setstate__ and __getstate__ to do a query + session merge when unpickling, there's no referential integrity across that pickle boundary. That means I can't query collections of objects.
class Bar(Base):
    __tablename__ = 'bars'
    id = Column(Integer, primary_key=True)
    foo_id = Column(Integer, ForeignKey('foos.id'), primary_key=True)

class Foo(Base):
    __tablename__ = 'foos'
    id = Column(Integer, primary_key=True)  # referenced by Bar.foo_id
    values = Column(PickleType)
    #values = relationship(Bar) # list interface (one->many), but can't assign a scalar or use a dictionary

    def __init__(self):
        self.values = [Bar(), Bar()]
        # only allowed with a PickleType column
        #self.values = Bar()
        #self.values = {'one' : Bar()}
        #self.values = [ [Bar(), Bar()], [Bar(), Bar()]]

# get all Foo's with a Bar whose id=1
session.query(Foo).filter(Foo.values.any(Bar.id == 1)).all()
One workaround would be to implement my own mutable object type as is done here. I'm imagining having some kind of flattening scheme which traverses the collections and appends them to a simpler one->many relationship. Perhaps the flattened list might have to be weakrefs to the pickled collection's objects?
Tracking changes and references sounds like no fun and I can't find any examples of people pickling SA rows anywhere else (perhaps indicative of bad design on my part?). Any advice?
EDIT 1:
After some discussion I've simplified the request. I'm looking for a single property that can behave as either a scalar or a collection. Here is my (failing) attempt:
from sqlalchemy import MetaData, Column, Integer, PickleType, String, ForeignKey, create_engine
from sqlalchemy.orm import relationship, Session
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm.collections import attribute_mapped_collection

# from http://www.sqlalchemy.org/trac/browser/examples/vertical
from sqlalchemy_examples.vertical import dictlike_polymorphic as dictlike

metadata = MetaData()
Base = declarative_base()
engine = create_engine('sqlite://', echo=True)
Base.metadata.bind = engine
session = Session(engine)

class AnimalFact(dictlike.PolymorphicVerticalProperty, Base):
    """key/value attribute whose value can be one of several types"""
    __tablename__ = 'animalfacts'
    type_map = {#str: ('string', 'str_value'),
                list: ('list', 'list_value'),
                tuple: ('tuple', 'tuple_value')}
    id = Column(Integer, primary_key=True)
    animal_id = Column(Integer, ForeignKey('animal.id'), primary_key=True)
    key = Column(String, primary_key=True)
    type = Column(String)
    #str_value = Column(String)
    list_value = relationship('StringEntry')
    tuple_value = relationship('StringEntry2')

class Animal(Base, dictlike.VerticalPropertyDictMixin):
    __tablename__ = 'animal'
    _property_type = AnimalFact
    _property_mapping = 'facts'

    id = Column(Integer, primary_key=True)
    name = Column(String)
    facts = relationship(AnimalFact, backref='animal',
                         collection_class=attribute_mapped_collection('key'))

    def __init__(self, name):
        self.name = name

class StringEntry(Base):
    __tablename__ = 'stringentry'
    id = Column(Integer, primary_key=True)
    animalfacts_id = Column(Integer, ForeignKey('animalfacts.id'))
    value = Column(String)

    def __init__(self, value):
        self.value = value

class StringEntry2(Base):
    __tablename__ = 'stringentry2'
    id = Column(Integer, primary_key=True)
    animalfacts_id = Column(Integer, ForeignKey('animalfacts.id'))
    value = Column(String)

    def __init__(self, value):
        self.value = value

Base.metadata.create_all()
a = Animal('aardvark')
a['eyes'] = [StringEntry('left side'), StringEntry('right side')]   # works great
a['eyes'] = (StringEntry2('left side'), StringEntry2('right side')) # works great
#a['cute'] = 'sort of' # failure
The PickleType is really a hacky way around edge cases where you have some arbitrary object you'd just like to shove away. It's a given that when you use PickleType, you're giving up any relational advantages, including being able to filter/query on them, etc.
So putting an ORM mapped object in a Pickle is basically a terrible idea.
If you want a collection of scalar values, use traditional mappings and relationship() in combination with association_proxy. See http://docs.sqlalchemy.org/en/rel_0_7/orm/extensions/associationproxy.html#simplifying-scalar-collections .
"or dictionaries". Use attribute_mapped_collection: http://docs.sqlalchemy.org/en/rel_0_7/orm/collections.html#dictionary-collections
"dictionaries plus scalars": combine both attribute_mapped_collection and association_proxy: http://docs.sqlalchemy.org/en/rel_0_7/orm/extensions/associationproxy.html#proxying-to-dictionary-based-collections
Edit 1:
Well, you dug into a really esoteric and complex example there. association_proxy is a much easier way to handle these cases where you want an object to act like a scalar, so here's that, without all the boilerplate of the "vertical" example, which I'd avoid as it is really too complex. Your example seemed undecided about primary key style, so I went with the composite version. Surrogate + composite can't be mixed in a single table (well, it can, but it's relationally incorrect; the key should be the smallest unit that identifies a row - http://en.wikipedia.org/wiki/Unique_key is a good top-level read on the various subjects involved).
from sqlalchemy import Integer, String, Column, create_engine, ForeignKey, ForeignKeyConstraint
from sqlalchemy.orm import relationship, Session
from sqlalchemy.orm.collections import attribute_mapped_collection
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.associationproxy import association_proxy

Base = declarative_base()

class AnimalFact(Base):
    """key/value attribute whose value can be either a string or a list of strings"""
    __tablename__ = 'animalfacts'

    # use either surrogate PK id, or the composite animal_id/key - but
    # not both. id/animal_id/key all together is not a proper key.
    # Personally I'd go for "id" here, but here's the composite version.
    animal_id = Column(Integer, ForeignKey('animal.id'), primary_key=True)
    key = Column(String, primary_key=True)

    # data
    str_value = Column(String)
    _list_value = relationship('StringEntry')

    # proxy list strings
    list_proxy = association_proxy('_list_value', 'value')

    def __init__(self, key, value):
        self.key = key
        self.value = value

    @property
    def value(self):
        if self.str_value is not None:
            return self.str_value
        else:
            return self.list_proxy

    @value.setter
    def value(self, value):
        if isinstance(value, basestring):
            self.str_value = value
        elif isinstance(value, list):
            self.list_proxy = value
        else:
            assert False

class Animal(Base):
    __tablename__ = 'animal'

    id = Column(Integer, primary_key=True)
    name = Column(String)

    _facts = relationship(AnimalFact, backref='animal',
                          collection_class=attribute_mapped_collection('key'))
    facts = association_proxy('_facts', 'value')

    def __init__(self, name):
        self.name = name

    # dictionary interface around "facts".
    # I'd just use "animal.facts" here, but here's how to skip that.
    def __getitem__(self, key):
        return self.facts.__getitem__(key)

    def __setitem__(self, key, value):
        self.facts.__setitem__(key, value)

    def __delitem__(self, key):
        self.facts.__delitem__(key)

    def __contains__(self, key):
        return self.facts.__contains__(key)

    def keys(self):
        return self.facts.keys()

class StringEntry(Base):
    __tablename__ = 'myvalue'

    id = Column(Integer, primary_key=True)
    animal_id = Column(Integer)
    key = Column(Integer)
    value = Column(String)

    # because AnimalFact has a composite PK, we need
    # a composite FK.
    __table_args__ = (ForeignKeyConstraint(
                          ['key', 'animal_id'],
                          ['animalfacts.key', 'animalfacts.animal_id']),
                      )

    def __init__(self, value):
        self.value = value

engine = create_engine('sqlite://', echo=True)
Base.metadata.create_all(engine)
session = Session(engine)

# create a new animal
a = Animal('aardvark')
a['eyes'] = ['left side', 'right side']
a['cute'] = 'sort of'
session.add(a)
session.commit()
session.close()

for animal in session.query(Animal):
    print animal.name, ",".join(["%s" % animal[key] for key in animal.keys()])

SQLalchemy: Mapping columns into different properties

I am trying to store some simulation measurements (times and values) using sqlalchemy. Here are the relevant table definitions. If there is a more sensible table definition, I'd love to see it.
from sqlalchemy import create_engine, schema, orm

engine = create_engine('sqlite:///:memory:', echo=True)
metadata = schema.MetaData(bind=engine)

container_table = schema.Table('containers', metadata,
    schema.Column('id', schema.types.Integer, primary_key=True))

measurement_table = schema.Table('measurements', metadata,
    schema.Column('id', schema.types.Integer, primary_key=True),
    schema.Column('container_id', schema.types.Integer,
                  schema.ForeignKey('containers.id')),
    schema.Column('time', schema.types.Float),
    schema.Column('value', schema.types.Float))

metadata.create_all()
The times will be unique for each container, and the below properties should be ordered by time.
I would like to be able to both read and assign these properties:
c = Container()
times = range(10)
values = [t**2 for t in times]
c.times = times
c.values = values
But I don't know how to do the mapping. I assume that if it's possible, it will look something like this:
class Container(object):
    times = some_sort_of_proxy()
    values = some_sort_of_proxy()

orm.mapper(Container, container_table, properties={
    # Magic
})
How do I go about doing this? Is this a reasonable mapping, or do I need to have a different underlying table structure?
class EmailAddress(object):
    @property
    def email(self):
        return self._email

    @email.setter
    def email(self, email):
        self._email = email

mapper(EmailAddress, addresses_table, properties={
    '_email': addresses_table.c.email
})
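For the times/values collections from the question specifically, one possible direction (a sketch, not the only reasonable mapping) is to map the measurements table to its own class and expose association_proxy views over an ordered relationship:

from sqlalchemy import orm
from sqlalchemy.ext.associationproxy import association_proxy

class Measurement(object):
    def __init__(self, time=None, value=None):
        self.time = time
        self.value = value

class Container(object):
    # read-oriented proxies over the related Measurement rows; assigning
    # both lists independently, as in the question, would still need custom
    # property setters that zip times and values together
    times = association_proxy('measurements', 'time')
    values = association_proxy('measurements', 'value')

orm.mapper(Measurement, measurement_table)
orm.mapper(Container, container_table, properties={
    'measurements': orm.relationship(Measurement,
                                     order_by=measurement_table.c.time),
})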
