Generic way to get primary key from declaratively defined instance in SQLAlchemy - python

Does SQLAlchemy offer a generic way to get the primary key from a declaratively defined instance, so that if:
Base = declarative_base()

class MyClass(Base):
    __tablename__ = 'mytable'
    key = Column(Integer, primary_key=True)
I can do:
>>> a = MyClass(key=1)
>>> a.generic_get_primary_key() # <-- does it exist ??
1

You can use inspection for that purpose:
http://docs.sqlalchemy.org/en/latest/core/inspection.html
Passing an instance of a mapped object to inspect returns an InstanceState describing that object.
This state also contains the identity:
Base = declarative_base()

class MyClass(Base):
    __tablename__ = 'mytable'
    key = Column(Integer, primary_key=True)

a = MyClass(key=1)

from sqlalchemy.inspection import inspect
pk = inspect(a).identity
print(pk)
Will give:
(1,)
Since primary keys can consist of multiple columns, the identity in general is a tuple containing all the column values that are part of the primary key.
In your case, that's simply the key.
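For example, with a composite primary key the identity is a two-element tuple. A minimal sketch (the Pair class here is hypothetical, not from the question; note the object must be persistent before identity is populated):
from sqlalchemy import Column, Integer, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.inspection import inspect
from sqlalchemy.orm import Session

Base = declarative_base()

class Pair(Base):
    __tablename__ = 'pairs'
    left = Column(Integer, primary_key=True)
    right = Column(Integer, primary_key=True)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)

session = Session(engine)
session.add(Pair(left=1, right=2))
session.commit()

p = session.query(Pair).first()
print(inspect(p).identity)  # (1, 2) - one value per primary key column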

If you need to retrieve the primary keys of a class rather than an instance, the following describes how to do it.
You can use the inspect function. Called on a mapped class, it returns a Mapper instance on which you can do your analysis. In this example I use it to print the attribute name of each primary key column of MyClass.
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String

Base = declarative_base()

class MyClass(Base):
    __tablename__ = "myclass"
    key = Column(Integer, primary_key=True)
    name = Column(String, primary_key=True)

from sqlalchemy import inspect
ins = inspect(MyClass)
print("Tuple of primary keys: ", ins.primary_key)

# Let's loop over them and print the attribute key name of each
for x in ins.primary_key:
    print(x.key)
Returns
> Tuple of primary keys: (Column('key', Integer(), table=<myclass>, primary_key=True, nullable=False), Column('name', String(), table=<myclass>, primary_key=True, nullable=False))
> key
> name

I did it by getting the primary key name and then, with getattr, reading that attribute from the class instance, like this:
from sqlalchemy.inspection import inspect

# I use this class as a mixin for all my tables in sqlalchemy
class BaseTablesClass:

    # First I get the name of the primary key; just the first entry of the
    # tuple, because a table can have more than one primary key column
    def primary_key_name(self):
        return inspect(self).mapper.primary_key[0].name

    # Then I get the value of the primary key via the name returned by the
    # primary_key_name() method
    def primary_key_value(self):
        return getattr(self, self.primary_key_name())
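Hypothetical usage of the mixin above (the Item model is illustrative only):
from sqlalchemy import Column, Integer
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class Item(Base, BaseTablesClass):
    __tablename__ = 'items'
    id = Column(Integer, primary_key=True)

item = Item(id=42)
print(item.primary_key_name())   # 'id'
print(item.primary_key_value())  # 42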

Use the inspect function:
inspect(obj).identity
This also works for "transient" and "pending" instances:
inspect(obj.__class__).primary_key_from_instance(obj)
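For example (a small sketch reusing MyClass from above): identity is None while the object is still transient, whereas primary_key_from_instance reads the attribute values directly:
from sqlalchemy.inspection import inspect

a = MyClass(key=1)  # transient: not yet added to a session or flushed
print(inspect(a).identity)                            # None
print(inspect(MyClass).primary_key_from_instance(a))  # [1]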

Related

How to create an auto-generated value in Snowflake using SQLAlchemy?

I'm trying to create a DB using SQLAlchemy to connect to Snowflake, with Alembic for migrations, for an app created in FastAPI. I created some models, and creating them in Snowflake works fine, for example:
create or replace TABLE PRICE_SERVICE.FP7.LOCATION (
    ID NUMBER(38,0) NOT NULL autoincrement,
    CREATED_AT TIMESTAMP_NTZ(9),
    UPDATED_AT TIMESTAMP_NTZ(9),
    ADDRESS VARCHAR(16777216),
    LATITUDE VARCHAR(16777216) NOT NULL,
    LONGITUDE VARCHAR(16777216) NOT NULL,
    unique (LATITUDE),
    unique (LONGITUDE),
    primary key (ID)
);
but when I try to create a new object in this table, I get:
sqlalchemy.orm.exc.FlushError: Instance <Location at 0x7fead79677c0> has a NULL identity key. If this is an auto-generated value, check that the database table allows generation of new primary key values, and that the mapped Column object is configured to expect these generated values. Ensure also that this flush() is not occurring at an inappropriate time, such as within a load() event.
my model is:
class Location(Base):
    id = Column(Integer, primary_key=True)
    address = Column(String)
    latitude = Column(String, unique=True, nullable=False)
    longitude = Column(String, unique=True, nullable=False)
    buildings = relationship("Building", back_populates="location")
    quotes = relationship("Quote", back_populates="location")
    binds = relationship("Bind", back_populates="location")
and I'm trying to do this:
def create_location(db: Session, data: Dict[str, Any]) -> Location:
    location = Location(
        address=data["address"],  # type: ignore
        latitude=data["lat"],  # type: ignore
        longitude=data["lng"],  # type: ignore
    )
    db.add(location)
    db.commit()
    return location
I also tried using:
id = Column(Integer, Sequence("id_seq"), primary_key=True)
but I got:
sqlalchemy.exc.StatementError: (sqlalchemy.exc.ProgrammingError) (snowflake.connector.errors.ProgrammingError) 000904 (42000): SQL compilation error: error line 1 at position 7
backend_1 | invalid identifier 'ID_SEQ.NEXTVAL'
You forgot to define the Sequence in your model. When you define the Sequence value on table creation in Snowflake, a Sequence is generated at the schema level.
from sqlalchemy import Column, Integer, Sequence
...
class Location(Base):
    id = Column(Integer, Sequence("Location_Id"), primary_key=True,
                autoincrement=True)
    address = Column(String)
    ...
Make sure your user role has usage permission for that sequence and that should take care of your issue setting the next value for your primary key.
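For example, granting that permission might look roughly like this (a sketch; the connection URL, sequence name, and role name are all assumptions, not from the question):
from sqlalchemy import create_engine, text

# Hypothetical Snowflake connection URL; adjust for your account.
engine = create_engine("snowflake://user:password@account/PRICE_SERVICE/FP7")

with engine.connect() as conn:
    # Sequence and role names are placeholders.
    conn.execute(text('GRANT USAGE ON SEQUENCE "Location_Id" TO ROLE my_app_role'))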
An approach that helps me with table primary keys is defining a mixin class that uses declared_attr to automatically define my primary keys based on the table name.
from sqlalchemy import Column, Integer, Sequence
from sqlalchemy.ext.declarative import declared_attr

class SomeMixin(object):
    @declared_attr
    def record_id(cls):
        """
        Use table name to define pk
        """
        return Column(
            f"{cls.__tablename__} Id",
            Integer(),
            primary_key=True,
            autoincrement=True
        )
Then you pass said mixin into your model
from sqlalchemy import Column, Integer, String, Sequence
from wherever import SomeMixin

class Location(Base, SomeMixin):
    address = Column(String)
    ...
Now Location.record_id gets set through the sequence you defined in the mixin.
Hope this helped

Option to ignore extra keywords in an sqlalchemy Mapped Class constructor?

Per below, I am trying initialize a sqlalchemy Mapped Class from a python dictionary that has extra keys. Is it possible to have the Mapped Class automatically ignore the extra keys instead of throwing an error? Likewise, can the Mapped Class have default values if the keys are not present?
from sqlalchemy import Column, Integer, String

class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)
And here is the init part:
my_example_user = {'id': 1, 'name': 'john', 'extra_key': 1234}
User(**my_example_user)
which throws an invalid keyword argument error.
Thoughts?
SQLAlchemy Mapper objects have an attrs property, a dictionary-like collection of the mapped attributes of your class.
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import class_mapper
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class User(Base):
    __tablename__ = 'user'
    id = Column(Integer, primary_key=True)
    name = Column(String)

user = {
    'name': 'Eihli',
    'skill': 11
}

user_mapper = class_mapper(User)

mapped_user = User(**user)
# Boom! TypeError: 'skill' is an invalid keyword argument for User

mapped_user = User(**{
    k: v for k, v in user.items()
    if k in user_mapper.attrs.keys()
})
# Success!
# Success!
No need to mess around with maintaining exclude lists, mucking about with __dict__, or getting in the way of super calls.
If you're trying to generate models with nested data, you'll have to do things a little differently, otherwise you'll get an "Unhashable type 'dict'" error.
Here's an example of a helper that inspects the mapper and gets the keys of the relationships.
from sqlalchemy import inspect
from sqlalchemy.orm import class_mapper

def from_json(model, data):
    mapper = class_mapper(model)
    keys = mapper.attrs.keys()
    relationships = inspect(mapper).relationships
    args = {k: v for k, v in data.items()
            if k in keys and k not in relationships}
    return model(**args)
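Hypothetical usage of the helper above, with the User model and dict from earlier; the extra 'skill' key is silently dropped:
user_data = {'name': 'Eihli', 'skill': 11}
mapped_user = from_json(User, user_data)  # no TypeError; 'skill' is filtered out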
In short, define a constructor that does not pass arguments up to its superclass:
class User(Base):
    # ...
    def __init__(self, **entries):
        # NOTE: Do not call superclass
        # (which is otherwise the default behaviour).
        #super(User, self).__init__(**entries)
        self.__dict__.update(entries)
I hit the same problem in the transition from peewee, which requires the opposite - passing arguments to its superclass (and, therefore, the constructor was already defined). So I just tried commenting the line out, and things started to work.
UPDATE
Also, make sure that entries do not contain (and, therefore, overwrite) any meta fields defined on the User class for SQLAlchemy, for example ORM relationships. It's kind of obvious, but when the mistake is made, it might not be easy to spot the problem.
Are we guaranteed that the __init__ of the superclass which is in place will never have other desired effects than setting the __dict__ entries? I didn't feel quite comfortable bypassing the superclass call completely, so my attempt at solving this was as follows, passing on only the entries which correspond to column names:
class User(Base):
    # ...
    def __init__(self, **entries):
        '''Override to avoid TypeError when passed spurious column names'''
        col_names = set([col.name for col in self.__table__.columns])
        superentries = {k: entries[k] for k in col_names.intersection(entries.keys())}
        super().__init__(**superentries)
Also, to pass extra keywords and still call the Base.__init__() method, you can exclude the extra keys from the super() call and then do what you want with them:
from sqlalchemy import Column, Integer, String

class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)

    def __init__(self, **kwargs):
        extra_kw_list = ['key1', 'key2']
        super(User, self).__init__(**{x: y for x, y in kwargs.items()
                                      if x not in extra_kw_list})
        # do something you need here
        item1, item2 = kwargs['key1'], kwargs['key2']
If your model has relationships, you can use your model's Mapper object, as @eric-ihli mentioned. Here is another way (note the __init__ method):
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.orm import backref, relationship

from my_app.db_models import Base

class Employee(Base):
    __tablename__ = "employee"

    id = Column(Integer, primary_key=True, autoincrement=True)
    department_id = Column(Integer, ForeignKey("department.id"), index=True)
    email = Column(String, unique=True, index=True, nullable=False)
    name = Column(String)

    department = relationship(
        "Department", backref=backref("employees", cascade="all, delete-orphan")
    )

    def __init__(self, **kwargs):
        allowed_args = self.__mapper__.class_manager  # returns a dict
        kwargs = {k: v for k, v in kwargs.items() if k in allowed_args}
        super().__init__(**kwargs)
This way, you can create an employee model like this:
from contextlib import closing

from my_app.db_models import Department, Employee, SessionLocal

with closing(SessionLocal()) as db:
    dept = db.query(Department).filter(Department.name == 'HR').first()
    employee = Employee(name='John Smith', email='john@smith.com', department=dept)
    db.add(employee)
    db.commit()
Based on R Yakovlev's answer, you can make the list of the elements dynamic:
from sqlalchemy import Column, Integer, String

class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)

    def __init__(self, **kwargs):
        keep_kwargs = {k: v for k, v in kwargs.items() if k in user_columns}
        super(User, self).__init__(**keep_kwargs)

user_columns = [_ for _ in User.__dict__.keys() if not _.startswith('_')]
I wanted to find a way to embed user_columns in the object, like with a @hybrid_property, yet not have it recomputed every time it's used. I expect that is possible, but it exceeded my time limit.
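One way to do that (a sketch, not from the original answer) is to cache the mapped column names on the class the first time they are needed, so the list is computed only once:
from sqlalchemy import Column, Integer, String, inspect
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)

    _column_keys = None  # cached after first use

    @classmethod
    def column_keys(cls):
        if cls._column_keys is None:
            # inspect(cls) returns the Mapper; column_attrs lists mapped columns
            cls._column_keys = {attr.key for attr in inspect(cls).column_attrs}
        return cls._column_keys

    def __init__(self, **kwargs):
        allowed = self.column_keys()
        super().__init__(**{k: v for k, v in kwargs.items() if k in allowed})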

How to update sqlalchemy orm object by a python dict

The dict's key names map to the SQLAlchemy object's attributes. For example:
class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)
    fullname = Column(String)
    password = Column(String)
It should be possible to update, e.g., id = 3 with {"name": "diana"}, or id = 15 with {"name": "marchel", "fullname": "richie marchel"}.
You can use setattr() to update attributes on an existing SQLAlchemy object dynamically:
user = session.query(User).get(someid)

for key, value in yourdict.items():
    setattr(user, key, value)
I have another solution here. It can be handy to define a model method as follows:
class ModelName(db.Model):
    """
    docstring here
    """
    ...

    def update(self, **kwargs):
        for key, value in kwargs.items():
            if hasattr(self, key):
                setattr(self, key, value)
I hope this solves your problem.
Thank you
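Hypothetical usage, assuming User defines the update() method above and session is an open session:
user = session.query(User).get(3)
user.update(name="diana", fullname="diana marchel")
session.commit()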
Depending on your use case (if you don't need to validate or infer anything from the model), you can save one DB call by using filter_by with id to get a specific row, and update it using a dictionary like you initially wanted.
user_query = session.query(User).filter_by(id=someid)
data_to_update = dict(name="marchel", fullname="richie marchel")
user_query.update(data_to_update)
You might also need to add synchronize_session=False keyword argument to your update call, depending on the type of your session (if you use scoped_session):
user_query.update(data_to_update, synchronize_session=False)
Based on @Martijn Pieters' answer, you can not only update columns dynamically with setattr, but also choose the table and column dynamically by combining getattr and setattr.
example:
# models.py
class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)
    fullname = Column(String)
    password = Column(String)

# update.py
import models

def dynamic_update(dynamic_table, col_id, dynamic_cols):
    """
    dynamic_table: name of the model, "User" for example
    col_id: id of the row you want to update
    dynamic_cols: key/value pairs, e.g. {"name": "diana"}
    """
    if hasattr(models, dynamic_table):
        table = getattr(models, dynamic_table)
        row = table.query.filter_by(id=col_id).first()
        for key, value in dynamic_cols.items():
            if hasattr(table, key):
                setattr(row, key, value)
        session.commit()
BTW, you can get more info about setattr, getattr, and hasattr from the official Python docs:
https://docs.python.org/2/library/functions.html#setattr
https://docs.python.org/2/library/functions.html#getattr
https://docs.python.org/2/library/functions.html#hasattr
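Hypothetical call matching the docstring above: update the row with id 3 in the User table.
dynamic_update("User", 3, {"name": "diana"})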
In the SQLAlchemy 2.0 API, you can use:
from sqlalchemy import update

stmt = update(User).where(User.name == "john").values(**your_data)
session.execute(stmt)
I think the simplest way is to use sqlalchemy update with a filter:
def update_item(db: Session, item_id: int, item: ItemUpdate):
    db.query(Item).filter(Item.id == item_id).update(item.dict())
    db.commit()
Ensure you are always filtering on a primary key to avoid updating more than a single row. This could be done as a check in the code before committing the session.
I found this solution while working with Flask-SQLAlchemy, then tested it on plain SQLAlchemy, and it works as well:
from sqlalchemy import update

data = {"name": "marchel", "fullname": "richie marchel"}
session.execute(update(User).filter_by(id=3).values(**data))
session.commit()

Dynamically setting __tablename__ for sharding in SQLAlchemy?

In order to handle a growing database table, we are sharding on table name. So we could have database tables that are named like this:
table_md5one
table_md5two
table_md5three
All tables have the exact same schema.
How do we use SQLAlchemy and dynamically specify the table name for the class that corresponds to this? It looks like the declarative_base() classes need to have __tablename__ pre-specified.
There will eventually be too many tables to manually specify derived classes from a parent/base class. We want to be able to build a class that has the table name set up dynamically (maybe passed as a parameter to a function).
OK, we went with the classical SQLAlchemy mapping rather than the declarative one.
So we create a dynamic table object like this:
from sqlalchemy import MetaData, Table, Column, DATE
from sqlalchemy.orm import clear_mappers, mapper

def get_table_object(self, md5hash):
    metadata = MetaData()
    table_name = 'table_' + md5hash
    table_object = Table(table_name, metadata,
                         Column('Column1', DATE, nullable=False),
                         Column('Column2', DATE, nullable=False)
                         )
    clear_mappers()
    mapper(ActualTableObject, table_object)
    return ActualTableObject
Where ActualTableObject is the class mapping to the table.
In Augmenting the Base you find a way of using a custom Base class that can, for example, calculate the __tablename__ attribute dynamically:
class Base(object):
    @declared_attr
    def __tablename__(cls):
        return cls.__name__.lower()
The only problem here is that I don't know where your hash comes from, but this should give a good starting point.
If you require this algorithm not for all your tables but only for one you could just use the declared_attr on the table you are interested in sharding.
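For example, if the hash were available as a class attribute on each model, a sketch could look like this (the shard_hash attribute and model name are assumptions):
from sqlalchemy import Column, Integer
from sqlalchemy.ext.declarative import declarative_base, declared_attr

class ShardBase(object):
    @declared_attr
    def __tablename__(cls):
        # 'shard_hash' is a hypothetical attribute each subclass must set
        return 'table_' + cls.shard_hash

Base = declarative_base(cls=ShardBase)

class TableMd5One(Base):
    shard_hash = 'md5one'
    id = Column(Integer, primary_key=True)

print(TableMd5One.__table__.name)  # table_md5one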
Because I insist on using declarative classes with their __tablename__ dynamically specified by a given parameter, after days of failing with other solutions and hours of studying SQLAlchemy internals, I came up with the following solution that I believe is simple, elegant and race-condition free.
def get_model(suffix):
    DynamicBase = declarative_base(class_registry=dict())

    class MyModel(DynamicBase):
        __tablename__ = 'table_{suffix}'.format(suffix=suffix)

        id = Column(Integer, primary_key=True)
        name = Column(String)
        ...

    return MyModel
Since they have their own class_registry, you will not get that warning saying:
This declarative base already contains a class with the same class name and module name as mypackage.models.MyModel, and will be replaced in the string-lookup table.
Hence, you will not be able to reference them from other models with string lookup. However, it works perfectly fine to use these on-the-fly declared models for foreign keys as well:
ParentModel1 = get_model(123)
ParentModel2 = get_model(456)

class MyChildModel(BaseModel):
    __tablename__ = 'table_child'

    id = Column(Integer, primary_key=True)
    name = Column(String)
    parent_1_id = Column(Integer, ForeignKey(ParentModel1.id))
    parent_2_id = Column(Integer, ForeignKey(ParentModel2.id))
    parent_1 = relationship(ParentModel1)
    parent_2 = relationship(ParentModel2)
If you only use them to query/insert/update/delete without any reference left such as foreign key reference from another table, they, their base classes and also their class_registry will be garbage collected, so no trace will be left.
You can write a function that takes the table name as a parameter and returns the class with the appropriate attributes set:
from sqlalchemy import Column, types

def get_class(table_name):
    class GenericTable(Base):
        __tablename__ = table_name
        ID = Column(types.Integer, primary_key=True)

        def some_function(self):
            ...

    return GenericTable
Then you can create a table using:
get_class("test").__table__.create(bind=engine) # See sqlachemy.engine
Try this
import zlib
from datetime import datetime

from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, BigInteger, DateTime, String

BASE = declarative_base()
ENTITY_CLASS_DICT = {}

class AbsShardingClass(BASE):
    __abstract__ = True
    # (shared column definitions go here; a primary key is required
    # for the mapping to work)
    id = Column(Integer, primary_key=True)

def get_class_name_and_table_name(hashid):
    return 'ShardingClass%s' % hashid, 'sharding_class_%s' % hashid

def get_sharding_entity_class(hashid):
    """
    @param hashid: hashid
    @type hashid: int
    @rtype: AbsShardingClass
    """
    if hashid not in ENTITY_CLASS_DICT:
        class_name, table_name = get_class_name_and_table_name(hashid)
        cls = type(class_name, (AbsShardingClass,),
                   {'__tablename__': table_name})
        ENTITY_CLASS_DICT[hashid] = cls
    return ENTITY_CLASS_DICT[hashid]

cls = get_sharding_entity_class(1)
print(session.query(cls).get(100))
Instead of imperatively creating a Table object, you can use the usual declarative_base and make a closure to set the table name, as follows:
def make_class(Base, table_name):
    class User(Base):
        __tablename__ = table_name

        id = Column(Integer, primary_key=True)
        name = Column(String)

    return User

Base = declarative_base()
engine = make_engine()

custom_named_usertable = make_class(Base, 'custom_name')

Base.metadata.create_all(engine)

session = make_session(engine)
new_user = custom_named_usertable(name='Adam')
session.add(new_user)
session.commit()
session.close()
engine.dispose()
You just need to create a class object to pass as cls to declarative_base:
from sqlalchemy.ext.declarative import declarative_base, declared_attr

class Base(object):
    @declared_attr
    def __tablename__(cls):
        return cls.__name__.lower()

Base = declarative_base(cls=Base)

Can the same @property present both scalar and collection behavior in SQLAlchemy?

I'm converting a library to use SQLAlchemy as the datastore. I like the flexibility of the PickleType column, but it doesn't seem to work well when pickling SA objects (table rows). Even if I overload __setstate__ and __getstate__ to do a query + session merge when unpickling, there's no referential integrity across that pickle boundary. That means that I can't query collections of objects.
class Bar(Base):
    id = Column(Integer, primary_key=True)
    __tablename__ = 'bars'
    foo_id = Column(Integer, ForeignKey('foos.id'), primary_key=True)

class Foo(Base):
    __tablename__ = 'foos'
    id = Column(Integer, primary_key=True)  # referenced by Bar.foo_id
    values = Column(PickleType)
    #values = relationship(Bar)  # list interface (one->many), but can't assign a scalar or use a dictionary

    def __init__(self):
        self.values = [Bar(), Bar()]
        # only allowed with PickleType column
        #self.values = Bar()
        #self.values = {'one': Bar()}
        #self.values = [[Bar(), Bar()], [Bar(), Bar()]]

# get all Foo's with a Bar whose id=1
session.query(Foo).filter(Foo.values.any(Bar.id == 1)).all()
One workaround would be to implement my own mutable object type as is done here. I'm imagining having some kind of flattening scheme which traverses the collections and appends them to a simpler one->many relationship. Perhaps the flattened list might have to be weakrefs to the pickled collection's objects?
Tracking changes and references sounds like no fun and I can't find any examples of people pickling SA rows anywhere else (perhaps indicative of bad design on my part?). Any advice?
EDIT 1:
After some discussion I've simplified the request. I'm looking for a single property that can behave as either a scalar or a collection. Here is my (failing) attempt:
from sqlalchemy import MetaData, Column, Integer, PickleType, String, ForeignKey, create_engine
from sqlalchemy.orm import relationship, Session
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm.collections import attribute_mapped_collection

# from http://www.sqlalchemy.org/trac/browser/examples/vertical
from sqlalchemy_examples.vertical import dictlike_polymorphic as dictlike

metadata = MetaData()
Base = declarative_base()
engine = create_engine('sqlite://', echo=True)
Base.metadata.bind = engine
session = Session(engine)

class AnimalFact(dictlike.PolymorphicVerticalProperty, Base):
    """key/value attribute whose value can be one of several types"""
    __tablename__ = 'animalfacts'
    type_map = {
        # str: ('string', 'str_value'),
        list: ('list', 'list_value'),
        tuple: ('tuple', 'tuple_value')}

    id = Column(Integer, primary_key=True)
    animal_id = Column(Integer, ForeignKey('animal.id'), primary_key=True)
    key = Column(String, primary_key=True)
    type = Column(String)
    #str_value = Column(String)
    list_value = relationship('StringEntry')
    tuple_value = relationship('StringEntry2')

class Animal(Base, dictlike.VerticalPropertyDictMixin):
    __tablename__ = 'animal'
    _property_type = AnimalFact
    _property_mapping = 'facts'

    id = Column(Integer, primary_key=True)
    name = Column(String)
    facts = relationship(AnimalFact, backref='animal',
                         collection_class=attribute_mapped_collection('key'))

    def __init__(self, name):
        self.name = name

class StringEntry(Base):
    __tablename__ = 'stringentry'
    id = Column(Integer, primary_key=True)
    animalfacts_id = Column(Integer, ForeignKey('animalfacts.id'))
    value = Column(String)

    def __init__(self, value):
        self.value = value

class StringEntry2(Base):
    __tablename__ = 'stringentry2'
    id = Column(Integer, primary_key=True)
    animalfacts_id = Column(Integer, ForeignKey('animalfacts.id'))
    value = Column(String)

    def __init__(self, value):
        self.value = value

Base.metadata.create_all()

a = Animal('aardvark')
a['eyes'] = [StringEntry('left side'), StringEntry('right side')]  # works great
a['eyes'] = (StringEntry2('left side'), StringEntry2('right side'))  # works great
#a['cute'] = 'sort of'  # failure
The PickleType is really a hacky way around edge cases where you have some arbitrary object you'd just like to shove away. It's a given that when you use PickleType, you're giving up any relational advantages, including being able to filter/query on them, etc.
So putting an ORM mapped object in a Pickle is basically a terrible idea.
If you want a collection of scalar values, use traditional mappings and relationship() in combination with association_proxy. See http://docs.sqlalchemy.org/en/rel_0_7/orm/extensions/associationproxy.html#simplifying-scalar-collections .
"or dictionaries". Use attribute_mapped_collection: http://docs.sqlalchemy.org/en/rel_0_7/orm/collections.html#dictionary-collections
"dictionaries plus scalars": combine both attribute_mapped_collection and association_proxy: http://docs.sqlalchemy.org/en/rel_0_7/orm/extensions/associationproxy.html#proxying-to-dictionary-based-collections
Edit 1:
Well, you dug into a really esoteric and complex example there. association_proxy is a much easier way to handle these cases where you want an object to act like a scalar, so here's that, without all the crazy boilerplate of the "vertical" example, which I'd avoid as it is really too complex. Your example seemed undecided about primary key style, so I went with the composite version. Surrogate + composite can't be mixed in a single table (well, it can, but it's relationally incorrect; the key should be the smallest unit that identifies a row - http://en.wikipedia.org/wiki/Unique_key is a good top-level read into the various subjects regarding this).
from sqlalchemy import Integer, String, Column, create_engine, ForeignKey, ForeignKeyConstraint
from sqlalchemy.orm import relationship, Session
from sqlalchemy.orm.collections import attribute_mapped_collection
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.associationproxy import association_proxy

Base = declarative_base()

class AnimalFact(Base):
    """key/value attribute whose value can be either a string or a list of strings"""
    __tablename__ = 'animalfacts'

    # use either surrogate PK id, or the composite animal_id/key - but
    # not both. id/animal_id/key all together is not a proper key.
    # Personally I'd go for "id" here, but here's the composite version.
    animal_id = Column(Integer, ForeignKey('animal.id'), primary_key=True)
    key = Column(String, primary_key=True)

    # data
    str_value = Column(String)
    _list_value = relationship('StringEntry')

    # proxy list strings
    list_proxy = association_proxy('_list_value', 'value')

    def __init__(self, key, value):
        self.key = key
        self.value = value

    @property
    def value(self):
        if self.str_value is not None:
            return self.str_value
        else:
            return self.list_proxy

    @value.setter
    def value(self, value):
        if isinstance(value, str):
            self.str_value = value
        elif isinstance(value, list):
            self.list_proxy = value
        else:
            assert False

class Animal(Base):
    __tablename__ = 'animal'

    id = Column(Integer, primary_key=True)
    name = Column(String)

    _facts = relationship(AnimalFact, backref='animal',
                          collection_class=attribute_mapped_collection('key'))
    facts = association_proxy('_facts', 'value')

    def __init__(self, name):
        self.name = name

    # dictionary interface around "facts".
    # I'd just use "animal.facts" here, but here's how to skip that.
    def __getitem__(self, key):
        return self.facts.__getitem__(key)

    def __setitem__(self, key, value):
        self.facts.__setitem__(key, value)

    def __delitem__(self, key):
        self.facts.__delitem__(key)

    def __contains__(self, key):
        return self.facts.__contains__(key)

    def keys(self):
        return self.facts.keys()

class StringEntry(Base):
    __tablename__ = 'myvalue'

    id = Column(Integer, primary_key=True)
    animal_id = Column(Integer)
    key = Column(Integer)
    value = Column(String)

    # because AnimalFact has a composite PK, we need
    # a composite FK.
    __table_args__ = (ForeignKeyConstraint(
                          ['key', 'animal_id'],
                          ['animalfacts.key', 'animalfacts.animal_id']),
                      )

    def __init__(self, value):
        self.value = value

engine = create_engine('sqlite://', echo=True)
Base.metadata.create_all(engine)

session = Session(engine)

# create a new animal
a = Animal('aardvark')
a['eyes'] = ['left side', 'right side']
a['cute'] = 'sort of'
session.add(a)
session.commit()
session.close()

for animal in session.query(Animal):
    print(animal.name, ",".join(["%s" % animal[key] for key in animal.keys()]))
