I have the following script which attempts to create 3 tables if they don't exist yet in my database.
I am using MySQL as the database engine.
from sqlalchemy import create_engine, Column, ForeignKey, MetaData, Table
from sqlalchemy.dialects.mysql import DATE, FLOAT, INTEGER, JSON, SMALLINT, TEXT, VARCHAR


class Journal:
    USER = '******'
    PASSWORD = '******'
    HOST = '******'
    DB_NAME = 'trades_test'

    def __init__(self):
        self.engine = create_engine(
            f'mysql+mysqlconnector://{self.USER}:{self.PASSWORD}@{self.HOST}/{self.DB_NAME}'
        )
        self.create_openings_table()
        self.create_closings_table()
        self.create_adjustments_table()
    def create_openings_table(self):
        meta = MetaData(self.engine)
        self.openings = Table(
            'openings',
            meta,
            Column('trade_id',
                   INTEGER(unsigned=True),
                   primary_key=True,
                   autoincrement=True),
            Column('opened_at', DATE(), nullable=False),
            Column('underlying', VARCHAR(5), nullable=False),
            Column('underlying_price', FLOAT(2), nullable=False),
            Column('iv_rank', SMALLINT(), nullable=False),
            Column('strategy', VARCHAR(20), nullable=False),
            Column('quantity', SMALLINT(), nullable=False),
            Column('expiration_date', DATE(), nullable=False),
            Column('option_types', JSON()),
            Column('strikes', JSON(), nullable=False),
            Column('premium', FLOAT(2), nullable=False),
            Column('prob_of_profit', FLOAT(2), nullable=False),
            Column('margin', FLOAT(2), nullable=False),
            Column('notes', TEXT()))
        meta.create_all()
    def create_closings_table(self):
        meta = MetaData(self.engine)
        self.closings = Table(
            'closings',
            meta,
            Column('id',
                   INTEGER(unsigned=True),
                   primary_key=True,
                   autoincrement=True),
            # FOREIGN KEY - fk_closings_trade_id
            Column('trade_id', ForeignKey('openings.trade_id')),
            Column('closed_at', DATE(), nullable=False),
            Column('underlying_price', FLOAT(2), nullable=False),
            Column('iv_rank', SMALLINT(), nullable=False),
            Column('premium', FLOAT(2), nullable=False),
            Column('margin', FLOAT(2), nullable=False),
            Column('notes', TEXT()),
        )
        meta.create_all()
    def create_adjustments_table(self):
        meta = MetaData(self.engine)
        self.adjustments = Table(
            'adjustments',
            meta,
            Column('id',
                   INTEGER(unsigned=True),
                   primary_key=True,
                   autoincrement=True),
            # FOREIGN KEY - fk_adj_trade_id
            Column('trade_id', ForeignKey('openings.trade_id')),
            Column('adjusted_at', DATE(), nullable=False),
            Column('underlying_price', FLOAT(2), nullable=False),
            Column('iv_rank', SMALLINT(), nullable=False),
            Column('quantity', SMALLINT()),
            Column('premium', FLOAT(2)),
            Column('option_types', JSON()),
            Column('strikes', JSON()),
            Column('expiration_date', DATE()),
            Column('margin', FLOAT(2)),
            Column('notes', TEXT()),
        )
        meta.create_all()
This code produces this error:
Traceback (most recent call last):
  File "/Users/or/Desktop/Or/Options/journal/journal.py", line 105, in <module>
    Journal()
  File "/Users/or/Desktop/Or/Options/journal/journal.py", line 16, in __init__
    self.create_closings_table()
  File "/Users/or/Desktop/Or/Options/journal/journal.py", line 61, in create_closings_table
    meta.create_all()
  File "/Users/or/opt/anaconda3/lib/python3.8/site-packages/sqlalchemy/sql/schema.py", line 4744, in create_all
    bind._run_ddl_visitor(
  File "/Users/or/opt/anaconda3/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 3008, in _run_ddl_visitor
    conn._run_ddl_visitor(visitorcallable, element, **kwargs)
  File "/Users/or/opt/anaconda3/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 2016, in _run_ddl_visitor
    visitorcallable(self.dialect, self, **kwargs).traverse_single(element)
  File "/Users/or/opt/anaconda3/lib/python3.8/site-packages/sqlalchemy/sql/visitors.py", line 483, in traverse_single
    return meth(obj, **kw)
  File "/Users/or/opt/anaconda3/lib/python3.8/site-packages/sqlalchemy/sql/ddl.py", line 822, in visit_metadata
    collection = sort_tables_and_constraints(
  File "/Users/or/opt/anaconda3/lib/python3.8/site-packages/sqlalchemy/sql/ddl.py", line 1286, in sort_tables_and_constraints
    dependent_on = fkc.referred_table
  File "/Users/or/opt/anaconda3/lib/python3.8/site-packages/sqlalchemy/sql/schema.py", line 3671, in referred_table
    return self.elements[0].column.table
  File "/Users/or/opt/anaconda3/lib/python3.8/site-packages/sqlalchemy/util/langhelpers.py", line 1093, in __get__
    obj.__dict__[self.__name__] = result = self.fget(obj)
  File "/Users/or/opt/anaconda3/lib/python3.8/site-packages/sqlalchemy/sql/schema.py", line 2376, in column
    raise exc.NoReferencedTableError(
sqlalchemy.exc.NoReferencedTableError: Foreign key associated with column 'closings.trade_id' could not find table 'openings' with which to generate a foreign key to target column 'trade_id'
I would like the primary key of the first table (trade_id) to serve as a foreign key in the other two tables.
I've also seen other ways of constructing the tables, mainly in combination with Flask, where you subclass a Model class and fill out the details of the table there. What would be the more correct way to construct a small database application like this?
The issue with the foreign key happens because the MetaData object is a registry that stores a collection of tables. When a foreign key is defined with a string target such as 'openings.trade_id', SQLAlchemy looks in the MetaData of the referring table for the referenced table. Because you create a new MetaData object in each create method, the three tables all end up in different MetaData objects, so the foreign key cannot be resolved when the second table, which references the first one, is created.
The solution is to define the MetaData object once and attach each Table object to that single MetaData.
See Working with Database Metadata for more information about this.
You also don't have to call create_all() in each create method; it can be called once at the end of __init__().
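A minimal sketch of that change, with the column lists shortened and the adjustments table left out for brevity (the create_* methods now only define the tables):

from sqlalchemy import create_engine, Column, ForeignKey, MetaData, Table
from sqlalchemy.dialects.mysql import DATE, INTEGER, TEXT


class Journal:
    def __init__(self):
        # Connection details elided; same URL as in the question.
        self.engine = create_engine(
            'mysql+mysqlconnector://user:password@host/trades_test'
        )
        # One MetaData shared by every table, so the string reference
        # 'openings.trade_id' can be resolved.
        self.meta = MetaData()
        self.create_openings_table()
        self.create_closings_table()
        # Emit all CREATE TABLE statements once, in dependency order.
        self.meta.create_all(self.engine)

    def create_openings_table(self):
        self.openings = Table(
            'openings',
            self.meta,
            Column('trade_id', INTEGER(unsigned=True),
                   primary_key=True, autoincrement=True),
            Column('opened_at', DATE(), nullable=False),
            # ... remaining columns exactly as in the question ...
            Column('notes', TEXT()),
        )

    def create_closings_table(self):
        self.closings = Table(
            'closings',
            self.meta,
            Column('id', INTEGER(unsigned=True),
                   primary_key=True, autoincrement=True),
            Column('trade_id', ForeignKey('openings.trade_id')),
            Column('closed_at', DATE(), nullable=False),
            # ... remaining columns exactly as in the question ...
            Column('notes', TEXT()),
        )

create_all() sorts the tables by their foreign key dependencies, so openings is always created before closings and adjustments, and existing tables are skipped because checkfirst defaults to True.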
Regarding your last question about the different approaches: what you have here is mainly SQLAlchemy Core. When you define tables as subclasses of a Base (or Flask's Model) class you are using the SQLAlchemy ORM. The differences are explained a little bit more here: What is the difference between SQLAlchemy Core and ORM
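For comparison, a rough sketch of how the openings table could look in the ORM's declarative style; the class name Opening and the shortened column list are my own choices, not something from your code:

from sqlalchemy import Column, Date, Text
from sqlalchemy.dialects.mysql import INTEGER
from sqlalchemy.orm import declarative_base

Base = declarative_base()


class Opening(Base):
    __tablename__ = 'openings'

    trade_id = Column(INTEGER(unsigned=True), primary_key=True, autoincrement=True)
    opened_at = Column(Date, nullable=False)
    # ... remaining columns as in the Core version ...
    notes = Column(Text)


# Base.metadata plays the role of the single shared MetaData object:
# Base.metadata.create_all(engine)

For an application of this size either style works; the ORM mainly adds the ability to load and persist these classes through a Session instead of writing Core statements by hand.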
Related
I'm trying to do internationalization, and I've encountered one thing I can't seem to figure out. Fair to say I am a total novice with SQLAlchemy (coming from the Django world).
I am using SQLAlchemy Core (v1.4.36) with PostgreSQL and async sessions. Let's assume I have the following tables:
from sqlalchemy import Column, ForeignKey, Integer, MetaData, String, Table, Text

catalog_metadata = MetaData()

categories = Table(
    'category',
    catalog_metadata,
    Column('id', Integer, primary_key=True, autoincrement=True)
)

category_translation = Table(
    'category_translation',
    catalog_metadata,
    Column('id', Integer, primary_key=True, autoincrement=True),
    Column('name', String(50), nullable=False),
    Column('language', String(2), nullable=False),
    Column('original_id', Integer, ForeignKey('category.id', ondelete="CASCADE"))
)
product = Table(
    'product',
    catalog_metadata,
    Column('id', Integer, primary_key=True, autoincrement=True),
    Column('category_id', Integer,
           ForeignKey('category.id', ondelete="CASCADE"),
           nullable=True),
)
product_translation = Table(
    'product_translation',
    catalog_metadata,
    Column('id', Integer, primary_key=True, autoincrement=True),
    Column('language', String(2), nullable=False),
    Column('original_id', Integer, ForeignKey('product.id', ondelete="CASCADE")),
    Column('name', String(50), nullable=False),
    Column('description', Text)
)
To explain, in case it is not obvious: I have two main tables, category and product. Each of them has "translatable" fields that are exposed in the secondary tables category_translation and product_translation, respectively. The main goal is, given a specific language, to retrieve the information for that language from the DB and load it onto a Category and a Product class. The mapper is defined next:
mapper.map_imperatively(
    model.Category,
    categories,
    properties={
        'products': relationship(model.Product, backref="category"),
        'translations': relationship(
            model.CategoryTranslation,
            backref="category",
            collection_class=attribute_mapped_collection('language')
        )
    },
)

mapper.map_imperatively(model.CategoryTranslation, category_translation)
mapper.map_imperatively(model.ProductTranslation, product_translation)

mapper.map_imperatively(
    model.Product,
    product,
    properties={
        'translations': relationship(
            model.ProductTranslation,
            backref="product",
            collection_class=attribute_mapped_collection('language')
        )
    },
)
The implementation of the model classes is irrelevant, but you can assume they have the needed fields defined. If you must know, I am using FastAPI and pydantic to serialize the output. However, that is not the problem.
What I want to know is how I can set the translated fields on the mapped classes when querying the database.
Meaning that the instantiated model.Category objects have the name field filled, and the model.Product objects have the name and description fields filled.
As of now I am doing this query:
select(model_cls).join(translation_cls, (model_cls.id == translation_cls.original_id) & (translation_cls.language == requestedLanguage))
Where model_cls is one of the main tables and translation_cls is its respective translation table. For instance:
select(model.Category).join(model.CategoryTranslation, (model.Category.id == model.CategoryTranslation.original_id) & (model.CategoryTranslation.language == requestedLanguage))
Consider that when requesting a product, we may need to join and set attributes both for the product and for its related category. The response may need to look like this:
{
    "id": 1,
    "name": "TranslatedProductName",
    "category": {
        "id": 1,
        "name": "TranslatedCategoryName",
        "description": "TranslatedCategoryDescription"
    }
}
I hope I've explained myself. If anyone needs more info or explanation, please comment.
I am using Flask with Alembic and I have the two tables below linked by a foreign key constraint:
table_one = Table("table_one", meta.Base.metadata,
    Column("id", BigInteger, primary_key=True),
    Column("filename", VARCHAR(65535)),
    Column("mission", VARCHAR(65535)),
)

table_two = Table("table_two", meta.Base.metadata,
    Column("id", BigInteger, primary_key=True),
    Column("file_id", BigInteger, ForeignKey("table_one.id")),
    Column("username", ArrowType(timezone=True)),
)
I am trying to get rid of table_one with the alembic revision below
def upgrade():
    op.drop_table('table_one')
    op.drop_constraint('table_two_id_fkey', 'table_two', type_='foreignkey')
    op.drop_column('table_two', 'file_id')
    op.drop_column('table_two', 'id')


def downgrade():
    op.add_column('table_two', sa.Column('id', sa.BIGINT(), autoincrement=True, nullable=False))
    op.add_column('table_two', sa.Column('file_id', sa.BIGINT(), autoincrement=False, nullable=True))
    op.create_foreign_key('table_two_file_id_fkey', 'table_two', 'table_one', ['file_id'], ['id'])
    op.create_table('table_one',
        sa.Column('id', sa.BIGINT(), autoincrement=True, nullable=False),
        sa.Column('filename', sa.VARCHAR(length=65535), autoincrement=False, nullable=True),
        sa.Column('mission', sa.VARCHAR(length=65535), autoincrement=False, nullable=True),
        sa.PrimaryKeyConstraint('id', name='table_one_pkey')
    )
but unfortunately there seems to be an issue with the cascade and I am facing the error below:
psycopg2.errors.DependentObjectsStillExist: cannot drop table table_one because other objects depend on it
DETAIL: constraint table_two_file_id_fkey on table table_two depends on table table_one
HINT: Use DROP ... CASCADE to drop the dependent objects too.
Does anyone have an idea on how to solve this issue?
In case anyone is trying to drop a foreign key, as the question title says:
You can remove a foreign key using the drop_constraint() function in alembic
op.drop_constraint(constraint_name="FK_<target>_<source>", table_name="<source>")
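For the migration in the question itself, the fix would likely just be ordering: drop the foreign key (named table_two_file_id_fkey in the error message) before dropping the table it points to, roughly:

def upgrade():
    # Drop the constraint that depends on table_one first ...
    op.drop_constraint('table_two_file_id_fkey', 'table_two', type_='foreignkey')
    op.drop_column('table_two', 'file_id')
    op.drop_column('table_two', 'id')
    # ... then the table can be dropped without DROP ... CASCADE.
    op.drop_table('table_one')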
I'm trying to create tables on the fly from existing data... however, the table I need to reference has dual primary keys, and I can't figure out how to satisfy the restrictions.
I.e. I start with the following two tables...
self.DDB_PAT_BASE = Table('DDB_PAT_BASE', METADATA,
    Column('PATID', INTEGER(), primary_key=True),
    Column('PATDB', INTEGER(), primary_key=True),
    Column('FAMILYID', INTEGER()),
)

self.DDB_ERX_MEDICATION_BASE = Table('DDB_ERX_MEDICATION_BASE', METADATA,
    Column('ErxID', INTEGER(), primary_key=True),
    Column('ErxGuid', VARCHAR(length=36)),
    Column('LastDownload', DATETIME()),
    Column('LastUpload', DATETIME()),
    Column('Source', INTEGER()),
)
When I try the following, it works...
t = Table('testtable', METADATA,
    Column('ErxID', INTEGER(), ForeignKey('DDB_ERX_MEDICATION_BASE.ErxID')),
)
t.create()
However, both the following give me the error...
t = Table('testtable', METADATA,
    Column('PATID', INTEGER(), ForeignKey('DDB_PAT_BASE.PATID')),
)
t.create()

t = Table('testtable', METADATA,
    Column('PATID', INTEGER(), ForeignKey('DDB_PAT_BASE.PATID')),
    Column('PATDB', INTEGER(), ForeignKey('DDB_PAT_BASE.PATDB')),
)
t.create()
sqlalchemy.exc.OperationalError: (pymssql.OperationalError) (1776, "There are no primary or candidate keys in the referenced table 'DDB_PAT_BASE' that match the referencing column list in the foreign key 'FK__testtabl__PATID__3FD3A585'.DB-Lib error message 20018, severity 16:\nGeneral SQL Server error: Check messages from the SQL Server\nDB-Lib error message 20018, severity 16:\nGeneral SQL Server error: Check messages from the SQL Server\n") [SQL: '\nCREATE TABLE [testtable] (\n\t[PATID] INTEGER NULL, \n\tFOREIGN KEY([PATID]) REFERENCES [DDB_PAT_BASE] ([PATID])\n)\n\n']
The table you are pointing to has a composite primary key, not multiple primary keys. Hence, you need to create a composite foreign key, not two foreign keys pointing to each half of the composite primary key:
t = Table('testtable', METADATA,
    Column('PATID', INTEGER()),
    Column('PATDB', INTEGER()),
    ForeignKeyConstraint(['PATID', 'PATDB'], ['DDB_PAT_BASE.PATID', 'DDB_PAT_BASE.PATDB']),
)
t.create()
The situation is a little bit simplified. I have two migration files for sqlalchemy-migrate.
In the first, I create the table volume_usage_cache, then autoload it, create a copy of its columns, and print them:
from sqlalchemy import Column, DateTime
from sqlalchemy import Boolean, BigInteger, MetaData, Integer, String, Table


def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    # Create new table
    volume_usage_cache = Table('volume_usage_cache', meta,
        Column('deleted', Boolean(create_constraint=True, name=None)),
        Column('id', Integer(), primary_key=True, nullable=False),
        Column('curr_write_bytes', BigInteger(), default=0),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )
    volume_usage_cache.create()

    volume_usage_cache = Table('volume_usage_cache', meta, autoload=True)
    columns = []
    [columns.append(column.copy()) for column in volume_usage_cache.columns]
    print columns
And I get in the log what I expected:
[Column('deleted', Boolean(), table=None), Column('id', Integer(), table=None,
primary_key=True, nullable=False), Column('curr_write_bytes', BigInteger(),
table=None, default=ColumnDefault(0))]
But if I make a copy of the columns in the second migration file (which is run after the first):
from sqlalchemy import MetaData, String, Integer, Boolean, Table, Column, Index


def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    table = Table("volume_usage_cache", meta, autoload=True)
    columns = []
    for column in table.columns:
        columns.append(column.copy())
    print columns
I get a different result:
[Column('deleted', INTEGER(), table=None, default=ColumnDefault(0)),
Column(u'id', INTEGER(), table=None, primary_key=True, nullable=False),
Column(u'curr_write_bytes', NullType(), table=None)]
Why does the curr_write_bytes column have NullType?
There are two problems.
First: in the first file we are using the old MetaData object, which already contains all the columns with the needed types. If we created a new MetaData instance there instead, SQLAlchemy would load the table information from the database and we would get the same result as in the second file.
Second: there is no support in SQLAlchemy for the BigInteger column type when reflecting from SQLite, and SQLite doesn't enforce column types at all. So we can create a table with a BigInteger column (and it will work), but after autoload the type of such a column is converted to NullType.
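A small sketch of the first point, using an in-memory SQLite engine and the newer autoload_with reflection API just for illustration (on recent SQLAlchemy versions the reflected type may come back as BIGINT rather than NullType; the NullType result is what the older version used in the question reported):

from sqlalchemy import BigInteger, Column, Integer, MetaData, Table, create_engine

engine = create_engine('sqlite://')

# Define and create the table against one MetaData object.
old_meta = MetaData()
Table('volume_usage_cache', old_meta,
      Column('id', Integer, primary_key=True),
      Column('curr_write_bytes', BigInteger, default=0))
old_meta.create_all(engine)

# The defining MetaData still holds the Python-side column types.
print(old_meta.tables['volume_usage_cache'].c.curr_write_bytes.type)  # BIGINT

# A fresh MetaData has to reflect the table from the database, so the
# column types are whatever the dialect reports for the live table.
reflected = Table('volume_usage_cache', MetaData(), autoload_with=engine)
print(reflected.c.curr_write_bytes.type)  # BIGINT on recent versions, NullType on the old one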
Hi, I have the following tables:
nfiletable = Table(
    'NFILE', base.metadata,
    Column('fileid', Integer, primary_key=True),
    Column('path', String(300)),
    Column('filename', String(50)),
    Column('filesize', Integer),
    schema='NATIVEFILES')  # ,autoload=True,autoload_with=engine)

sheetnames_table = Table(
    'SHEETNAMES', base.metadata, schema='NATIVEFILES',
    autoload=True, autoload_with=engine)

nfile_sheet_table = Table(
    'NFILE_SHEETNAME', base.metadata,
    Column('fileid', Integer, ForeignKey(nfiletable.c.fileid)),
    Column('sheetid', Integer, ForeignKey(sheetnames_table.c.sheet_id)),
    schema='NATIVEFILES')
and mappers:
nfile_mapper = mapper(Nfile, nfiletable)

mapper(Sheet, sheetnames_table, properties={
    'files': relation(
        Nfile, secondary=nfile_sheet_table,
        primaryjoin=(sheetnames_table.c.sheet_id == nfile_sheet_table.c.sheetid),
        secondaryjoin=(nfile_sheet_table.c.fileid == nfiletable.c.fileid),
        foreign_keys=[nfile_sheet_table.c.sheetid, nfile_sheet_table.c.fileid],
        backref='sheets')
})
When I do the following:
upl = Session.query(Nfile).filter_by(fileid=k).one()
sheetdb = []
for sheet in sheetstoadd:
    s = sheetcache[sheetname]
    sheetdb.append(s)
upl.sheets = sheetdb
Session.save(upl)
Session.flush()
The line upl.sheets = sheetdb takes forever.
It seems that all files for each sheet in sheetdb are loaded from the DB.
How can I prevent this?
If Nfile.sheets references a huge collection, put lazy='dynamic' on the backref:
mapper(Sheet, sheetnames_table, properties={
    'files': relation(
        Nfile, secondary=nfile_sheet_table,
        backref=backref('sheets', lazy='dynamic'))
})
All the primaryjoin/secondaryjoin/foreign_keys stuff is also not needed since your nfile_sheet_table has ForeignKey constructs on it.
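With a dynamic backref, the collection is meant to be appended to rather than replaced wholesale, which avoids loading what is already in the database; a sketch reusing the names from the question:

upl = Session.query(Nfile).filter_by(fileid=k).one()

# upl.sheets is now an AppenderQuery: appending records the new association
# rows (inserted at flush time) without loading the existing collections.
for s in sheetdb:
    upl.sheets.append(s)
Session.flush()

# Reading it emits a query instead of pulling every row into memory,
# so it can be counted or filtered cheaply.
print(upl.sheets.count())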