Reflecting Oracle DB with SQLAlchemy doesn't reflect the columns [duplicate]

I'm new to Python and SQLAlchemy, and I ran into a curious problem, shown below.
user = Table('users', meta, autoload=True, autoload_with=engine)
then I
print(user.columns)
it works fine; the output is user.ID, user.Name, etc. But then:
Session = sessionmaker(bind=engine)
session = Session()
session.query(user).order_by(user.id)
it shows this error:
AttributeError: 'Table' object has no attribute 'id'
I changed "id" to "Name"; same error.
I also tried the filter_by method; same error.
Why does this happen?

You could use the .c collection, which is how a Core Table object exposes its columns:
session.query(user).order_by(user.c.id)
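A Table reflected this way is a Core object rather than an ORM-mapped class, so its columns live under .c instead of being direct attributes. A minimal sketch of ordering and filtering through .c (column names assumed from the question):

session.query(user).order_by(user.c.id)
session.query(user).filter(user.c.id == 1)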

Since this is the top result when you search for this error, I'll drop my solution here as well.
For my problem, I had to reference the class instead of the table in my model's remote_side,
so it had to be "Transaction.id" instead of transactions.id:
class Transaction(Base):
    __tablename__ = "transactions"
    ...
    offset_transaction = relationship(
        ...
        remote_side="Transaction.id",
    )
    ...
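For context, a fuller self-referential sketch of the same idea (the offset_id foreign key column is an assumption for illustration):

from sqlalchemy import Column, ForeignKey, Integer
from sqlalchemy.orm import relationship

class Transaction(Base):
    __tablename__ = "transactions"
    id = Column(Integer, primary_key=True)
    # hypothetical FK pointing back at the same table
    offset_id = Column(Integer, ForeignKey("transactions.id"))
    # remote_side must name the class attribute, not the table column
    offset_transaction = relationship("Transaction", remote_side="Transaction.id")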

For people stumbling onto this after many things have changed, here is a solution that works now.
I'm also new to this, and for me all of the answers above failed to work, but using select instead of query worked. Sorry for the different table and column names; this was from an actual query.
Using an MSSQL database with an ODBC driver.
from sqlalchemy import select, create_engine, MetaData, Table, URL
from sqlalchemy.orm import Session

connect_url = URL.create(
    'mssql+pyodbc',
    host='server',
    port='port',
    database='database',
    query=dict(driver='SQL Server Native Client 11.0'))
engine = create_engine(connect_url)
connection = engine.connect()
metadata = MetaData()
reqgroups = Table('ReqGroup', metadata, autoload_with=engine)
# Print the column names. This works and shows REQGROUPID as one of the columns
print(reqgroups.columns.keys())
with Session(engine) as session:
    # Filter with column REQGROUPID
    stmt = select(reqgroups).filter_by(REQGROUPID="Tarve_ts")
    ResultSet = session.execute(stmt).fetchone()
    print(ResultSet)
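As a side note, the same filter can also be written 2.0-style with an explicit where clause on the reflected column (a sketch, reusing the names above):

stmt = select(reqgroups).where(reqgroups.c.REQGROUPID == "Tarve_ts")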


SQLAlchemy getting a row using filter() but not with get() function

I'm using SQLAlchemy==1.3.18 and Flask==1.1.2
My problem is that I want to update a row that's inserted using a Stored Procedure. Here is my process:
My engine to the DB:
from sqlalchemy import create_engine
engine = create_engine('mysql+pymysql://db_user:db_password@db_host:db_port')
My model:
class Employee(declarative_base()):
    __tablename__ = "EMPLOYEE"
    id = Column(Integer, primary_key=True)
    name = Column(String(30))
    last_name = Column(String(30))
I execute a Stored Procedure using this function:
def call_procedure(function_name, params):
    connection = cloudsql.Engine.raw_connection()
    try:
        cursor = connection.cursor()
        cursor.callproc(function_name, params)
        results = list(cursor.fetchall())
        cursor.close()
        connection.commit()
        return results
    finally:
        connection.close()
Then I recover the id of the inserted record and store it in the variable new_employee_id.
After that I try to execute this: session.query(Employee).get(new_employee_id) and I get None.
But if I do the query using session.query(Employee).filter(Employee.id == new_employee_id).all()
I get the record.
Any ideas about this?
I'm thinking that maybe the get method works over the index, and since the table has not been reindexed at that moment, the query cannot find the record, whereas filter searches the whole table and does find it.
Update: My model class was added to this question.
Update 2: I added how I create my engine in SQLAlchemy.

SQLAlchemy Create View in PostgreSQL

I am trying to create a view with SQLAlchemy, with PostgreSQL as the underlying DB. The separate select query used to build the view works well and returns results, but when I select from the view I get the error sqlalchemy.exc.NoSuchTableError: popular, which means the view does not exist. Creating the view does not throw any error, but it does not actually create the view. Here is my code:
from sqlalchemy import *
import sqlalchemy as db
from sqlalchemy import func
from sqlalchemy import desc
from sqlalchemy import Table
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.expression import Executable, ClauseElement

try:
    engine = db.create_engine('postgresql://user:pass@localhost:5432/db_name')
    connection = engine.connect()
except:
    print('Error establishing DB connection')

# Import metadata
metadata = db.MetaData()

# Import articles, authors and log tables
art = db.Table('articles', metadata, autoload=True, autoload_with=engine)
aut = db.Table('authors', metadata, autoload=True, autoload_with=engine)
log = db.Table('log', metadata, autoload=True, autoload_with=engine)

class CreateView(Executable, ClauseElement):
    def __init__(self, name, select):
        self.name = name
        self.select = select

@compiles(CreateView)
def visit_create_view(element, compiler, **kw):
    return "CREATE VIEW %s AS %s" % (
        element.name,
        compiler.process(element.select, literal_binds=True)
    )

# Method to create view with top three articles
def view_top_three():
    top_three_view = CreateView('popular', db.select([art.columns.title, func.count(log.columns.path)]) \
        .where(func.concat('/article/', art.columns.slug) == log.columns.path) \
        .where(log.columns.path != "/") \
        .group_by(log.columns.path, art.columns.title) \
        .order_by(desc(func.count(log.columns.path))) \
        .limit(3))
    engine.execute(top_three_view)
    v = Table('popular', metadata, autoload=True, autoload_with=engine)
    for r in engine.execute(v.select()):
        print(r)

# Call the method which creates view and selects from view
view_top_three()
Any help will be appreciated.
Since your CreateView inherits from Executable and ClauseElement, it is not considered a data-changing operation. In other words,
engine.execute(top_three_view)
executes the CREATE VIEW statement and then implicitly rolls back when the connection is returned to the pool.
Instead it should be a subclass of DDLElement, as shown in the usage recipes wiki. Simply changing the base class will allow SQLAlchemy's autocommit to work properly.
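A minimal sketch of that change (same compiler hook as in the question; only the base class differs):

from sqlalchemy.ext.compiler import compiles
from sqlalchemy.schema import DDLElement

class CreateView(DDLElement):
    def __init__(self, name, select):
        self.name = name
        self.select = select

@compiles(CreateView)
def visit_create_view(element, compiler, **kw):
    return "CREATE VIEW %s AS %s" % (
        element.name,
        compiler.process(element.select, literal_binds=True)
    )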
I found the solution. The issue had to do with autocommit. Setting autocommit to true when creating the engine solved it, as follows:
engine = db.create_engine('postgresql://user:pass@localhost:5432/db_name').execution_options(autocommit=True)
Special mention to @ilja-everilä.
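As a side note, an alternative that avoids relying on autocommit detection is to run the DDL inside an explicit transaction block, which commits on success (a sketch, reusing top_three_view from above):

with engine.begin() as conn:
    conn.execute(top_three_view)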

How to do a proper upsert using sqlalchemy on postgresql?

I would like to do an upsert using the "new" functionality added by PostgreSQL 9.5, using SQLAlchemy Core. While it is implemented, I'm pretty confused by the syntax, which I can't adapt to my needs.
Here is sample code of what I would like to be able to do:
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, MetaData, Table, bindparam, create_engine

Base = declarative_base()

class User(Base):
    __tablename__ = 'test'
    a_id = Column('id', Integer, primary_key=True)
    a = Column("a", Integer)

engine = create_engine('postgres://name:password@localhost/test')
User().metadata.create_all(engine)
meta = MetaData(engine)
meta.reflect()
table = Table('test', meta, autoload=True)
conn = engine.connect()

from sqlalchemy.dialects.postgresql import insert as psql_insert
stmt = psql_insert(table).values({
    table.c['id']: bindparam('id'),
    table.c['a']: bindparam('a'),
})
stmt = stmt.on_conflict_do_update(
    index_elements=[table.c['id']],
    set_={'a': bindparam('a')},
)
list_of_dictionary = [{'id': 1, 'a': 1}, {'id': 2, 'a': 2}]
conn.execute(stmt, list_of_dictionary)
I basically want to bulk insert rows, and if an id is already taken, update that row with the value I initially wanted to insert.
However SQLAlchemy throws this error:
CompileError: bindparam() name 'a' is reserved for automatic usage in the VALUES or SET clause of this insert/update statement. Please use a name other than column name when using bindparam() with insert() or update() (for example, 'b_a').
While it is a known issue (see https://groups.google.com/forum/#!topic/sqlalchemy/VwiUlF1cz_o), I didn't find any proper answer that doesn't require modifying either the keys of list_of_dictionary or the names of your columns.
I want to know if there is a way of constructing stmt so that its behavior is consistent and does not depend on whether the keys of list_of_dictionary are the names of the columns of the inserted table (my code works without error in those cases).
This does the trick for me:
import warnings

from sqlalchemy import create_engine
from sqlalchemy import MetaData, Table
from sqlalchemy.dialects import postgresql
from sqlalchemy.inspection import inspect

def upsert(engine, schema, table_name, records=[]):
    metadata = MetaData(schema=schema)
    metadata.bind = engine
    table = Table(table_name, metadata, schema=schema, autoload=True)
    # get list of fields making up primary key
    primary_keys = [key.name for key in inspect(table).primary_key]
    # assemble base statement
    stmt = postgresql.insert(table).values(records)
    # define dict of non-primary keys for updating
    update_dict = {
        c.name: c
        for c in stmt.excluded
        if not c.primary_key
    }
    # cover case when all columns in table comprise a primary key,
    # in which case upsert is identical to 'on conflict do nothing'
    if update_dict == {}:
        warnings.warn('no updateable columns found for table')
        # we still want to insert without errors (separate helper, not shown here)
        insert_ignore(table_name, records)
        return None
    # assemble new statement with 'on conflict do update' clause
    update_stmt = stmt.on_conflict_do_update(
        index_elements=primary_keys,
        set_=update_dict,
    )
    # execute
    with engine.connect() as conn:
        result = conn.execute(update_stmt)
        return result
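Hypothetical usage, assuming an engine bound to a database with a public.test table whose primary key is id:

result = upsert(engine, 'public', 'test',
                records=[{'id': 1, 'a': 1}, {'id': 2, 'a': 2}])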
For anyone looking for an ORM solution, the following worked for me:
from typing import Any, Dict, Union

from sqlalchemy.orm import DeclarativeMeta, scoped_session, sessionmaker  # SQLAlchemy 1.4+

def upsert(
    sa_sessionmaker: Union[sessionmaker, scoped_session],
    model: DeclarativeMeta,
    get_values: Dict[str, Any],
    update_values: Dict[str, Any],
) -> Any:
    """Upserts (updates if exists, else inserts) a SQLAlchemy model object.

    Note that get_values must uniquely identify a single model object (row) for this
    function to work.

    Args:
        sa_sessionmaker: SQLAlchemy sessionmaker to connect to the database.
        model: Model declarative metadata.
        get_values: Arguments used to try to retrieve an existing object.
        update_values: Desired attributes for the object fetched via get_values,
            or the new object if nothing was fetched.

    Returns:
        Model object subject to upsert.
    """
    with sa_sessionmaker() as session:
        instance = session.query(model).filter_by(**get_values).one_or_none()
        if instance:
            for attr, new_val in update_values.items():
                setattr(instance, attr, new_val)
        else:
            create_kwargs = get_values | update_values
            session.add(model(**create_kwargs))
        session.commit()
        instance = session.query(model).filter_by(**get_values).one_or_none()
    return instance
A few remarks:
If the primary key of the object is known, using Session.merge() is likely a better alternative than the function above. In that sense, the function above assumes that the primary key is not known (and hence not part of get_values).
sa_sessionmaker is a factory for Session objects (see the docs).
model takes a SQLAlchemy declarative metadata (i.e., a "table"; see the docs).
Python >= 3.9 is required for the implementation above. If your environment runs an earlier version of Python, replace create_kwargs = get_values | update_values with create_kwargs = {**get_values, **update_values}.
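For the known-primary-key case, the Session.merge() route mentioned above looks roughly like this (a sketch; the attribute names passed to the model are assumptions):

with sa_sessionmaker() as session:
    # merge() fetches the row by primary key and updates it, or inserts it if absent
    instance = session.merge(model(id=1, name="new name"))
    session.commit()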

SQLAlchemy not finding Postgres table connected with postgres_fdw

Please excuse any terminology typos; I don't have a lot of experience with databases other than SQLite. I'm trying to replicate what I would do in SQLite, where I could ATTACH a database to a second database and query across all the tables. I wasn't using SQLAlchemy with SQLite.
I'm working with SQLAlchemy 1.0.13, Postgres 9.5 and Python 3.5.2 (using Anaconda) on Win7/64. I have connected two databases (on localhost) using postgres_fdw and imported a few of the tables from the secondary database. I can successfully query the connected table manually with SQL in PgAdminIII and from Python using psycopg2. With SQLAlchemy I've tried:
# Same connection string info that psycopg2 used
engine = create_engine(conn_str, echo=True)

class TestTable(Base):
    __table__ = Table('test_table', Base.metadata,
                      autoload=True, autoload_with=engine)
    # Added this when I got the error the first time
    # test_id is a primary key in the secondary table
    Column('test_id', Integer, primary_key=True)
and get the error:
sqlalchemy.exc.ArgumentError: Mapper Mapper|TestTable|test_table could not assemble any primary key columns for mapped table 'test_table'
Then I tried:
insp = reflection.Inspector.from_engine(engine)
print(insp.get_table_names())
and the attached tables aren't listed (the tables from the primary database do show up). Is there a way to do what I am trying to accomplish?
In order to map a table, SQLAlchemy needs at least one column denoted as a primary key column. This does not mean that the column need actually be a primary key column in the eyes of the database, though it is a good idea. Depending on how you've imported the table from your foreign schema, it may not have a representation of a primary key constraint, or of any other constraints for that matter. You can work around this by either overriding the reflected primary key column in the Table instance (not in the mapped class's body), or better yet by telling the mapper which columns comprise the candidate key:
engine = create_engine(conn_str, echo=True)

test_table = Table('test_table', Base.metadata,
                   autoload=True, autoload_with=engine)

class TestTable(Base):
    __table__ = test_table
    __mapper_args__ = {
        'primary_key': (test_table.c.test_id,)  # candidate key columns
    }
To inspect foreign table names use the PGInspector.get_foreign_table_names() method:
print(insp.get_foreign_table_names())
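For example, the inspector can be obtained with sqlalchemy.inspect(), which returns the dialect-specific PGInspector for a PostgreSQL engine (a sketch):

from sqlalchemy import inspect

insp = inspect(engine)
print(insp.get_foreign_table_names())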
Building on the sibling answer by @ilja:
When using the SQLAlchemy automap feature to automatically generate mapped classes and relationships from an existing database schema, I found that the __mapper_args__ solution didn't create the model.
This alternative method, where you manually define the primary key, will correctly enable automap to create your model.
from sqlalchemy import Column, create_engine, Text
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.schema import Table

Base = automap_base()
engine = create_engine(conn_str, convert_unicode=True)

pk = Column('uid', Text, primary_key=True)
test_table = Table(
    'test_table', Base.metadata, pk, autoload=True, autoload_with=engine
)

# Inspect postgres schema
Base.prepare(engine, reflect=True)
print(dict(Base.classes))
print(test_table)
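Once prepared, the automap-generated class should then be reachable from Base.classes (class name assumed to follow the table name):

TestTable = Base.classes.test_table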

SQLAlchemy if table does not exist

I wrote a module which creates an empty database file:
def create_database():
    engine = create_engine("sqlite:///myexample.db", echo=True)
    metadata = MetaData(engine)
    metadata.create_all()
But in another function, I want to open the myexample.db database and create tables in it if it doesn't already have them.
E.g. the first table I would subsequently create would be:
Table(Variable_TableName, metadata,
      Column('Id', Integer, primary_key=True, nullable=False),
      Column('Date', Date),
      Column('Volume', Float))
(Since it is initially an empty database, it will have no tables in it, but subsequently I can add more tables to it. That's what I'm trying to say.)
Any suggestions?
I've managed to figure out what I intended to do. I used engine.dialect.has_table(engine, Variable_tableName) to check whether the database has the table. If it doesn't, it proceeds to create the table in the database.
Sample code:
engine = create_engine("sqlite:///myexample.db") # Access the DB Engine
if not engine.dialect.has_table(engine, Variable_tableName): # If table don't exist, Create.
metadata = MetaData(engine)
# Create a table with the appropriate Columns
Table(Variable_tableName, metadata,
Column('Id', Integer, primary_key=True, nullable=False),
Column('Date', Date), Column('Country', String),
Column('Brand', String), Column('Price', Float),
# Implement the creation
metadata.create_all()
This seems to be giving me what i'm looking for.
Note that the 'Base.metadata' documentation states about create_all:
Conditional by default, will not attempt to recreate tables already present in the target database.
And create_all takes these arguments: create_all(self, bind=None, tables=None, checkfirst=True), where according to the documentation checkfirst:
Defaults to True, don't issue CREATEs for tables already present in the target database.
So if I understand your question correctly, you can just skip the condition.
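In other words, a sketch of the simplification, reusing the question's myexample.db and table definition:

engine = create_engine("sqlite:///myexample.db")
metadata = MetaData()
Table(Variable_TableName, metadata,
      Column('Id', Integer, primary_key=True, nullable=False),
      Column('Date', Date),
      Column('Volume', Float))
# checkfirst=True is the default: existing tables are skipped, missing ones created
metadata.create_all(engine)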
The accepted answer prints a warning that engine.dialect.has_table() is only for internal use and not part of the public API. The message suggests this as an alternative, which works for me:
import os
import sqlalchemy

# Set up a connection to a SQLite3 DB
test_db = os.getcwd() + "/test.sqlite"
db_connection_string = "sqlite:///" + test_db
engine = sqlalchemy.create_engine(db_connection_string)

# The recommended way to check for existence
sqlalchemy.inspect(engine).has_table("BOOKS")
See also the SQLAlchemy docs.
For those who define their tables in some models.tables file, among other tables:
this is a code snippet for finding the class that represents the table we want to create (so that later we can use the same code to just query it).
Together with the check written above, I still run the creation with checkfirst=True:
ORMTable.__table__.create(bind=engine, checkfirst=True)
models.tables:
class TableA(Base):
    ...

class TableB(Base):
    ...

class NewTableC(Base):
    id = Column('id', Text)
    name = Column('name', Text)
Then in the form action file:
import importlib

engine = create_engine("sqlite:///myexample.db")
if not engine.dialect.has_table(engine, table_name):
    # Added the new table I needed to models.tables (same Table format as written above)
    table_models = importlib.import_module('models.tables')
    # Grab the class that represents the new table
    # table_name = 'NewTableC'
    ORMTable = getattr(table_models, table_name)
    # checkfirst=True to make sure it doesn't already exist
    ORMTable.__table__.create(bind=engine, checkfirst=True)
engine.dialect.has_table does not work for me with cx_oracle.
I get AttributeError: 'OracleDialect_cx_oracle' object has no attribute 'default_schema_name'.
I wrote a workaround function:
from sqlalchemy.engine.base import Engine

def orcl_tab_or_view_exists(in_engine: Engine, in_object: str, in_object_name: str) -> bool:
    """Checks if an Oracle table or view exists on the current in_engine connection.

    in_object: 'table' | 'view'
    in_object_name: table_name | view_name
    """
    obj_query = """SELECT {o}_name FROM all_{o}s WHERE owner = SYS_CONTEXT('userenv', 'current_schema') AND {o}_name = '{on}'""".format(o=in_object, on=in_object_name.upper())
    with in_engine.connect() as connection:
        result = connection.execute(obj_query)
        return len(list(result)) > 0
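Hypothetical usage (the table name here is just an example):

print(orcl_tab_or_view_exists(engine, 'table', 'employees'))  # True if EMPLOYEES exists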
This is the code that works for me to create all tables for all model classes defined against a Base class:
from sqlalchemy import Column, Integer, create_engine
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class YourTable(Base):
    __tablename__ = 'your_table'
    id = Column(Integer, primary_key=True)

DB_URL = "mysql+mysqldb://<user>:<password>@<host>:<port>/<db_name>"
scoped_engine = create_engine(DB_URL)
Base.metadata.create_all(scoped_engine)
