sqlalchemy update bindparam primary key - python

The following code throws "sqlalchemy.exc.CompileError: Unconsumed column names: _id".
User = Table('users', metadata,
    Column('id', Integer, primary_key=True),
    Column('score', Integer)
)
values = [
    {'score': 2, '_id': 1},
    {'score': 3, '_id': 3}
]
query = User.update().where(User.c.id == bindparam('_id')).values(score=bindparam('score'))
await db.execute_many(query, values)
db is an instance of databases.Database. Notice that I have to use the name '_id' because SQLAlchemy says 'id' is reserved.
Is there any solution other than updating each row individually?

Database.execute_many() calls Connection.execute_many(), which breaks your query up into separate individual queries, one per element in values. Here's the method (source):
async def execute_many(
    self, query: typing.Union[ClauseElement, str], values: list
) -> None:
    queries = [self._build_query(query, values_set) for values_set in values]
    async with self._query_lock:
        await self._connection.execute_many(queries)
Note that it calls the _build_query() method (source):
@staticmethod
def _build_query(
    query: typing.Union[ClauseElement, str], values: dict = None
) -> ClauseElement:
    if isinstance(query, str):
        query = text(query)
        return query.bindparams(**values) if values is not None else query
    elif values:
        return query.values(**values)
    return query
As you aren't passing a str query and you are passing values, control enters the elif values: branch, where the individual dict of values is unpacked into the .values() method on your query (which is Update.values()). That essentially makes the query it's trying to compile this:
query = (
    User.update()
    .where(User.c.id == bindparam("_id"))
    .values(score=bindparam("score"))
    .values(score=2, _id=1)
)
That second values clause results in a new Update with new bind params that are trying to set values for both score and _id. This causes compilation of the query to fail as there is no _id column on the table.
So the MCVE to reproduce the error is really this:
from sqlalchemy.dialects import postgresql
User.update().values(score=2, _id=1).compile(dialect=postgresql.dialect())
Which raises:
Traceback (most recent call last):
  File ".\main.py", line 31, in <module>
    User.update().values(score=2, _id=1).compile(dialect=postgresql.dialect())
  File "<string>", line 1, in <lambda>
  File "C:\Users\peter\Documents\git\stackoverflow\58668615-sqalchemy-update-bindparam-primary-key\.venv\lib\site-packages\sqlalchemy\sql\elements.py", line 462, in compile
    return self._compiler(dialect, bind=bind, **kw)
  File "C:\Users\peter\Documents\git\stackoverflow\58668615-sqalchemy-update-bindparam-primary-key\.venv\lib\site-packages\sqlalchemy\sql\elements.py", line 468, in _compiler
    return dialect.statement_compiler(dialect, self, **kw)
  File "C:\Users\peter\Documents\git\stackoverflow\58668615-sqalchemy-update-bindparam-primary-key\.venv\lib\site-packages\sqlalchemy\sql\compiler.py", line 571, in __init__
    Compiled.__init__(self, dialect, statement, **kwargs)
  File "C:\Users\peter\Documents\git\stackoverflow\58668615-sqalchemy-update-bindparam-primary-key\.venv\lib\site-packages\sqlalchemy\sql\compiler.py", line 319, in __init__
    self.string = self.process(self.statement, **compile_kwargs)
  File "C:\Users\peter\Documents\git\stackoverflow\58668615-sqalchemy-update-bindparam-primary-key\.venv\lib\site-packages\sqlalchemy\sql\compiler.py", line 350, in process
    return obj._compiler_dispatch(self, **kwargs)
  File "C:\Users\peter\Documents\git\stackoverflow\58668615-sqalchemy-update-bindparam-primary-key\.venv\lib\site-packages\sqlalchemy\sql\visitors.py", line 92, in _compiler_dispatch
    return meth(self, **kw)
  File "C:\Users\peter\Documents\git\stackoverflow\58668615-sqalchemy-update-bindparam-primary-key\.venv\lib\site-packages\sqlalchemy\sql\compiler.py", line 2569, in visit_update
    self, update_stmt, crud.ISUPDATE, **kw
  File "C:\Users\peter\Documents\git\stackoverflow\58668615-sqalchemy-update-bindparam-primary-key\.venv\lib\site-packages\sqlalchemy\sql\crud.py", line 62, in _setup_crud_params
    return _get_crud_params(compiler, stmt, **kw)
  File "C:\Users\peter\Documents\git\stackoverflow\58668615-sqalchemy-update-bindparam-primary-key\.venv\lib\site-packages\sqlalchemy\sql\crud.py", line 177, in _get_crud_params
    % (", ".join("%s" % c for c in check))
sqlalchemy.exc.CompileError: Unconsumed column names: _id
To summarise the issue: you build a query with bind params passed to both Update.where() and Update.values(). You then pass that query and your values to Database.execute_many(), which unpacks each element of your values list into a second call to Update.values() on your query, replacing it with one that tries to set a value for an _id column that doesn't exist.
Is there any solution other than updating each row individually?
Well, the query as constructed works just fine when executed with a plain SQLAlchemy engine:
# using a sqlalchemy engine
engine.execute(query, values)
Otherwise, what should work is sending the query in as a string to Database.execute_many(), as the query is then handled by the if isinstance(query, str): branch of _build_query(), which avoids the second .values() call being made on the query:
await db.execute_many(str(query), values)
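For completeness, a minimal sketch of that string-query workaround, assuming db, User, bindparam and values are defined as in the question:
query = (
    User.update()
    .where(User.c.id == bindparam("_id"))
    .values(score=bindparam("score"))
)
# Compiling to a string means _build_query() takes the isinstance(query, str)
# branch and calls .bindparams(**values) per element instead of .values(**values),
# so the '_id' key binds to the WHERE clause parameter rather than to a column.
await db.execute_many(str(query), values)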

Related

sqlalchemy core with postgresql in python, connection.execute(..) error

I am learning SQLAlchemy Core with a PostgreSQL database in Python.
I tried to run the following script and got this error message:
from sqlalchemy import create_engine
from sqlalchemy import Table, MetaData, String

engine = create_engine('postgresql://postgres:123456@localhost:5432/red30')
with engine.connect() as connection:
    meta = MetaData(engine)
    sales_table = Table('sales', meta)
    # Create
    insert_statement = sales_table.insert().values(order_num=1105911,
                                                   order_type='Retail',
                                                   cust_name='Syman Mapstone',
                                                   prod_number='EB521',
                                                   prod_name='Understanding Artificial Intelligence',
                                                   quantity=3,
                                                   price=19.5,
                                                   discount=0,
                                                   order_total=58.5)
    connection.execute(insert_statement)
    # Read
    select_statement = sales_table.select().limit(10)
    result_set = connection.execute(select_statement)
    for r in result_set:
        print(r)
    # Update
    update_statement = sales_table.update().where(sales_table.c.order_num==1105910).values(quantity=2, order_total=39)
    connection.execute(update_statement)
    # Confirm Update: Read
    reselect_statement = sales_table.select().where(sales_table.c.order_num==1105910)
    updated_set = connection.execute(reselect_statement)
    for u in updated_set:
        print(u)
    # Delete
    delete_statement = sales_table.delete().where(sales_table.c.order_num==1105910)
    connection.execute(delete_statement)
    # Confirm Delete: Read
    not_found_set = connection.execute(reselect_statement)
    print(not_found_set.rowcount)
error message:
(postgres-prac) E:\xfile\postgresql\postgres-prac>python postgres-sqlalchemy-core.py
Traceback (most recent call last):
  File "postgres-sqlalchemy-core.py", line 20, in <module>
    connection.execute(insert_statement)
  File "E:\xfile\postgresql\postgres-prac\lib\site-packages\sqlalchemy\engine\base.py", line 1414, in execute
    return meth(
  File "E:\xfile\postgresql\postgres-prac\lib\site-packages\sqlalchemy\sql\elements.py", line 485, in _execute_on_connection
    return connection._execute_clauseelement(
  File "E:\xfile\postgresql\postgres-prac\lib\site-packages\sqlalchemy\engine\base.py", line 1630, in _execute_clauseelement
    compiled_sql, extracted_params, cache_hit = elem._compile_w_cache(
  File "E:\xfile\postgresql\postgres-prac\lib\site-packages\sqlalchemy\sql\elements.py", line 651, in _compile_w_cache
    compiled_sql = self._compiler(
  File "E:\xfile\postgresql\postgres-prac\lib\site-packages\sqlalchemy\sql\elements.py", line 290, in _compiler
    return dialect.statement_compiler(dialect, self, **kw)
  File "E:\xfile\postgresql\postgres-prac\lib\site-packages\sqlalchemy\sql\compiler.py", line 1269, in __init__
    Compiled.__init__(self, dialect, statement, **kwargs)
  File "E:\xfile\postgresql\postgres-prac\lib\site-packages\sqlalchemy\sql\compiler.py", line 710, in __init__
    self.string = self.process(self.statement, **compile_kwargs)
  File "E:\xfile\postgresql\postgres-prac\lib\site-packages\sqlalchemy\sql\compiler.py", line 755, in process
    return obj._compiler_dispatch(self, **kwargs)
  File "E:\xfile\postgresql\postgres-prac\lib\site-packages\sqlalchemy\sql\visitors.py", line 143, in _compiler_dispatch
    return meth(self, **kw)  # type: ignore # noqa: E501
  File "E:\xfile\postgresql\postgres-prac\lib\site-packages\sqlalchemy\sql\compiler.py", line 5317, in visit_insert
    crud_params_struct = crud._get_crud_params(
  File "E:\xfile\postgresql\postgres-prac\lib\site-packages\sqlalchemy\sql\crud.py", line 326, in _get_crud_params
    raise exc.CompileError(
sqlalchemy.exc.CompileError: Unconsumed column names: order_type, quantity, cust_name, discount, prod_number, price, order_total, order_num, prod_name
You define your table as an empty table:
sales_table = Table('sales', meta)
So when trying to insert a record with all those keywords, they cannot be mapped to columns and do not get consumed, hence the Unconsumed column names error.
You need to define the table columns in your Table creation. See the following example from the docs:
from sqlalchemy import Table, Column, Integer, String

user = Table(
    "user",
    metadata_obj,
    Column("user_id", Integer, primary_key=True),
    Column("user_name", String(16), nullable=False),
    Column("email_address", String(60)),
    Column("nickname", String(50), nullable=False),
)
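Applied to the question, a minimal sketch of what the sales table definition could look like; the column types below are assumptions inferred from the values passed to insert(), not the real schema:
from sqlalchemy import Table, Column, Integer, String, Numeric, MetaData

meta = MetaData()
sales_table = Table(
    'sales', meta,
    Column('order_num', Integer, primary_key=True),   # assumed primary key
    Column('order_type', String(20)),
    Column('cust_name', String(100)),
    Column('prod_number', String(20)),
    Column('prod_name', String(200)),
    Column('quantity', Integer),
    Column('price', Numeric(10, 2)),
    Column('discount', Numeric(10, 2)),
    Column('order_total', Numeric(10, 2)),
)
Alternatively, if the table already exists in the database, its columns can be reflected instead of re-declared, e.g. Table('sales', meta, autoload_with=engine) in SQLAlchemy 1.4+.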

How to get column names from custom TextClause query in SQL Alchemy?

I have an app that typically takes a SQL Alchemy selectable as an input and uses reflection.Inspector.from_engine(engine).get_columns(selectable.name, schema=selectable.schema) to get the columns for additional downstream logic.
This app also allows the user to pass a custom TextClause query as an input. Is it possible to reverse engineer the column names from the TextClause as is done with the selectable object?
What have I tried?
>>> import sqlalchemy as sa
>>> query = "SELECT col_1, col_2 FROM table"
>>> selectable = sa.text(query)
# The steps above cannot be altered
>>> type(selectable)
<class 'sqlalchemy.sql.elements.TextClause'>
>>> connection_string = "postgresql+psycopg2://<user>:<password>@localhost:5432/<db>"
>>> engine = sa.create_engine(connection_string)
>>> columns = sa.engine.reflection.Inspector.from_engine(engine).get_columns(selectable, schema=None)
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "/Users/user/opt/anaconda3/envs/env/lib/python3.7/site-packages/sqlalchemy/engine/reflection.py", line 498, in get_columns
    conn, table_name, schema, info_cache=self.info_cache, **kw
  File "<string>", line 2, in get_columns
  File "/Users/user/opt/anaconda3/envs/env/lib/python3.7/site-packages/sqlalchemy/engine/reflection.py", line 55, in cache
    ret = fn(self, con, *args, **kw)
  File "/Users/user/opt/anaconda3/envs/env/lib/python3.7/site-packages/sqlalchemy/dialects/postgresql/base.py", line 3578, in get_columns
    connection, table_name, schema, info_cache=kw.get("info_cache")
  File "<string>", line 2, in get_table_oid
  File "/Users/user/opt/anaconda3/envs/env/lib/python3.7/site-packages/sqlalchemy/engine/reflection.py", line 55, in cache
    ret = fn(self, con, *args, **kw)
  File "/Users/user/opt/anaconda3/envs/env/lib/python3.7/site-packages/sqlalchemy/dialects/postgresql/base.py", line 3457, in get_table_oid
    raise exc.NoSuchTableError(table_name)
sqlalchemy.exc.NoSuchTableError: SELECT col_1, col_2 FROM table
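As an illustrative sketch of one possible approach (not taken from the thread above, and assuming the query is cheap enough to execute once): executing the TextClause and reading the result's keys() returns the column names the database reports for that statement:
import sqlalchemy as sa

engine = sa.create_engine(connection_string)  # connection_string as in the question
selectable = sa.text("SELECT col_1, col_2 FROM table")

with engine.connect() as conn:
    # The cursor metadata carries the column names of the executed query.
    result = conn.execute(selectable)
    column_names = list(result.keys())  # e.g. ['col_1', 'col_2']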

KeyError while executing SQL with parameters

I am getting a KeyError while running the code below. I am trying to pass parameters using a separate parameters variable.
Code:
import teradata
host,username,password = 'hostname','uname', 'pwd'
udaExec = teradata.UdaExec (appName="APtest", version="1.0", logConsole=False)
connect = udaExec.connect(method="odbc",system=host, username=username, password=password, dsn="dsnname")
val1='NULL'
val2='NULL'
parameters={'param1':val1, 'param2': val2}
qry="""
SELECT number
FROM table
WHERE number = %(param1)s
AND col=%(param2)s
"""
connect.execute(qry, parameters)
Error:
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "/tmp/lib/python2.7/site-packages/teradata/udaexec.py", line 675, in execute
    self.internalCursor.execute(query, params, **kwargs)
  File "/tmp/lib/python2.7/site-packages/teradata/udaexec.py", line 745, in execute
    self._execute(self.cursor.execute, query, params, **kwargs)
  File "/tmp/lib/python2.7/site-packages/teradata/udaexec.py", line 787, in _execute
    logParamCharLimit)
  File "/tmp/lib/python2.7/site-packages/teradata/udaexec.py", line 875, in _getParamsString
    if isinstance(params[0], (list, tuple)):
KeyError: 0
If I write the query in the manner below, it works, but I have a very long list of parameters and therefore need them in a separate parameters variable.
This works:
qry="""
SELECT number
FROM table
WHERE number = '%s'
AND col='%s'
""" % (val1, val2)
Apparently, teradata does not support dictionaries for parameters. Use a list instead.
parameters = [val1, val2]
qry="""
SELECT number
FROM table
WHERE number = %s
AND col=%s
"""
connect.execute(qry, parameters)
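If the real query has many parameters, the list can still be built from a dict, as long as its order matches the placeholder order in the SQL; a minimal sketch (the names and ordering here are assumptions for illustration):
param_values = {'param1': val1, 'param2': val2}   # could hold many more entries
ordered_names = ['param1', 'param2']              # must match the placeholder order in qry
parameters = [param_values[name] for name in ordered_names]
connect.execute(qry, parameters)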

Python's Bottle with SQLite3

I have been trying to practise Bottle (Python). There is a tutorial about making an app: TODO.
It works fine, but if the task id exceeds 1 character (that is, 10 instead of 1, 2, ..., 9),
it shows an error like the one below.
ProgrammingError('Incorrect number of bindings supplied. The current
statement uses 1, and there are 2 supplied.',)
Code is:
@route('/edit/<no:int>', method='GET')
def edit_item(no):
    if request.GET.save:
        edit = request.GET.task.strip()
        status = request.GET.status.strip()
        if status == 'open':
            status = 1
        else:
            status = 0
        conn = sqlite3.connect('todo.db')
        c = conn.cursor()
        c.execute("UPDATE todo SET task = ?, status = ? WHERE id LIKE ?", (edit, status, no))
        conn.commit()
        return '<p>The item number %s was successfully updated</p>' % no
    else:
        conn = sqlite3.connect('todo.db')
        c = conn.cursor()
        c.execute("SELECT task FROM todo WHERE id LIKE ?", (str(no)))
        cur_data = c.fetchone()
        return template('edit_task', old=cur_data, no=no)
Tracebacks:
1.
Traceback (most recent call last):
  File "/usr/lib/python2.7/dist-packages/bottle.py", line 862, in _handle
    return route.call(**args)
  File "/usr/lib/python2.7/dist-packages/bottle.py", line 1737, in wrapper
    rv = callback(*a, **ka)
  File "todo.py", line 67, in edit_item
    c.execute('SELECT task FROM todo WHERE id LIKE ?', no)
ValueError: parameters are of unsupported type
2.
Traceback (most recent call last):
  File "/usr/lib/python2.7/dist-packages/bottle.py", line 862, in _handle
    return route.call(**args)
  File "/usr/lib/python2.7/dist-packages/bottle.py", line 1737, in wrapper
    rv = callback(*a, **ka)
  File "todo.py", line 67, in edit_item
    c.execute('SELECT task FROM todo WHERE id LIKE ?', (no))
ValueError: parameters are of unsupported type
What to do?
This happens because the execute function will unpack your second parameter: when you write (str(no)), the outer () does not create a tuple, so you need (str(no),) if the tuple is to have only one element.
For instance, since the value is recognised as a string, it will unpack "10" into ("1", "0"), i.e. two bindings supplied for the single placeholder.
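A minimal sketch of the fix; only the trailing comma changes:
# (str(no),) is a one-element tuple, so sqlite3 sees exactly one binding
# for the single ? placeholder, no matter how many digits `no` has.
c.execute("SELECT task FROM todo WHERE id LIKE ?", (str(no),))
cur_data = c.fetchone()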

Getting an error when copying a row from one table to another

I am writing a piece of Python code to copy one table from one MySQL db to another MySQL db.
I came across some problems: first, it was rendering null/empty values as 'None', which I had to convert to 'NULL'.
Now it is showing the following error:
pymysql.err.InternalError: (1630, u"FUNCTION datetime.datetime does not exist.
Check the 'Function Name Parsing and Resolution' section in the Reference Manual")
When I print the row I can see the entry datetime.datetime(2014, 8, 25, 8, 24, 51).
I tried to solve this problem by replacing datetime.datetime with datetime.time (http://pymotw.com/2/datetime/), but that also failed.
My code is as follows:
import re
from db import conn_main ### database from where to copy
from db import conn ### database where to copy
import datetime

curr1 = conn_main.cursor()
curr2 = conn.cursor()
query = 'SELECT * FROM mytable limit 10'
curr1.execute(query)
for row in curr1:
    if row[0] is None or not row[0]:
        print "error: empty row", row[0]
        continue
    else:
        print "ROW - %s\n" % str(row)
        row = re.sub('None', 'NULL', str(row))
        query = 'replace into mytable values ' + str(row)
        curr2.execute(query)
curr2.commit()
curr1.close()
curr2.close()
Traceback & output row:
ROW - (1, '501733938','xyz.emails#gmail.com', None, 'https://www.facebook.com/xyz',
None, None, None, None, None, '2014-08-10T06:06:33+0000', None, 'xyz', None,
datetime.datetime(2014, 8, 25, 8, 24, 51), None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, None)
Traceback (most recent call last):
File "MY_PYTHON_CODE_FILE_PATH", line 390, in <module> curr2.execute(query)
File "/usr/local/lib/python2.7/dist-packages/pymysql/cursors.py", line 132, in execute result = self._query(query)
File "/usr/local/lib/python2.7/dist-packages/pymysql/cursors.py", line 271, in _query conn.query(q)
File "/usr/local/lib/python2.7/dist-packages/pymysql/connections.py", line 726, in query self._affected_rows = self._read_query_result(unbuffered=unbuffered)
File "/usr/local/lib/python2.7/dist-packages/pymysql/connections.py", line 861, in _read_query_result result.read()
File "/usr/local/lib/python2.7/dist-packages/pymysql/connections.py", line 1064, in read first_packet = self.connection._read_packet()
File "/usr/local/lib/python2.7/dist-packages/pymysql/connections.py", line 826, in _read_packet packet.check_error()
File "/usr/local/lib/python2.7/dist-packages/pymysql/connections.py", line 370, in check_error raise_mysql_exception(self._data)
File "/usr/local/lib/python2.7/dist-packages/pymysql/err.py", line 116, in raise_mysql_exception _check_mysql_exception(errinfo)
File "/usr/local/lib/python2.7/dist-packages/pymysql/err.py", line 112, in _check_mysql_exception raise InternalError(errno, errorvalue)
Can someone help remove this error, or suggest a better way to copy a table from one database to another in Python?
According to the MySQL manual, up to v5.7 there is neither a DATETIME function (even though there is a datetime type) nor package support (datetime.*). It is Python's string conversion that generates datetime.datetime(...) from the datetime value coming out of your source database.
The problem is that you're trying to use the Python string representation of an object as SQL. That's wrong.
The repr of a datetime (which is what you get when you stringify the whole row tuple) looks like
datetime.datetime(year, month, day, hour, minute, second, microsecond)
which is not a valid SQL string.
If you know the index of this column, you can replace it with a string value (after converting the row tuple to a list, since tuples are immutable), like this:
row[dt_index] = row[dt_index].isoformat()
or with a concrete format your database accepts, e.g.:
row[dt_index] = row[dt_index].strftime('%Y-%m-%d %H:%M:%S')
But I suggest using a library or parameterised queries for this.
Building SQL this way is a very bad and unsafe solution.
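A minimal sketch of the parameterised approach suggested above, assuming both tables share the same column layout and that conn, curr1 and curr2 are as in the question (pymysql driver):
for row in curr1:
    # One %s placeholder per column; the driver escapes None, datetime and
    # string values itself, so no string munging of the row is needed.
    placeholders = ", ".join(["%s"] * len(row))
    curr2.execute("REPLACE INTO mytable VALUES (%s)" % placeholders, row)
conn.commit()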
