I am trying to update a boolean value within a database using Python and SQLAlchemy. Here is my code:
def update_record_to_hide_or_show(e2e_id, hide_error, event_time, study_id):
connection_string = _get_connection_string()
db = create_engine(connection_string)
roi_e2e_events = define_roi_e2e_events_table()
with db.connect() as conn:
if hide_error == "True":
update = roi_e2e_events.update().values(hide_error=True).where(roi_e2e_events.c.e2e_id == e2e_id)\
.where(roi_e2e_events.c.event_time == event_time)\
.where(roi_e2e_events.c.study_id == study_id)
print(update)
result = conn.execute(update)
else:
update = roi_e2e_events.update().values(hide_error=False).where(roi_e2e_events.c.e2e_id == e2e_id) \
.where(roi_e2e_events.c.event_time == event_time). \
where(roi_e2e_events.c.study_id == study_id)
result = conn.execute(update)
return result
I am able to enter the first portion of the conditional without problem, there is not an execution error displayed as I attempt to submit the query to the database, I've created the Metadata in a separate function and the update query looks like this:
UPDATE roi_e2e_events SET hide_error=:hide_error WHERE roi_e2e_events.e2e_id = :e2e_id_1 AND roi_e2e_events.event_time = :event_time_1 AND roi_e2e_events.study_id = :study_id_1
I don't see that the boolean value is changed to "True" after running this, what am I doing wrong here?
Without an example of the table + schema or what the arguments for this function look like (particularly hide_error), it's a little hard to be sure, but it looks like therecould be an issue on the hide_error == "True" line, since it's checking if that hide_error is the string "True", rather than the boolean True.
If it is in fact a boolean, we can actually get around the whole issue of checking what value it is by just using the not operator. Something like this:
def update_record_to_hide_or_show(e2e_id, hide_error, event_time, study_id):
connection_string = _get_connection_string()
db = create_engine(connection_string)
roi_e2e_events = define_roi_e2e_events_table()
with db.connect() as conn:
# no if statement here
update_query = roi_e2e_events.update().values(
hide_error=not hide_error # but notice the `not` here
).where(
roi_e2e_events.c.e2e_id == e2e_id
).where(
roi_e2e_events.c.event_time == event_time
).where(
roi_e2e_events.c.study_id == study_id
)
result = conn.execute(update)
return result
Also, if hide_error is being retrieved from the database, you can bundle it all up into a single UPDATE query like this
from sqlalchemy import not_ # this is used to invert the value of a boolean column in a query - on the database, rather than a value we have stored locally in a variable
def update_record_to_hide_or_show(e2e_id, event_time, study_id): # notice that `hide_error` is gone
connection_string = _get_connection_string()
db = create_engine(connection_string)
roi_e2e_events = define_roi_e2e_events_table()
with db.connect() as conn:
# still no if statement here
update_query = roi_e2e_events.update().values(
hide_error=not_(roi_e2e_events.c.hide_error) # here we use column, rather than its value, much like the `where` clauses
).where(
roi_e2e_events.c.e2e_id == e2e_id
).where(
roi_e2e_events.c.event_time == event_time
).where(
roi_e2e_events.c.study_id == study_id
)
result = conn.execute(update)
return result
where the update_query should look something like this:
UPDATE roi_e2e_events
SET hide_error=NOT roi_e2e_events.hide_error
WHERE roi_e2e_events.e2e_id = :e2e_id_1
AND roi_e2e_events.event_time = :event_time_1
AND roi_e2e_events.study_id = :study_id_1
Related
This question already has answers here:
How to use variables in SQL statement in Python?
(5 answers)
Closed 8 months ago.
If coding in python: lshank_quat_x = None will generate "None" directly in SQL insert statement, which causes an error. Is there another way to fix this problem except of making value = "NULL"? (value should not be a string).
Here's the code:
lines = []
# lshank
for i in range(len(sig.IMU.time)):
# lshank
lshank_accel_x = sig.IMU.Lshank.a[i][0]
lshank_accel_y = sig.IMU.Lshank.a[i][1]
lshank_accel_z = sig.IMU.Lshank.a[i][2]
lshank_gyro_x = sig.IMU.Lshank.w[i][0]
lshank_gyro_y = sig.IMU.Lshank.w[i][1]
lshank_gyro_z = sig.IMU.Lshank.w[i][2]
lshank_mag_x = sig.IMU.Lshank.m[i][0]
lshank_mag_y = sig.IMU.Lshank.m[i][1]
lshank_mag_z = sig.IMU.Lshank.m[i][2]
lshank_quat_x = "NULL"
lshank_quat_y = "NULL"
lshank_quat_z = "NULL"
# rshank
rshank_accel_x = sig.IMU.Rshank.a[i][0]
rshank_accel_y = sig.IMU.Rshank.a[i][1]
rshank_accel_z = sig.IMU.Rshank.a[i][2]
rshank_gyro_x = sig.IMU.Rshank.w[i][0]
rshank_gyro_y = sig.IMU.Rshank.w[i][1]
rshank_gyro_z = sig.IMU.Rshank.w[i][2]
rshank_mag_x = sig.IMU.Rshank.m[i][0]
rshank_mag_y = sig.IMU.Rshank.m[i][1]
rshank_mag_z = sig.IMU.Rshank.m[i][2]
rshank_quat_x = "NULL"
rshank_quat_y = "NULL"
rshank_quat_z = "NULL"
# sacrum
sacrum_accel_x = sig.IMU.Sacrum.a[i][0]
sacrum_accel_y = sig.IMU.Sacrum.a[i][1]
sacrum_accel_z = sig.IMU.Sacrum.a[i][2]
sacrum_gyro_x = sig.IMU.Sacrum.w[i][0]
sacrum_gyro_y = sig.IMU.Sacrum.w[i][1]
sacrum_gyro_z = sig.IMU.Sacrum.w[i][2]
sacrum_mag_x = sig.IMU.Sacrum.m[i][0]
sacrum_mag_y = sig.IMU.Sacrum.m[i][1]
sacrum_mag_z = sig.IMU.Sacrum.m[i][2]
sacrum_quat_x = "NULL"
sacrum_quat_y = "NULL"
sacrum_quat_z = "NULL"
# ground force
grf_x = sig.force[i][0]
grf_y = sig.force[i][1]
grf_z = sig.force[i][2]
insert_query = f"INSERT INTO {tableName} ({', '.join(field for field in var_list)}) VALUES ({sig.IMU.Lshank.time[i]}, {lshank_accel_x}, {lshank_accel_y}, {lshank_accel_z}, {lshank_gyro_x}, {lshank_gyro_y}, {lshank_gyro_z}, {lshank_mag_x}, {lshank_mag_y}, {lshank_mag_z},{lshank_quat_x},{lshank_quat_y},{lshank_quat_z},{sig.IMU.Rshank.time[i]},{rshank_accel_x}, {rshank_accel_y}, {rshank_accel_z},{rshank_gyro_x},{rshank_gyro_y},{rshank_gyro_z},{rshank_mag_x},{rshank_mag_y},{rshank_mag_z},{rshank_quat_x},{rshank_quat_y},{rshank_quat_z},{sig.IMU.Sacrum.time[i]},{sacrum_accel_x}, {sacrum_accel_y}, {sacrum_accel_z},{sacrum_gyro_x},{sacrum_gyro_y},{sacrum_gyro_z},{sacrum_mag_x},{sacrum_mag_y},{sacrum_mag_z},{sacrum_quat_x},{sacrum_quat_y},{sacrum_quat_z},{sig.time[i]},{grf_x},{grf_y},{grf_z});\n"
lines.append(insert_query)
When writing SQL statements in Python, you should typically not use string formatting, but use the features of the language for variable substition. Not only does this solve problems like the one you're asking about, but it also prevents unsafe 'SQL injection' problems.
You didn't include in your example how you plan to execute that SQL query, but you say you're using MySQL. An Example:
conn = mysql.connector.connect(user='my_user', database='my_database')
cursor = conn.cursor()
fields = ', '.join('%s' for _ in len(var_list))
cursor.query(f'INSERT INTO %s ({fields}) VALUES ({fields})', (*var_list, *value_list))
This assumes var_list has the names of the columns you're after and value_list has the values, in a list of the same length.
If you provide more details on exactly what you're trying to pass in, the example might change - but the key thing is this, for MySQL, %s will be replaced with a value from the tuple passed along with the query. That's why the example generates a string with that number of %s.
For your issue, if you pass None, the .query() method will replace that with the appropriate value, in the case of MySQL NULL.
I am implementing unit test on one of the classes of my project. The method that I want to test is queryCfsNoteVariations:
class PdfRaportDaoImpl:
def queryCfsNoteVariations(self, reportId):
sql = """
select v.* from item_value_table v
where v.table_id in
(select table_id from table_table t
where t.report_id=%s and table_name='CFS')
"""
cfsItemList = dbFind(sql, (reportId))
sql = "select * from variations_cfs_note"
cfsNoteVariations = dbFind(sql)
if cfsNoteVariations == None or len(cfsNoteVariations) == 0:
raise Exception("cfs note variations is null!")
cfsNoteVariationList = []
for itemInfo in cfsItemList:
for cfsNoteVariation in cfsNoteVariations:
if (
cfsNoteVariation["item_name_cfs"].lower()
== itemInfo["item_name"].lower()
):
cfsNoteVariationList.append(cfsNoteVariation["item_name_cfs_note"])
if len(cfsNoteVariationList) > 0:
return cfsNoteVariationList, itemInfo["item_note"]
return None, None
Which has a path: /com/pdfgather/PDFReportDao.py
In my test I am doing patch on dbFind() method which is located in /com/pdfgather/GlobalHelper.py. My current test looks like this:
from com.pdfgather.PDFReportDao import PdfReportDaoImpl
#patch("com.pdfgather.GlobalHelper.dbFind")
def test_query_cfs_note_variations(self, mock_find):
mock_find.side_effect = iter([
[{"item_name" : "foo"}, {"item_name" : "hey"}],
[{"item_name_cfs": "foo"},
{"item_name_cfs": "foo"},
{"item_name_cfs": "hey"}]]
])
report_id = 3578
result = TestingDao.dao.queryCfsNoteVariations(report_id)
# Printing result
print(result)
However I am not getting my desired result which is getting inside a loop and returning from inside a loop. Instead the dbFind is returning nothing (but it shouldn't as I already preassigned returning values for dbFind).
Thanks in advance!
Python refers com.pdfgather.PDFReportDao.dbFind and com.pdfgather.GlobalHelper.dbFind as two different classes. The second one is the import you want to patch. Try changing your patch to:
#patch("com.pdfgather.PDFReportDao.dbFind")
Looking for a second set of eyes here. I cannot figure out why the following loop will not continue past the first iteration.
The 'servicestocheck' sqlalchemy query returns 45 rows in my test, but I cannot iterate through the results like I'm expecting... and no errors are returned. All of the functionality works on the first iteration.
Anyone have any ideas?
def serviceAssociation(current_contact_id,perm_contact_id):
servicestocheck = oracleDB.query(PORTAL_CONTACT).filter(
PORTAL_CONTACT.contact_id == current_contact_id
).order_by(PORTAL_CONTACT.serviceID).count()
print(servicestocheck) # returns 45 items
servicestocheck = oracleDB.query(PORTAL_CONTACT).filter(
PORTAL_CONTACT.contact_id = current_contact_id
).order_by(PORTAL_CONTACT.serviceID).all()
for svc in servicestocheck:
#
# Check to see if already exists
#
check_existing_association = mysqlDB.query(
CONTACTTOSERVICE).filter(CONTACTTOSERVICE.contact_id ==
perm_contact_id,CONTACTTOSERVICE.serviceID ==
svc.serviceID).first()
#
# If no existing association
#
if check_existing_association is None:
print ("Prepare Association")
assoc_contact_id = perm_contact_id
assoc_serviceID = svc.serviceID
assoc_role_billing = False
assoc_role_technical = False
assoc_role_commercial = False
if svc.contact_type == 'Billing':
assoc_role_billing = True
if svc.contact_type == 'Technical':
assoc_role_technical = True
if svc.contact_type == 'Commercial':
assoc_role_commercial = True
try:
newAssociation = CONTACTTOSERVICE(
assoc_contact_id, assoc_serviceID,
assoc_role_billing,assoc_role_technical,
assoc_role_commercial)
mysqlDB.add(newAssociation)
mysqlDB.commit()
mysqlDB.flush()
except Exception as e:
print(e)
This function is called from a script, and it is called from within another loop. I can't find any issues with nested loops.
Ended up being an issue with SQLAlchemy ORM (see SqlAlchemy not returning all rows when querying table object, but returns all rows when I query table object column)
I think the issue is due to one of my tables above does not have a primary key in real life, and adding a fake one did not help. (I don't have access to the DB to add a key)
Rather than fight it further... I went ahead and wrote raw SQL to move my project along.
This did the trick:
query = 'SELECT * FROM PORTAL_CONTACT WHERE contact_id = ' + str(current_contact_id) + 'ORDER BY contact_id ASC'
servicestocheck = oracleDB.execute(query)
I have the following code which I would like to do an upsert:
def add_electricity_reading(
*, period_usage, period_started_at, is_estimated, customer_pk
):
from sqlalchemy.dialects.postgresql import insert
values = dict(
customer_pk=customer_pk,
period_usage=period_usage,
period_started_at=period_started_at,
is_estimated=is_estimated,
)
insert_stmt = insert(ElectricityMeterReading).values(**values)
do_update_stmt = insert_stmt.on_conflict_do_update(
constraint=ElectricityMeterReading.__table_args__[0].name,
set_=dict(
period_usage=period_usage,
period_started_at=period_started_at,
is_estimated=is_estimated,
)
)
conn = DBSession.connection()
conn.execute(do_update_stmt)
return DBSession.query(ElectricityMeterReading).filter_by(**dict(
period_usage=period_usage,
period_started_at=period_started_at,
customer_pk=customer_pk,
is_estimated=is_estimated,
)).one()
def test_updates_existing_record_for_started_at_if_already_exists():
started_at = datetime.now(timezone.utc)
existing = add_electricity_reading(
period_usage=0.102,
customer_pk=customer.pk,
period_started_at=started_at,
is_estimated=True,
)
started_at = existing.period_started_at
reading = add_electricity_reading(
period_usage=0.200,
customer_pk=customer.pk,
period_started_at=started_at,
is_estimated=True,
)
# existing record was updated
assert reading.period_usage == 0.200
assert reading.id == existing.id
In my test when I add an existing record with period_usage=0.102 and then execute the query again but change to period_usage=0.2. When the final query at the bottom returns the record the period_usage is still 0.102.
Any idea why this could be happening?
This behaviour is explained in "Session Basics" under "What does the Session do?" The session holds references to objects it has loaded in a structure called the identity map, and so ensures that only 1 unique object per primary key value exists at a time during a session's lifetime. You can verify this with the following assertion in your own code:
assert existing is reading
The Core insert (or update) statements you are executing do not keep the session in sync with the changes taking place in the database the way for example Query.update() does. In order to fetch the new values you can expire the ORM loaded state of the unique object:
DBSession.expire(existing) # or reading, does not matter
# existing record was updated
assert reading.period_usage == 0.200
assert reading.id == existing.id
I am having an empty collection and have thousands of entries to process (entries might have redudancy for which I want to use both updates and inserts).
The python code (using pymongo) I wrote:
for mydoc in alldocs:
key = {'myid': mydoc['myid']}
data = process_doc(mydoc) # returns simple dictionary
db.mydocs.update(key, {"$set": data}, upsert = True)
The following code is unable to perform any insert operations. The collection still remains empty. But when I remove $set and use simply data, it works fine. Can't I use $set in upsert? The reason why I want $set was so that pre-existing fields for a BSON doesn't get affected. Can someone please guide. I really can't figure out what to do.
Reproducable code:
from pymongo import Connection
DB_CONTENT_BASE_KEY = 'contentbase'
def connect_to_db(dbname, hostname = 'localhost', portno = 27017, **kwargs):
connection = Connection(hostname, portno)
dbConnection = connection[dbname]
return dbConnection
class MetawebCustomCollectionBuilder(object):
# key ought to be a dictionary to filter results from contentbase.
def __init__(self, inDbConfig, outDbConfig, key = {}, verbose = False):
self.verbose = verbose
self.inDbConfig = inDbConfig
self.inDb = connect_to_db(**inDbConfig)
self.outDbConfig = outDbConfig
self.outDb = connect_to_db(**outDbConfig)
self.inDbContentBase = self.inDb[self.inDbConfig[DB_CONTENT_BASE_KEY]]
self.outDbContentBase = self.outDb[self.outDbConfig[DB_CONTENT_BASE_KEY]]
self.key = key
self.in_db_collection_constraints()
self.out_db_collection_constraints()
def in_db_collection_constraints(self):
self.inDbContentBase.ensure_index('mid')
if self.verbose: print("Assured index on mid for inDbContentBase...")
def out_db_collection_constraints(self):
self.outDbContentBase.ensure_index('mid')
if self.verbose: print("Assured index on mid for outDbContentBase...")
def process_in_record(self, inRecord):
outRecord = inRecord # [YET TO] continue from here...
return outRecord
def transit_collection(self):
for record in self.inDbContentBase.find(self.key):
outRecord = self.process_in_record(record)
key = {'mid':outRecord['mid']}
data = outRecord
print key
self.outDbContentBase.update(key, {"$set": data}, True)
if self.verbose: print 'Done with transiting collection from in DB to out DB'
def cleanup_out_collection(self):
pass
def in_db_sandbox(self):
# To have tests and analytics placed in here corresponding to inDb.
pass
if __name__ == '__main__':
inDbConfig = {'dbname':'metaweb', 'contentbase': 'content'}
outDbConfig = {'dbname': 'similarkind', 'contentbase': 'content'}
mccb = MetawebCustomCollectionBuilder(inDbConfig, outDbConfig, verbose = True)
mccb.transit_collection()
There must be a prexisting database inDb. From this collection I want to create a new modified collection.
Your claim is wrong
>>> import pymongo
>>> c = pymongo.Connection()
>>> db = c.mydb
>>> db.mydocs.find().count()
0
>>> db.mydocs.update({'myid': '438'}, {"$set": {'keyA':'valueA'}}, upsert = True)
>>> db.mydocs.find().count()
1
>>> db.mydocs.find_one()
{u'myid': u'438', u'keyA': u'valueA', u'_id': ObjectId('504c2fd1a694cc9624bbd6a2')}