problem: Im trying to extract values out of a mysql search to use later within the code.
I have setup a mysql function (connector_mysql) that i use to connect/run the mysql commands.
The problem i'm having is returning these values back out of the mysql function to the rest of the code.
There are two scenarios that i've tried which i think should work but dont when run.
full code sample below...
1. In the mysql function result acts like a dictionary if - using just..
result = cursor.fetchone(variable1, variable2, variable3)
return result
AND in the main function
result1 = connector_mysql(subSerialNum, ldev, today_date)
print(result1)
This appears to work and when printing result1 i get a dictionary looking output:
{'ldev_cap': '0938376656', 'ldev_usedcap': '90937763873'}
HOWEVER...
I cant then use dictionary methods to get or separate the values out.. eg
used_capacity = result1['ldev_cap']
which i would have expected that now 'used_capacity' to represent or equal 0938376656.
Instead i get an error about the object is not able to be subscripted...?
Error below:
File "/Users/graham/PycharmProjects/VmExtrat/VmExtract.py", line 160, in openRead
used_capacity = result1['ldev_cap']
TypeError: 'NoneType' object is not subscriptable
In the mysql function the result acts like a dictionary if I manipulate it and try and return multiple values with the tuple concept - using..
cursor.execute(sql_query, {'serial': subSerialNum, 'lun': ldev, 'todayD': today_date})
result = cursor.fetchone()
while result:
ldev_cap = result['ldev_cap']
ldev_usdcap = result['ldev_usdcap']
return ldev_cap, ldev_usdcap
Here, result acts like a dictionary and i'm able to assign a parameter to the key like:
ldev_cap = result['ldev_cap']
and if you print ldev_cap you get the correct figure...
If I return one figure, the main function line of:
result1 = connector_mysql(subSerialNum, ldev, today_date)
it Works....
HOWEVER...
When then trying to return multiple parameters from the mysql function by doing
return ldev_cap, ldev_usdcap
and in the main function:
capacity, usd_capacity = connector_mysql(subSerialNum, ldev, today_date)
I get errors again
File "/Users/graham/PycharmProjects/VmExtrat/VmExtract.py", line 156, in openRead
capacity, usd_capacity = connector_mysql(subSerialNum, ldev, today_date)
TypeError: 'NoneType' object is not iterable
I think i'm doing the right thing with the dictionary and the tuple but i'm obviously missing something or not doing it correctly, as i need to do this with 4-5 paramaters per sql query,
I didn't want to do multiple querys for the same thing to get the individual paramaters out.
Any help or suggestions would be greatly welcomed...
full code below.
main code:
capacity, usd_capacity = connector_mysql(subSerialNum, ldev, today_date)
print(capacity)
print(usd_capacity)
def connector_mysql(subSerialNum, ldev, today_date):
import pymysql.cursors
db_server = 'localhost'
db_name = 'CBDB'
db_pass = 'secure_password'
db_user = 'user1'
sql_query = (
"SELECT ldev_cap, ldev_usdcap FROM Ldevs WHERE sub_serial=%(serial)s "
"and ldev_id=%(lun)s and data_date=%(todayD)s")
connection = pymysql.connect(host=db_server,
user=db_user,
password=db_pass,
db=db_name,
cursorclass=pymysql.cursors.DictCursor)
try:
with connection.cursor() as cursor:
cursor.execute(sql_query, {'serial': subSerialNum, 'lun': ldev, 'todayD': today_date})
result = cursor.fetchone()
#return result #used for returning dict
while result:
ldev_cap = result['ldev_cap']
ldev_usdcap = result['ldev_usdcap']
print(result)
return ldev_cap, ldev_usdcap
finally:
connection.close()
Related
I have created a database and I am trying to fetch data from it. I have a class Query and inside the class I have a function that calls a table called forecasts. The function is as follows:
def forecast(self, provider: str, zone: str='Mainland',):
self.date_start = date_start)
self.date_end = (date_end)
self.df_forecasts = pd.DataFrame()
fquery = """
SELECT dp.name AS provider_name, lf.datetime_from AS date, fr.name AS run_name, lf.value AS value
FROM load_forecasts lf
INNER JOIN bidding_zones bz ON lf.zone_id = bz.zone_id
INNER JOIN data_providers dp ON lf.provider_id = dp.provider_id
INNER JOIN forecast_runs fr ON lf.run_id = fr.run_id
WHERE bz.name = '{zone}'
AND dp.name = '{provider}'
AND date(lf.datetime_from) BETWEEN '{self.date_start}' AND '{self.date_end}'
"""
df_forecasts = pd.read_sql_query(fquery, self.connection)
return df_forecasts
In the scripts that I run I am calling the Query class giving it my inputs
query = Query(date_start, date_end)
And the function
forecast_df = query.forecast(provider='Meteologica')
I run my script in the command line in the classic way
python myscript.py '2022-11-10' '2022-11-18'
My script shows the error
sqlalchemy.exc.DataError: (psycopg2.errors.InvalidDatetimeFormat) invalid input syntax for type date: "{self.date_start}"
LINE 9: AND date(lf.datetime_from) BETWEEN '{self.date_start...
when I use this syntax, but when I manually input the string for date_start and date_end it works.
I cannot find a way to solve the problem with sqlalchemy, so I opened a cursor with psycopg2.
# Returns the datetime, value and provider name and issue date of the forecasts in the load_forecasts table
# The dates range is specified by the user when the class is called
def forecast(self, provider: str, zone: str='Mainland',):
# Opens a cursor to get the data
cursor = self.connection.cursor()
# Query to run
query = """
SELECT dp.name, lf.datetime_from, fr.name, lf.value, lf.issue_date
FROM load_forecasts lf
INNER JOIN bidding_zones bz ON lf.zone_id = bz.zone_id
INNER JOIN data_providers dp ON lf.provider_id = dp.provider_id
INNER JOIN forecast_runs fr ON lf.run_id = fr.run_id
WHERE bz.name = %s
AND dp.name = %s
AND date(lf.datetime_from) BETWEEN %s AND %s
"""
# Execute the query, bring the data and close the cursor
cursor.execute(query, (zone, provider, self.date_start, self.date_end))
self.df_forecasts = cursor.fetchall()
cursor.close()
return self.df_forecasts
If anyone finds the answer with sqlalchemy, I would love to see it!
I have this code segment in Python2:
def super_cool_method():
con = psycopg2.connect(**connection_stuff)
cur = con.cursor(cursor_factory=DictCursor)
cur.execute("Super duper SQL query")
rows = cur.fetchall()
for row in rows:
# do some data manipulation on row
return rows
that I'd like to write some unittests for. I'm wondering how to use mock.patch in order to patch out the cursor and connection variables so that they return a fake set of data? I've tried the following segment of code for my unittests but to no avail:
#mock.patch("psycopg2.connect")
#mock.patch("psycopg2.extensions.cursor.fetchall")
def test_super_awesome_stuff(self, a, b):
testing = super_cool_method()
But I seem to get the following error:
TypeError: can't set attributes of built-in/extension type 'psycopg2.extensions.cursor'
You have a series of chained calls, each returning a new object. If you mock just the psycopg2.connect() call, you can follow that chain of calls (each producing mock objects) via .return_value attributes, which reference the returned mock for such calls:
#mock.patch("psycopg2.connect")
def test_super_awesome_stuff(self, mock_connect):
expected = [['fake', 'row', 1], ['fake', 'row', 2]]
mock_con = mock_connect.return_value # result of psycopg2.connect(**connection_stuff)
mock_cur = mock_con.cursor.return_value # result of con.cursor(cursor_factory=DictCursor)
mock_cur.fetchall.return_value = expected # return this when calling cur.fetchall()
result = super_cool_method()
self.assertEqual(result, expected)
Because you hold onto references for the mock connect function, as well as the mock connection and cursor objects you can then also assert if they were called correctly:
mock_connect.assert_called_with(**connection_stuff)
mock_con.cursor.asset_called_with(cursor_factory=DictCursor)
mock_cur.execute.assert_called_with("Super duper SQL query")
If you don't need to test these, you could just chain up the return_value references to go straight to the result of cursor() call on the connection object:
#mock.patch("psycopg2.connect")
def test_super_awesome_stuff(self, mock_connect):
expected = [['fake', 'row', 1], ['fake', 'row' 2]]
mock_connect.return_value.cursor.return_value.fetchall.return_value = expected
result = super_cool_method()
self.assertEqual(result, expected)
Note that if you are using the connection as a context manager to automatically commit the transaction and you use as to bind the object returned by __enter__() to a new name (so with psycopg2.connect(...) as conn: # ...) then you'll need to inject an additional __enter__.return_value in the call chain:
mock_con_cm = mock_connect.return_value # result of psycopg2.connect(**connection_stuff)
mock_con = mock_con_cm.__enter__.return_value # object assigned to con in with ... as con
mock_cur = mock_con.cursor.return_value # result of con.cursor(cursor_factory=DictCursor)
mock_cur.fetchall.return_value = expected # return this when calling cur.fetchall()
The same applies to the result of with conn.cursor() as cursor:, the conn.cursor.return_value.__enter__.return_value object is assigned to the as target.
Since the cursor is the return value of con.cursor, you only need to mock the connection, then configure it properly. For example,
query_result = [("field1a", "field2a"), ("field1b", "field2b")]
with mock.patch('psycopg2.connect') as mock_connect:
mock_connect.cursor.return_value.fetchall.return_value = query_result
super_cool_method()
The following answer is the variation of above answers.
I was using django.db.connections cursor object.
So following code worked for me
#patch('django.db.connections')
def test_supercool_method(self, mock_connections):
query_result = [("field1a", "field2a"), ("field1b", "field2b")]
mock_connections.__getitem__.return_value.cursor.return_value.__enter__.return_value.fetchall.return_value = query_result
result = supercool_method()
self.assertIsInstance(result, list)
#patch("psycopg2.connect")
async def test_update_task_after_launch(fake_connection):
"""
"""
fake_update_count =4
fake_connection.return_value = Mock(cursor=lambda : Mock(execute=lambda x,y :"",
fetch_all=lambda:['some','fake','rows'],rowcount=fake_update_count,close=lambda:""))
I've been trying to test the below metheod specially the if block and have tried multiple things like patching, mocking in various combinations of pyodbc but I've not been able to mock the if condition.
def execute_read(self, query):
dbconn = pyodbc.connect(self.connection_string, convert_unicode=True)
with dbconn.cursor() as cur:
cursor = cur.execute(query)
if not cursor.messages:
res = cursor.fetchall()
else:
raise Exception(cursor.messages[0][1])
return res;
# unit test method
#patch.object(pyodbc, 'connect')
def test_execute_read(self, pyodbc_mock):
pyodbc_mock.return_value = MagicMock()
self.assertIsNotNone(execute_read('query'))
I've read the docs of unittest.mock, but I haven't found a way to get this above if condition covered. Thank you.
You would want to patch the Connection class (given the Cursor object is immutable) and supply a return value for covering the if block. Something that may look like:
with patch.object("pyodbc.Connection") as conn:
conn.cursor().messages = []
...
Tried this with sqlite3 and that worked for me.
Here's an example of using the patch object, something I wrote for frappe/frappe:
def test_db_update(self):
with patch.object(Database, "sql") as sql_called:
frappe.db.set_value(
self.todo1.doctype,
self.todo1.name,
"description",
f"{self.todo1.description}-edit by `test_for_update`",
)
first_query = sql_called.call_args_list[0].args[0]
second_query = sql_called.call_args_list[1].args[0]
self.assertTrue(sql_called.call_count == 2)
self.assertTrue("FOR UPDATE" in first_query)
I am trying to get the ROW_COUNT() from a MySQL stored procedure into python.
here is what I got, but I don't know what I am missing.
DELIMITER //
CREATE OR REPLACE PROCEDURE sp_refresh_mytable(
OUT row_count INT
)
BEGIN
DECLARE exit handler for SQLEXCEPTION
BEGIN
ROLLBACK;
END;
DECLARE exit handler for SQLWARNING
BEGIN
ROLLBACK;
END;
DECLARE exit handler FOR NOT FOUND
BEGIN
ROLLBACK;
END;
START TRANSACTION;
DELETE FROM mytable;
INSERT INTO mytable
(
col1
, col2
)
SELECT
col1
, col2
FROM othertable
;
SET row_count = ROW_COUNT();
COMMIT;
END //
DELIMITER ;
If I call this in via normal SQL like follows I get the correct row_count of the insert operation (e.g. 26 rows inserted):
CALL sp_refresh_mytable(#rowcount);
select #rowcount as t;
-- output: 26
Then in python/mysqlalchemy:
def call_procedure(engine, function_name, params=None):
connection = engine.raw_connection()
try:
cursor = connection.cursor()
result = cursor.callproc('sp_refresh_mytable', [0])
## try result outputs
resultfetch = cursor.fetchone()
logger.info(result)
logger.info(result[0])
logger.info(resultfetch)
cursor.close()
connection.commit()
connection.close()
logger.info(f"Running procedure {function_name} success!")
return result
except Exception as e:
logger.error(f"Running procedure {function_name} failed!")
logger.exception(e)
return None
finally:
connection.close()
So I tried logging different variations of getting the out value, but it is always 0 or None.
[INFO] db_update [0]
[INFO] db_update 0
[INFO] db_update None
What am I missing?
Thanks!
With the help of this answer I found the following solution that worked for me.
a) Working solution using engine.raw_connection() and cursor.callproc:
def call_procedure(engine, function_name):
connection = engine.raw_connection()
try:
cursor = connection.cursor()
cursor.callproc(function_name, [0])
cursor.execute(f"""SELECT #_{function_name}_0""")
results = cursor.fetchone() ## returns a tuple e.g. (285,)
rows_affected = results[0]
cursor.close()
connection.commit()
logger.info(f"Running procedure {function_name} success!")
return rows_affected
except Exception as e:
logger.error(f"Running procedure {function_name} failed!")
logger.exception(e)
return None
finally:
connection.close()
And with this answer I found this solution also:
b) Instead of using a raw connection, this worked as well:
def call_procedure(engine, function_name, params=None):
try:
with engine.begin() as db_conn:
db_conn.execute(f"""CALL {function_name}(#out)""")
results = db_conn.execute('SELECT #out').fetchone() ## returns a tuple e.g. (285,)
rows_affected = results[0]
logger.debug(f"Running procedure {function_name} success!")
return rows_affected
except Exception as e:
logger.error(f"Running procedure {function_name} failed!")
logger.exception(e)
return None
finally:
if db_conn: db_conn.close()
If there are any advantages or drawbacks of using one of these methods over the other, please let me know in a comment.
I just wanted to add another piece of code, since I was trying to get callproc to work (using sqlalchemy) with multiple in- and out-params.
For this case I went with the callproc method using a raw connection [solution b) in my previous answer], since this functions accepts params as a list.
It could probably be done more elegantly or more pythonic in some parts, but it was mainly for getting it to work and I will probably create a function from this so I can use it for generically calling a SP with multiple in and out params.
I included comments in the code below to make it easier to understand what is going on.
In my case I decided to put the out-params in a dict so I can pass it along to the calling app in case I need to react to the results. Of course you could also include the in-params which could make sense for error logging maybe.
## some in params
function_name = 'sp_upsert'
in_param1 = 'my param 1'
in_param2 = 'abcdefg'
in_param3 = 'some-name'
in_param4 = 'some display name'
in_params = [in_param1, in_param1, in_param1, in_param1]
## out params
out_params = [
'out1_row_count'
,'out2_row_count'
,'out3_row_count'
,'out4_row_count_ins'
,'out5_row_count_upd'
]
params = copy(in_params)
## adding the outparams as integers from out_params indices
params.extend([i for i, x in enumerate(out_params)])
## the params list will look like
## ['my param 1', 'abcdefg', 'some-name', 'some display name', 0, 1, 2, 3, 4]
logger.info(params)
## build query to get results from callproc (including in and out params)
res_qry_params = []
for i in range(len(params)):
res_qry_params.append(f"#_{function_name}_{i}")
res_qry = f"SELECT {', '.join(res_qry_params)}"
## the query to fetch the results (in and out params) will look like
## SELECT #_sp_upsert_0, #_sp_upsert_1, #_sp_upsert_2, #_sp_upsert_3, #_sp_upsert_4, #_sp_upsert_5, #_sp_upsert_6, #_sp_upsert_7, #_sp_upsert_8
logger.info(res_qry)
try:
connection = engine.raw_connection()
## calling the sp
cursor = connection.cursor()
cursor.callproc(function_name, params)
## get the results (includes in and out params), the 0/1 in the end are the row_counts from the sp
## fetchone is enough since all results come as on result record like
## ('my param 1', 'abcdefg', 'some-name', 'some display name', 1, 0, 1, 1, 0)
cursor.execute(res_qry)
results = cursor.fetchone()
logger.info(results)
## adding just the out params to a dict
res_dict = {}
for i, element in enumerate(out_params):
res_dict.update({
element: results[i + len(in_params)]
})
## the result dict in this case only contains the out param results and will look like
## { 'out1_row_count': 1,
## 'out2_row_count': 0,
## 'out3_row_count': 1,
## 'out4_row_count_ins': 1,
## 'out5_row_count_upd': 0}
logger.info(pformat(res_dict, indent=2, sort_dicts=False))
cursor.close()
connection.commit()
logger.debug(f"Running procedure {function_name} success!")
except Exception as e:
logger.error(f"Running procedure {function_name} failed!")
logger.exception(e)
Just to complete the picture, here is a shortened version of my stored procedure. After BEGIN I declare some error handlers I set the out params to default 0, otherwise they could also return as NULL/None if not set by the procedure (e.g. because no insert was made):
DELIMITER //
CREATE OR REPLACE PROCEDURE sp_upsert(
IN in_param1 VARCHAR(32),
IN in_param2 VARCHAR(250),
IN in_param3 VARCHAR(250),
IN in_param4 VARCHAR(250),
OUT out1_row_count INTEGER,
OUT out2_row_count INTEGER,
OUT out3_row_count INTEGER,
OUT out4_row_count_ins INTEGER,
OUT out5_row_count_upd INTEGER
)
BEGIN
-- declare variables, do NOT declare the out params here!
DECLARE dummy INTEGER DEFAULT 0;
-- declare error handlers (e.g. continue handler for not found)
DECLARE CONTINUE HANDLER FOR NOT FOUND SET dummy = 1;
-- set out params defaulting to 0
SET out1_row_count = 0;
SET out2_row_count = 0;
SET out3_row_count = 0;
SET out4_row_count_ins = 0;
SET out5_row_count_upd = 0;
-- do inserts and updates and set the outparam variables accordingly
INSERT INTO some_table ...;
SET out1_row_count = ROW_COUNT();
-- commit if no errors
COMMIT;
END //
DELIMITER ;
I am trying to use a tkinter option menu to select the variable to search for in a table. A problem arises however as the passed variable does not yield any result.
Here is the table:
And here is proof that the SQL syntax is not incorrect.
The problem is due to the string variable being incorrect and returning:
[]
no data.
When I select a variable from the OptionMenu, instead of getting:
jhgfds
I get:
('jhgfds',)
So understandably I get no result.
I have tried using these methods on the :
Creating a non-tkinter variable (`StrEditEvent)
The re method
The [2:-3] method
However these have not worked
import tkinter as tk
import mysql.connector
root=tk.Tk()
EventList=[]
def OptionChanged(*args):
EventSQL=("SELECT * FROM events WHERE eventname=%s")
print(EditEvent.get())
StrEditEvent=EditEvent.get()
print(StrEditEvent)
mycursor.execute(EventSQL,(StrEditEvent,))
myresults=mycursor.fetchall()
print(myresults)
# Adding Tracking Variable EditEvent
EditEvent = tk.StringVar()
EditEvent.trace("w", OptionChanged)
#Connecting To My Database
mydb = mysql.connector.connect(
host="localhost",
user="root",
passwd="Cranmore1",
database="scoutsdatabase"
)
print(mydb)
mycursor = mydb.cursor()
mycursor.execute("SELECT eventname FROM events")
myresults=mycursor.fetchall()
for i in myresults:
EventList.append(i)
EventToEditOptionMenu = tk.OptionMenu(root,EditEvent,*EventList)
EventToEditOptionMenu.grid(row=1,column=1)
root.mainloop()
Any help would be greatly appreciated.
print(EventList)
[('jhgfds',), ('uytrds',), ('sadfghjk',), ('jhytre',), ('j',), ('h',), ('q',), ('BBC',), ('BBC',), ('qwed',)]
To get the result as jhgfds you need to iterate over it because it returns the result for the query as a tuple [('jhgfds',), ('uytrds',), ('sadfghjk',), ('jhytre',), ('j',), ('h',), ('q',), ('BBC',), ('BBC',), ('qwed',)]
You can use index to get the specific result you want result[0] or result[2]
def OptionChanged(*args):
EventSQL=("SELECT * FROM events WHERE eventname=%s")
print(EditEvent.get())
StrEditEvent=EditEvent.get()
print(StrEditEvent)
mycursor.execute(EventSQL,(StrEditEvent,))
myresults=mycursor.fetchall()
for result in myresults: # iterate over it
print(result)
print(result[2])
print(result[5])
I've finally worked out the answer! To obtain the string from the tuple, one must use the map() command.
def OptionChanged(*args):
EventSQL=("SELECT * FROM events WHERE eventname=%s")
StrEditEvent=EditEvent.get()
start,mid,end=map(str,StrEditEvent.split("'"))
print(mid)
mycursor.execute(EventSQL,(mid,))
myresults=mycursor.fetchall()
print(myresults)
Thus ('jhgfds',) is converted into jhgfds so the SQL query finds the data from the database.