# pip install pyodbc pandas google-api-python-client
from Google import Create_Service  # helper module that builds the Google Sheets service object
import pyodbc as odbc
import pandas as pd
"""
Step 1.1 Connect to MS SQL Server Database System``
"""
server = '192.168.102.4'
database = ''
username = ''
password = ''
driver = 'ODBC Driver 17 for SQL Server'
def connection_string(driver, server, database, username, password):
    # build an ODBC connection string using SQL Server authentication (UID/PWD)
    conn_string = f"""
        DRIVER={{{driver}}};
        SERVER={server};
        DATABASE={database};
        UID={username};
        PWD={password};
    """
    return conn_string
try:
    conn = odbc.connect(connection_string(driver, server, database, username, password))
    print('Connection Created')
except odbc.DatabaseError as e:
    print('Database Error:', e)
    raise
except odbc.Error as e:
    print('Connection Error:', e)
    raise
sql_query = ("SELECT TOP 1 "
"NOME = B.A1_NOME, CPF = B.A1_CGC, "
"'E-MAIL' = CASE WHEN LEN(ISNULL(CC.CLIEMAIL, '')) > 5 THEN CC.CLIEMAIL "
"WHEN LEN(ISNULL(DD.CLIEXTEMAIL2, '')) > 5 THEN DD.CLIEXTEMAIL2 "
"ELSE B.A1_EMAIL COLLATE Latin1_General_BIN END, "
"DDD = CASE WHEN LEN(ISNULL(CC.CLIDDDCELULAR, '')) > 0 THEN CC.CLIDDDCELULAR "
"WHEN LEN(ISNULL(DD.CLIEXTDDDCELULAR2, '')) > 0 THEN DD.CLIEXTDDDCELULAR2 "
"ELSE B.A1_DDD COLLATE Latin1_General_BIN END, "
"CELULAR = CASE WHEN LEN(ISNULL(CC.CLICELULAR, '')) > 5 THEN CC.CLICELULAR "
"WHEN LEN(ISNULL(DD.CLIEXTCELULAR2, '')) > 5 THEN DD.CLIEXTCELULAR2 "
"ELSE B.A1_TEL COLLATE Latin1_General_BIN END, "
"DATACADASTRO = CONVERT(VARCHAR,DATEADD(DAY,((ASCII(SUBSTRING(A1_USERLGI,12,1))-50)*100+(ASCII(SUBSTRING(A1_USERLGI,16,1))-50)),'19960101'),112), "
"ANIVERSARIO = CASE WHEN LEN(B.A1_DTNASC) > 5 THEN B.A1_DTNASC "
"ELSE CONVERT(VARCHAR(10), CC.CLIDTANASCIMENTO, 112) END, "
"ENDERECO = B.A1_END, "
"DOCUMENTO = A.L1_DOC, "
"CODIGOPRODUTO = E.L2_PRODUTO, "
"QUANTIDADE = E.L2_QUANT, "
"VALORUNITARIO = E.L2_VRUNIT, "
"VALORPEDIDO = E.L2_VLRITEM, "
"DATAPEDIDO = A.L1_DTLIM, "
"LOJA = A.L1_FILIAL, "
"CODVENDEDOR = A.L1_VEND, VENDEDOR = D.A3_NOME, "
"PDV = A.L1_PDV "
"FROM "
"[192.168.102.6].DBTOTVS12.dbo.SL1010 A, [192.168.102.6].DBTOTVS12.dbo.SA3010 D, "
"[192.168.102.6].DBTOTVS12.dbo.SL2010 E, "
"[192.168.102.6].DBTOTVS12.dbo.SA1010 B LEFT OUTER JOIN CLIENTES CC ON LEN(LTRIM(RTRIM(B.A1_CGC))) > 1 AND CONVERT(DECIMAL(14, 0), LTRIM(RTRIM(B.A1_CGC))) = CC.CLICPFCNPJ LEFT OUTER JOIN CLIENTESEXTENSAO DD ON CC.CLICODIGO = DD.CLICODIGO "
"WHERE "
"A.L1_CLIENTE = B.A1_COD "
"AND A.L1_CLIENTE <> '000000001' "
"AND A.L1_DTLIM >= '20210101' "
"AND A.L1_SITUA = 'OK' "
"AND A.L1_FILIAL = E.L2_FILIAL "
"AND A.L1_NUM = E.L2_NUM "
"AND A.L1_PDV = E.L2_PDV "
"AND A.L1_DOC = E.L2_DOC "
"AND E.L2_VEND = D.A3_COD "
"AND E.L2_FILIAL = D.A3_FILIAL "
"AND A.D_E_L_E_T_ = '' "
"AND B.D_E_L_E_T_ = '' "
"AND D.D_E_L_E_T_ = '' "
"AND E.D_E_L_E_T_ = '' "
"ORDER BY L1_DTLIM " )
cursor = conn.cursor()
cursor.execute(sql_query)
"""
Step 1.2 Retrieve Dataset from SQL Server
"""
recordset = cursor.fetchall()
columns = [col[0] for col in cursor.description]
df = pd.DataFrame(recordset, columns=columns)
"""
Step 2. Export Dataset to Google Spreadsheets
"""
gs_sheet_id = '1nFC9Q9TqdatLrDSA48uW2dqQuYT7YALXWjd0vmGZuqk'
tab_id = 0
CLIENT_SECRET_FILE = 'yuryrogens.json'
API_NAME = 'sheets'
API_VERSION = 'v4'
SCOPES = ['https://www.googleapis.com/auth/spreadsheets']
service = Create_Service(CLIENT_SECRET_FILE, API_NAME, API_VERSION, SCOPES)
# create spreadsheets reference object
mySpreadsheets = service.spreadsheets().get(
    spreadsheetId=gs_sheet_id
).execute()
tab_name = [sheet['properties']['title'] for sheet in mySpreadsheets['sheets']
            if sheet['properties']['sheetId'] == tab_id][0]
"""
Insert dataset
"""
def construct_request_body(value_array, dimension: str = 'ROWS') -> dict:
    try:
        request_body = {
            'majorDimension': dimension,
            'values': value_array
        }
        return request_body
    except Exception as e:
        print(e)
        return {}
"""
Insert column names
"""
request_body_columns = construct_request_body([columns])
service.spreadsheets().values().update(
    spreadsheetId=gs_sheet_id,
    valueInputOption='USER_ENTERED',
    range=f'{tab_name}!A1',
    body=request_body_columns
).execute()
"""
Insert rows
"""
request_body_values = construct_request_body(recordset)
service.spreadsheets().values().update(
    spreadsheetId=gs_sheet_id,
    valueInputOption='USER_ENTERED',
    range=f'{tab_name}!A2',
    body=request_body_values
).execute()
print('Task is complete')
cursor.close()
conn.close()
When running the query and trying to push the results to Google Sheets, I'm having this problem. Using
df = pd.DataFrame([recordset], columns=columns)
gives the error
ValueError: 18 columns passed, passed data had 1 columns
and if I use
df = pd.DataFrame(recordset, columns=columns)
I get the following error:
ValueError: Shape of passed values is (1, 1), indices imply (1, 18)
Could you try something like this?
records = cursor.fetchall()
record_list = []
for record in records:
    record_list.append(str(record[0]))
From my experience with pyodbc, this has given me something I can work with. We can then call
pd.DataFrame(record_list)
and manipulate the resulting data as needed.
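If the query really returns full multi-column rows, another thing worth trying (a sketch, assuming the cursor from the code above) is converting each pyodbc Row into a plain tuple, so pandas can infer the 2-D shape and match it against the 18 column names:

records = cursor.fetchall()
columns = [col[0] for col in cursor.description]
# pyodbc Row objects can confuse the DataFrame constructor; plain tuples don't
rows = [tuple(record) for record in records]
df = pd.DataFrame(rows, columns=columns)

This keeps every column instead of flattening the result to a single string column.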
Related
import mysql.connector
from datetime import datetime
from datetime import timedelta
try:
    def init_change_detector():
        print("Change Detector started at " + datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
        conn = mysql.connector.connect(user='user_app',
                                       password='xxxxxxxxxxxx',
                                       host='test.unit.com',
                                       database='devdb')
        cursor = conn.cursor()
        query = ('SELECT table_name, column_name, key_name '
                 'FROM csj_change_detect_table_column '
                 'ORDER BY table_name, column_name')
        cursor.execute(query)
        # get all records
        records = cursor.fetchall()
        for record in records:
            process_col_tab_chg(record[0], record[1], record[2])
        if conn.is_connected():
            cursor.close()  # close the cursor before closing its connection
            conn.close()
    def insert_change_log(table_name, key_name, attr_name, old_attr_value, new_attr_value):
        insert_query = """INSERT INTO csj_shipment_changelog(table_name, key_name,
                                                             attr_name, old_attr_value,
                                                             new_attr_value)
                          VALUES (%s, %s, %s, %s, %s)"""
        conn2 = mysql.connector.connect(user='new_user',
                                        password='xxxxxxxxxxxx',
                                        host='test.unit.com',
                                        database='devdb')
        cursor2 = conn2.cursor()
        tuple1 = (table_name, key_name, attr_name, old_attr_value, new_attr_value)
        cursor2.execute(insert_query, tuple1)
        conn2.commit()
        cursor2.close()
        conn2.close()
    # Look up the same shipment at an earlier date
    def find_past_shipment(table_name, key_name, column_name, before_date, curr_key):
        saved_col_name = column_name
        saved_key_name = key_name
        conn4 = mysql.connector.connect(user='new_user',
                                        password='xxxxxxxxxxxx',
                                        host='test.unit.com',
                                        database='devdb')
        cursor4 = conn4.cursor()
        query4 = 'SELECT ' + saved_key_name + ' , ' + saved_col_name + ' FROM ' + table_name \
                 + " where rec_cre_dt_utc < '" + before_date.strftime('%Y-%m-%d 00:00:00') + "'" \
                 + " and shipment_num = '" + curr_key + "' order by rec_cre_dt_utc desc LIMIT 1"
        cursor4.execute(query4)
        record = cursor4.fetchone()
        cursor4.close()  # release the connection before returning
        conn4.close()
        if record is not None:
            return record[1]
        else:
            return 0
    def process_col_tab_chg(table_name, column_name, key_name):
        saved_key_name = key_name
        saved_col_name = column_name
        old_val = 0
        ini_time_for_now = datetime.now()
        date_before_1day = ini_time_for_now - timedelta(days=1)
        query = 'SELECT ' + key_name + ' , ' + saved_col_name + ' , rec_cre_dt_utc FROM ' + table_name \
                + " where rec_cre_dt_utc >= '" + date_before_1day.strftime('%Y-%m-%d 00:00:00') + "'"
        conn3 = mysql.connector.connect(user='new_user',
                                        password='xxxxxxxxxxxx',
                                        host='test.unit.com',
                                        database='devdb')
        cursor3 = conn3.cursor()
        cursor3.execute(query)
        for (key_name, column_name, rec_cre_dt_utc) in cursor3:
            curr_attr_val = column_name
            curr_key_val = key_name
            old_val = find_past_shipment(table_name,
                                         saved_key_name,
                                         saved_col_name,
                                         rec_cre_dt_utc,
                                         curr_key_val)
            if curr_attr_val != old_val and old_val != 0:
                insert_change_log(table_name, key_name, saved_col_name, old_val, curr_attr_val)
            else:
                continue
        cursor3.close()  # note the parentheses; a bare `cursor3.close` does nothing
        conn3.close()
    def cleanup():
        print("Change Detector stopped " + datetime.now().strftime("%Y-%m-%d %H:%M:%S"))

    def main():
        start = datetime.now()
        init_change_detector()
        end = datetime.now()
        time_diff = (end - start)
        execution_time = time_diff.total_seconds()
        print("Elapsed time(secs): " + str(execution_time))

    if __name__ == "__main__":
        main()
except Exception as e:
    print("Exception: " + str(e))  # str() avoids the TypeError from concatenating an exception object
finally:
    cleanup()
Switch to executemany(). I recommend limiting to 1000 rows per batch.
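A minimal sketch of that batching approach, assuming the detected changes are first collected into a list of tuples (change_rows and insert_change_logs are illustrative names, not part of the original code):

BATCH_SIZE = 1000  # cap each executemany() call at 1000 rows

def insert_change_logs(conn, change_rows):
    # insert accumulated (table, key, attr, old, new) tuples in batches
    insert_query = """INSERT INTO csj_shipment_changelog(table_name, key_name,
                          attr_name, old_attr_value, new_attr_value)
                      VALUES (%s, %s, %s, %s, %s)"""
    cursor = conn.cursor()
    for i in range(0, len(change_rows), BATCH_SIZE):
        cursor.executemany(insert_query, change_rows[i:i + BATCH_SIZE])
        conn.commit()
    cursor.close()

This avoids opening a new connection and doing a round trip for every single change.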
I am working on extracting table count information from Azure SQL Server for 350+ tables. As the system metadata tables are not refreshed regularly, I can't rely on them. I wrote the code below to achieve this -
import pyodbc
from pyspark.sql.types import *
pyodbc.pooling = False
def get_table_count(query, server, username, password, database):
    conn = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server};SERVER='+server+';DATABASE='+database+';UID='+username+';PWD='+password)
    cursor = conn.cursor()
    cursor.execute(query)
    row = cursor.fetchone()
    columns = StructType([StructField('tableCount', LongType(), True),
                          StructField('tableName', StringType(), True),
                          StructField('databaseName', StringType(), True)])
    data = [(row[0], row[1], row[2])]
    df = spark.createDataFrame(data=data, schema=columns)
    cursor.close()
    del cursor
    conn.close()
    return df
import pyspark.sql.functions as F
dbList = ['SQLServerDB1', 'SQLServerDB2']
SQLServerDB1_query = ""
SQLServerDB2_query = ""
for db in dbList:
    print("Currently loading for " + db + " database")
    serverName = db + "SQLServerName"
    serverUser = db + "SQLServerUser"
    serverPassword = db + "SQLServerPassword"
    serverDB = db + "SQLServerDB"
    tables = df.select('target_object').filter(F.col('source') == db).distinct().toPandas()['target_object']
    for tablename in list(tables):
        if tablename != list(tables)[-1]:
            vars()["%s_query" % db] = f" Select count_big(*) as tableCount, '{tablename}' as tableName, '{db}' as databaseName from {tablename} \n union \n" + vars()["%s_query" % db]
        else:
            vars()["%s_query" % db] = vars()["%s_query" % db] + f" Select count_big(*) as tableCount, '{tablename}' as tableName, '{db}' as databaseName from {tablename}"
    vars()["%s_DF" % db] = get_table_count(vars()["%s_query" % db], eval(serverName), eval(serverUser), eval(serverPassword), eval(serverDB))
I am getting the below error -
('42000', "[42000] [Microsoft][ODBC Driver 17 for SQL Server][SQL Server]Parse error at line: 3, column: 1: Incorrect syntax near 'union'. (103010) (SQLExecDirectW)")
I tried printing the SQL statements and they ran without any issue directly against the SQL Server DB.
Please suggest where I am writing the code incorrectly.
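One suggestion, a sketch only: building the batched query by joining a list of SELECT statements is easier to debug than prepending and appending through vars(), and it avoids a stray leading or trailing union (build_count_query is an illustrative helper, assuming the table names come from your trusted metadata list):

def build_count_query(tables, db):
    # one SELECT per table, stitched together with UNION ALL
    selects = [
        f"SELECT COUNT_BIG(*) AS tableCount, '{t}' AS tableName, '{db}' AS databaseName FROM {t}"
        for t in tables
    ]
    return "\nUNION ALL\n".join(selects)

Printing the result of this function makes it obvious whether the statement is well formed before it ever reaches the driver.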
I tried with the code below and it works. Thanks, guys, for the suggestions!
def get_table_count(query, server, username, password, database):
    jdbc_url = f"jdbc:sqlserver://{server}:1433;databaseName={database}"
    df_read = spark.read \
        .format("jdbc") \
        .option("url", jdbc_url) \
        .option("query", query) \
        .option("user", username) \
        .option("password", password) \
        .option("driver", "com.microsoft.sqlserver.jdbc.SQLServerDriver") \
        .load()
    # write() returns None, so don't assign it; persist, then re-read the parquet
    df_read.write.mode('overwrite').parquet('/tmp/' + f"{database}" + '.parquet')
    df = spark.read.parquet('/tmp/' + f"{database}" + '.parquet')
    return df
import pyspark.sql.functions as F
dbList = ['SQLServerDB1', 'SQLServerDB2']
SQLServerDB1_query = ""
SQLServerDB2_query = ""
for db in dbList:
    print("Currently loading for " + db + " database")
    serverName = db + "SQLServerName"
    serverUser = db + "SQLServerUser"
    serverPassword = db + "SQLServerPassword"
    serverDB = db + "SQLServerDB"
    tables = df.select('target_object').filter(F.col('source') == db).distinct().toPandas()['target_object']
    for tablename in list(tables):
        if tablename != list(tables)[-1]:
            vars()["%s_query" % db] = f" Select count_big(1) as tableCount, '{tablename}' as tableName, '{db}' as databaseName from {tablename} \n union \n" + vars()["%s_query" % db]
        else:
            vars()["%s_query" % db] = vars()["%s_query" % db] + f" Select count_big(1) as tableCount, '{tablename}' as tableName, '{db}' as databaseName from {tablename}"
    print(vars()["%s_query" % db])
    vars()["%s_DF" % db] = get_table_count(vars()["%s_query" % db], eval(serverName), eval(serverUser), eval(serverPassword), eval(serverDB))
    vars()["%s_DF" % db].createOrReplaceTempView(f"{db}_tablesCount")
    print(f"{db}" + " Loaded")
My goal is to change the text color by accessing color parameters stored in a database. I can get the colors to work when I define them manually. However, when I try to pull them from the database, the colors don't work. I suspect I'm not converting the tuples into usable Unicode correctly, but I can't seem to wrap my head around it.
Here is my code:
#!/usr/bin/python
##### Modules to Import ######
import database
import sqlite3

##### Connect To Databases #####
conn = sqlite3.connect('project.db')
c = conn.cursor()

def workingCode():
    class bcolors:
        status_read = '\033[97m'
        status_good = '\033[32m'
        status_warning = '\033[33m'
        status_bad = '\033[31m'
        status_reset = '\033[0m'
    print "This is how " + bcolors.status_read + "I want the " + bcolors.status_good \
        + "text to be " + bcolors.status_warning + " printed " + bcolors.status_bad \
        + " on the screen." + bcolors.status_reset

def nonWorkingCode():
    c.execute('SELECT * FROM text_colors')
    text_colors = c.fetchone()
    class bcolors:
        status_read, status_good, status_warning, status_bad, task_start, task_success, \
            lighting_text = text_colors[:7]
    print "Instead " + bcolors.status_read + "I get " + bcolors.status_good + \
        "a whole " + bcolors.status_warning + " bunch of this " + bcolors.status_bad + " garbage."

workingCode()
nonWorkingCode()
Here is the finished working code I just spent 30 minutes on; let me know if you have any questions. JP:
#!/usr/bin/python
import sqlite3
from sqlite3 import Error

sqlite_file = 'project.db'

def workingCode():
    class bcolors:
        status_read = '\033[97m'
        status_good = '\033[32m'
        status_warning = '\033[33m'
        status_bad = '\033[31m'
        status_reset = '\033[0m'
    print("This is how " + bcolors.status_read + "I want the " + bcolors.status_good
          + "text to be " + bcolors.status_warning + " printed " + bcolors.status_bad
          + " on the screen." + bcolors.status_reset)

def nonWorkingCode():
    conn = sqlite3.connect(sqlite_file)
    c = conn.cursor()
    # Create text_colors table
    create_table("text_colors")
    # then populate text_colors table
    populate_tbl("text_colors")
    c.execute('SELECT STATUS, COLOR FROM text_colors')
    status_color_mappings = c.fetchall()
    status_color_dictionary = dict(status_color_mappings)
    print(status_color_dictionary)
    class dictColors:
        status_read = str(status_color_dictionary['status_read'])
        status_good = str(status_color_dictionary['status_good'])
        status_warning = str(status_color_dictionary['status_warning'])
        status_bad = str(status_color_dictionary['status_bad'])
        status_reset = str(status_color_dictionary['status_reset'])
    print("Instead " + dictColors.status_read + "I get " + dictColors.status_good + "a whole " +
          dictColors.status_warning + " bunch of this " + dictColors.status_bad + " garbage." +
          " on the screen." + dictColors.status_reset)

def create_table(ptbl):
    """ Assemble DDL (Data Definition Language) CREATE TABLE statement and build
    the sqlite3 db table.

    Args:
        string: new db table name.
    Returns:
        Status string, '' or 'SUCCESS'.
    """
    retval = ''
    sqlCmd = ''
    try:
        conn = sqlite3.connect(sqlite_file)
        c = conn.cursor()
        if ptbl == 'text_colors':
            sqlCmd = 'CREATE TABLE IF NOT EXISTS ' + ptbl + ' (STATUS TEXT, COLOR TEXT)'
        else:
            pass
        if sqlCmd != '':
            c.execute(sqlCmd)
            conn.commit()
        conn.close()
        retval = 'SUCCESS'
    except Error as e:
        retval = 'FAIL'
        print(e)
    return retval

def populate_tbl(p_fml_tbl):
    """ Populate the color-mapping table with the ANSI escape codes.

    :param p_fml_tbl: table name to populate
    :return: Status string, '' or 'SUCCESS'.
    """
    retval = ''
    try:
        conn = sqlite3.connect(sqlite_file)
        c = conn.cursor()
        c.execute('INSERT INTO ' + p_fml_tbl + ' (STATUS, COLOR) VALUES (?, ?)', ('status_read', '\033[97m'))
        c.execute('INSERT INTO ' + p_fml_tbl + ' (STATUS, COLOR) VALUES (?, ?)', ('status_good', '\033[32m'))
        c.execute('INSERT INTO ' + p_fml_tbl + ' (STATUS, COLOR) VALUES (?, ?)', ('status_warning', '\033[33m'))
        c.execute('INSERT INTO ' + p_fml_tbl + ' (STATUS, COLOR) VALUES (?, ?)', ('status_bad', '\033[31m'))
        c.execute('INSERT INTO ' + p_fml_tbl + ' (STATUS, COLOR) VALUES (?, ?)', ('status_reset', '\033[0m'))
        conn.commit()
        conn.close()
        retval = 'SUCCESS'
    except Error as e:
        print(e)
    return retval

if __name__ == '__main__':
    workingCode()
    nonWorkingCode()
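A slightly leaner variant of the same idea (my own sketch, not part of JP's answer): skip the helper class and read the colors straight out of the dictionary:

import sqlite3

conn = sqlite3.connect('project.db')
c = conn.cursor()
c.execute('SELECT STATUS, COLOR FROM text_colors')
colors = dict(c.fetchall())  # e.g. {'status_read': '\033[97m', ...}
conn.close()

print(colors['status_read'] + "colored text" + colors['status_reset'])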
Here is the code:
pythonlist = ['Name','Mno']
datalist = ["qwerty",'234']
sql = "SELECT " + ",".join(pythonlist) + " FROM data WHERE name = '"+ "','".join(datalist) + "' INTO OUTFILE filename"
print(sql)
OUTPUT:
SELECT Name,Mno FROM data WHERE Name= 'qwerty','234'
DESIRED OUTPUT:
SELECT Name,Mno FROM data WHERE Name = 'qwerty' and Mno = 234
Do note the removal of quotation marks around 'Mno'.
The reason I am doing this is that the column names, as well as the values corresponding to them, will change frequently.
Code:
queryparams = {'Name': 'qwerty', 'Mno': '234'}
and_clause = [' %s = %s ' for _ in queryparams]
and_clause_str = ' and '.join(and_clause)
sql = 'SELECT %s FROM data WHERE ' + and_clause_str
params = [','.join(queryparams.keys())]
for k, v in queryparams.items():
    params.append(str(k))
    params.append(str(v))
print(sql)
print(params)
cursor.execute(sql, params=tuple(params))
This still works if you add 10 or 20 more items to the dictionary, and it helps prevent SQL injection by passing values through params instead of string concatenation.
Try this:
data = {'Name': 'qwerty', 'Mno': '234'}
keys = list(data.keys())
sql = "SELECT " + ", ".join(keys) + " FROM data WHERE " + \
      keys[0] + " = '" + str(data[keys[0]]) + "' and " + \
      keys[1] + " = " + str(data[keys[1]])
print(sql)
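If the column names can be validated against an allow-list, a middle ground (a sketch, assuming a DB-API connector with the %s paramstyle such as mysql.connector) is to interpolate only the identifiers and pass every value as a parameter:

data = {'Name': 'qwerty', 'Mno': '234'}
allowed = {'Name', 'Mno'}    # identifiers cannot be parameterized,
assert set(data) <= allowed  # so validate them before interpolating

where = ' AND '.join(f"{col} = %s" for col in data)
sql = f"SELECT {', '.join(data)} FROM data WHERE {where}"
cursor.execute(sql, tuple(data.values()))

Only the values travel as parameters, so the quoting of 'qwerty' versus 234 is handled by the driver.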
I have to connect the SQL database to Python so that I can add new user data via Python.
I have tried the int conversion, which gets me into further trouble with null-type datasets.
I have tried changing the bracket placement. It doesn't work.
import os
import datetime
import pyodbc

file_open = open("filenames.txt", "r")
path = 'C:\\Users\\Timble\\Desktop\\Face_recognition\\user-id_filenames\\'
flag_loc = 1
flag_proc = 0
flag_vis = 0

file_read_lines = file_open.readlines()
for line in file_read_lines:
    for character in line:
        if character == "_":
            details = line.split("_")
            now = datetime.datetime.now()
            name = line
            print("name:", name)  # col-3
            print("type of name:", type(name))
            user_id = int(details[1])
            print("user_id:", details[1])  # col-2
            print("type of user_id:", type(user_id))
            date = details[2]
            print("date on which photo is taken:", details[2])  # col-4
            print("type of data:", type(details[2]))
            now = now.strftime("%Y-%m-%d %H:%M:%S")
            print("Current date and time: ", now)  # col-6
            print("type of current date:", type(now))
            path2 = path + details[1]
            if os.path.exists(path2):
                print(path2)
            else:
                os.makedirs(path2)
            date = str(date)
            print("type of date", type(date))
            user_id = str(user_id)
            print("type of user_id", type(user_id))
            name = str(name)
            print("type of name", type(name))
            now = str(now)
            print("type of now", type(now))
            flag_loc = str(flag_loc)
            print("type loc flag", type(flag_loc))
            flag_proc = str(flag_proc)
            print("type proc flag", type(flag_proc))
            flag_vis = str(flag_vis)
            print("type vis flag", type(flag_vis))
            conn = pyodbc.connect(
                "DRIVER={SQL Server};"
                "server=DESKTOP-3ORBD3I\\MSSQL;"
                "database=TimbleSecuritySystem;"
                "uid=sa;"
                "pwd=P#ssword")
            cur = conn.cursor()
            sqlInsertUser = "Insert Into retraining (date, user_id, image_name, location_flagged, processing_flagged, insert_date, visible) Values ( " + date + " , " + user_id + " , " + name + " , " + flag_loc + " , " + flag_proc + " , " + now + " , " + flag_vis + " )"
            print(sqlInsertUser)
            cur.execute(sqlInsertUser)
            conn.commit()
            break
file_open.close()
The actual result is that print(sqlInsertUser) prints all the right values, but I am expecting the execute command to work and the data to be added in SQL.
This line is the problem:
sqlInsertUser = "Insert Into retraining (date, user_id, image_name, location_flagged, processing_flagged, insert_date, visible) Values ( " + date + " , " + user_id + " , " + name + " , " + flag_loc + " , " + flag_proc + " , " + now + " , " + flag_vis + " )"
For example, if name contains invalid characters, e.g. "[" or "]", the execute call fails because the name string is not properly enclosed. (It should be enclosed in a pair of quotes.)
You can use the parameter substitution support in pyodbc, e.g.
sqlInsertUser = ("Insert Into retraining (date, user_id, image_name, "
                 "location_flagged, processing_flagged, insert_date, "
                 "visible) Values (?,?,?,?,?,?,?)")
then run
cur.execute(sqlInsertUser, date, user_id, name, flag_loc, flag_proc, now, flag_vis)
(My sample code above is untested. You might need to fix some syntax errors.)
For more details about the syntax, see https://www.python.org/dev/peps/pep-0249/#paramstyle or https://github.com/mkleehammer/pyodbc/wiki/Cursor
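For completeness, pyodbc's Cursor.execute also accepts the parameters as a single sequence, so an equivalent call is:

params = (date, user_id, name, flag_loc, flag_proc, now, flag_vis)
cur.execute(sqlInsertUser, params)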