I want to copy some data from a SQLite DB to a MySQL DB ...
However, if I have for example 3 sqlite datasets the script creates 3 new tables in my target-db (mysql) but just inserts data in the 1st and 2nd db ... the 3rd stays empty
.. but I still get a "1" for each query ...
import sqlite3
import pymysql as mysql
db_connection_source = sqlite3.connect(db_path)
db_connection_target = mysql.connect("localhost", "username", "pw", "strf")
sql_target = db_connection_target.cursor()
data_tables = []
speed = []
hr = []
elev = []
gps = []
get_tables = db_connection_source.execute("SELECT name FROM sqlite_master WHERE type='table'")
for row_tables in get_tables:
if row_tables[0].find("chest_second") == False:
table_label = "sport_activity_"+row_tables[0][13:len(row_tables[0])]
get_data = sql_target.execute("SHOW TABLES LIKE '"+table_label+"'")
if get_data == 1:
print("[X] Old Data found: "+row_tables[0])
else:
print("[ ] New Data found: "+row_tables[0])
data_tables.append(row_tables[0])
for chest_sec in data_tables:
get_data = db_connection_source.execute("SELECT id, speed, hr, elevation, lat, lon from "+chest_sec)
table_label = "sport_activity_"+chest_sec[13:len(chest_sec)]
create_newTable = sql_target.execute("CREATE TABLE `"+table_label+"`(`id` INT NOT Null AUTO_INCREMENT,"
"`speed` FLOAT(16,10) NULL,"
"`bpm` FLOAT(16,10) NULL,"
"`elev` FLOAT(16,10) NULL,"
"`gps_lat` FLOAT(16,10) NULL,"
"`gps_lon` FLOAT(16,10) NULL,"
"`raw_filename` TEXT NULL,"
"PRIMARY KEY (`id`))")
print ("["+table_label+"] Copying data in database")
check_speed = "no"
check_bpm = "no"
check_elev = "no"
check_lat = "no"
check_lon = "no"
for row in get_data:
if float(row[1]) > 0:
check_speed = "yes"
if row[2] > 0:
check_bpm = "yes"
if row[3] > 0:
check_elev = "yes"
if row[4] > 0:
check_lat = "yes"
if row[5] > 0:
check_lon = "yes"
query = ("INSERT INTO "+table_label+" (speed, bpm, elev, gps_lat, gps_lon, raw_filename)"
"VALUES ('"+str(row[1])+"','"+str(row[2])+"','"+str(row[3])+"','"+str(row[4])+"','"+str(row[5])+"','"+str(chest_sec)+"')")
print(query)
sql_target.execute(query)
print ("["+table_label+"] Indexing new entry")
date_raw = chest_sec[13:len(chest_sec)]
date_new = date_raw[0:4]+"-"+date_raw[5:7]+"-"+date_raw[8:10]+" "+date_raw[11:13]+":"+date_raw[14:16]+":"+date_raw[17:19]
write_123 = sql_target.execute("INSERT INTO sport_index (datum, speed_data, hr_data, elev_data, strength_data, review, gps_data, second_id)"
"VALUES ('"+str(date_new)+"','"+str(check_speed)+"','"+str(check_bpm)+"','"+str(check_elev)+"','0','1','"+str(check_lat)+"','"+str(chest_sec)+"')")
print (write_123)
Try using db_connection_target.commit() at the end or while creating a connection use autocommit=True
Related
#pip install sqlalchemy
#from Google import Create_Service # link to source code is in the description
import pyodbc as odbc # pip install pypyodbc
import pandas as pd
import sqlalchemy
import sqlite3
"""
Step 1.1 Connect to MS SQL Server Database System``
"""
server = '192.168.102.4'
database = ''
username = ''
password = ''
driver = 'ODBC Driver 17 for SQL Server'
def connection_string(driver , server, database):
conn_string = f"""
DRIVER={{{driver}}};
SERVER={server};
DATABASE={database};
uid={username};
pwd={password};
Trust_Connection=yes;
"""
return conn_string
try:
conn = odbc.connect(connection_string(driver, server, database))
print('Connection Created')
except odbc.DatabaseError as e:
print('Database Error:')
except odbc.Error as e:
print('Connection Error:')
else:
sql_query = ("SELECT TOP 1 "
"NOME = B.A1_NOME, CPF = B.A1_CGC, "
"'E-MAIL' = CASE WHEN LEN(ISNULL(CC.CLIEMAIL, '')) > 5 THEN CC.CLIEMAIL "
"WHEN LEN(ISNULL(DD.CLIEXTEMAIL2, '')) > 5 THEN DD.CLIEXTEMAIL2 "
"ELSE B.A1_EMAIL COLLATE Latin1_General_BIN END, "
"DDD = CASE WHEN LEN(ISNULL(CC.CLIDDDCELULAR, '')) > 0 THEN CC.CLIDDDCELULAR "
"WHEN LEN(ISNULL(DD.CLIEXTDDDCELULAR2, '')) > 0 THEN DD.CLIEXTDDDCELULAR2 "
"ELSE B.A1_DDD COLLATE Latin1_General_BIN END, "
"CELULAR = CASE WHEN LEN(ISNULL(CC.CLICELULAR, '')) > 5 THEN CC.CLICELULAR "
"WHEN LEN(ISNULL(DD.CLIEXTCELULAR2, '')) > 5 THEN DD.CLIEXTCELULAR2 "
"ELSE B.A1_TEL COLLATE Latin1_General_BIN END, "
"DATACADASTRO = CONVERT(VARCHAR,DATEADD(DAY,((ASCII(SUBSTRING(A1_USERLGI,12,1))-50)*100+(ASCII(SUBSTRING(A1_USERLGI,16,1))-50)),'19960101'),112), "
"ANIVERSARIO = CASE WHEN LEN(B.A1_DTNASC) > 5 THEN B.A1_DTNASC "
"ELSE CONVERT(VARCHAR(10), CC.CLIDTANASCIMENTO, 112) END, "
"ENDERECO = B.A1_END, "
"DOCUMENTO = A.L1_DOC, "
"CODIGOPRODUTO = E.L2_PRODUTO, "
"QUANTIDADE = E.L2_QUANT, "
"VALORUNITARIO = E.L2_VRUNIT, "
"VALORPEDIDO = E.L2_VLRITEM, "
"DATAPEDIDO = A.L1_DTLIM, "
"LOJA = A.L1_FILIAL, "
"CODVENDEDOR = A.L1_VEND, VENDEDOR = D.A3_NOME, "
"PDV = A.L1_PDV "
"FROM "
"[192.168.102.6].DBTOTVS12.dbo.SL1010 A, [192.168.102.6].DBTOTVS12.dbo.SA3010 D, "
"[192.168.102.6].DBTOTVS12.dbo.SL2010 E, "
"[192.168.102.6].DBTOTVS12.dbo.SA1010 B LEFT OUTER JOIN CLIENTES CC ON LEN(LTRIM(RTRIM(B.A1_CGC))) > 1 AND CONVERT(DECIMAL(14, 0), LTRIM(RTRIM(B.A1_CGC))) = CC.CLICPFCNPJ LEFT OUTER JOIN CLIENTESEXTENSAO DD ON CC.CLICODIGO = DD.CLICODIGO "
"WHERE "
"A.L1_CLIENTE = B.A1_COD "
"AND A.L1_CLIENTE <> '000000001' "
"AND A.L1_DTLIM >= '20210101' "
"AND A.L1_SITUA = 'OK' "
"AND A.L1_FILIAL = E.L2_FILIAL "
"AND A.L1_NUM = E.L2_NUM "
"AND A.L1_PDV = E.L2_PDV "
"AND A.L1_DOC = E.L2_DOC "
"AND E.L2_VEND = D.A3_COD "
"AND E.L2_FILIAL = D.A3_FILIAL "
"AND A.D_E_L_E_T_ = '' "
"AND B.D_E_L_E_T_ = '' "
"AND D.D_E_L_E_T_ = '' "
"AND E.D_E_L_E_T_ = '' "
"ORDER BY L1_DTLIM " )
cursor = conn.cursor()
# cursor.execute(sql_query)
cursor.execute(sql_query)
"""
Step 1.2 Retrieve Dataset from SQL Server
"""
recordset = cursor.fetchall()
#print(recordset)
columns = [col[0] for col in cursor.description]
#df = pd.read_sql(sql_query,conn )
#print(df.head(1000))
#df = pd.DataFrame([data],columns=['Nome','CPF','Email','DDD','Celular','Data de Cadastro','Aniversário','Endereço','Nº do pedido','Código de produto','Quantidade de produtos','Valor Unitário','Valor do pedido','Data do pedido','Loja do pedido','cod vendedor','Vendedor responsável','PDV'])
df = pd.DataFrame(recordset,columns=columns)
#df = df.transpose()
# if 'published_date' in df.columns:
# df['published_date'] = df['published_date'].dt.strftime('%Y-%m-%d %H:%M:%S')
# recordset = df.values.tolist()
"""
Step 2. Export Dataset to Google Spreadsheets
"""
gs_sheet_id = '1nFC9Q9TqdatLrDSA48uW2dqQuYT7YALXWjd0vmGZuqk'
tab_id = 0
CLIENT_SECRET_FILE = 'yuryrogens.json'
API_NAME = 'sheets'
API_VERSION = 'v4'
SCOPES = ['https://www.googleapis.com/auth/spreadsheets']
service = (CLIENT_SECRET_FILE, API_NAME, API_VERSION, SCOPES)
# create spreadsheets reference object
mySpreadsheets = service.spreadsheets().get(
spreadsheetId='1nFC9Q9TqdatLrDSA48uW2dqQuYT7YALXWjd0vmGZuqk'
).execute()
recordset
tab_name = [sheet['properties']['title'] for sheet in mySpreadsheets['sheets'] if sheet['properties']['sheetId'] == tab_id][0]
"""
Insert dataset
"""
def construct_request_body(value_array, dimension: str='ROWS') -> dict:
try:
request_body = {
'majorDimension': dimension,
'values': value_array
}
return request_body
except Exception as e:
print(e)
return {}
"""
Insert column names
"""
request_body_columns = construct_request_body([columns])
service.spreadsheets().values().update(
spreadsheetId='1nFC9Q9TqdatLrDSA48uW2dqQuYT7YALXWjd0vmGZuqk',
valueInputOption='USER_ENTERED',
range='!A1',
body=request_body_columns
).execute()
"""
Insert rows
"""
request_body_values = construct_request_body(recordset)
service.spreadsheets().values().update(
spreadsheetId='1nFC9Q9TqdatLrDSA48uW2dqQuYT7YALXWjd0vmGZuqk',
valueInputOption='USER_ENTERED',
range='!A2',
body=request_body_values
).execute()
print('Task is complete')
cursor.close()
conn.close()
when getting a query to try to throw to googlhesheets I'm having this problem using
[df = pd.DataFrame([recordset],columns=columns)
gives the error
ValueError: 18 columns past, past data had 1 columns
and if I use
df = pd.DataFrame(recordset,columns=columns)
as well as the following error:
ValueError: Shape of pass values is (1, 1), indices implicit (1, 18)
Could you try something like this ?
records=cursor.fetchall()
record_list=[]
for record in records:
record_list.append(str(record[0]))
From my experience with pyodbc, this has given me something i can work with.
We can then
pd.DataFrame(record_list)
and manipulate the resulting data as needed
In the line:
for row in cursor.execute("SELECT classCapacity FROM tblClass1 WHERE classID = ?", [classID]):
I am getting a binding error saying the statement uses one and six have been supplied and have been stuck trying to fix it for hours now any suggestions are welcome.
def addBooking():
def CostCalculation():
Cost = 0
Validation = False
sqlite3.connect("TestProject.db")
cursor = connection.cursor()
for row in cursor.execute("SELECT classCost from tblClass1 WHERE classID =?", cClassID):
Validation = True
cost = row[0]
Cost = (int(cost))
return Cost, Validation
connection = sqlite3.connect("TestProject.db")
curosr = connection.cursor()
cClientID = getID(clientID.get())
cClassID = classID.get()
cDate = bookingDate.get()
cCost, costValidation = CostCalculation()
bookingRec = [cClientID, cClassID, cDate, cCost]
for row in cursor.execute("SELECT classCapacity FROM tblClass1 WHERE classID = ?", [classID]):
maxCap = row[0]
capacity = 0
for row in cursor.execute("SELECT * FROM tblBooking WHERE classID = ? and bookingDate =?", [cClassID, cDate]):
capacity = capacity + 1
if capacity < maxCap:
cursor.execute("INSERT INTO tblBooking VALUES(Null,?,?,?,?)", bookingRec)
connection.commit()
connection.close()
I've some code to input data into list, how can I import data in my list into database?
import psycopg2
import random
import string
import time
conn = psycopg2.connect(host="localhost",database="postgres", user="postgres", password="potatona1")
cursor = conn.cursor()
FullChar = 'CEFLMPRTVWXYK0123456789#'
total = 4
count = 10
count = int(count)
for i in range(1000):
for x in range(total):
unique_code = ''.join(random.sample(FullChar, count - 1)) + '#'
unique_code = ''.join(random.sample(unique_code, len(unique_code)))
list(unique_code)
postgres_insert_query = """ INSERT INTO employees (id_employee, name) VALUES (%s,%s)"""
record_to_insert = (1, unique_code)
cursor.execute(postgres_insert_query, record_to_insert)
conn.commit()
count = cursor.rowcount
print (count, "Record inserted successfully into mobile table")
I want import 1000 data to postgresql with python.
i just trying this, and it works
conn = psycopg2.connect(host="192.168.13.10",database="postgres", port="5432", user="postgres", password="potatona1")
cursor = conn.cursor()
FullChar = 'CEFLMPRTVWXYK0123456789'
total = 1000
count = 10
count = int(count)
entries = []
bcd = ""
flg = ""
rll = ""
def inputDatabase(data):
postgres_insert_query = """INSERT INTO unique_code(unique_code, barcode, flag, roll) VALUES (%s,%s,%s,%s)"""
cursor.executemany(postgres_insert_query, data)
conn.commit()
for i in range(5):
for x in range(total): # banyaknya code yang di print
unique_code = ''.join(random.sample(FullChar, count - 1))
unique_code = ''.join(random.sample(unique_code, len(unique_code)))
entry = (unique_code, bcd, flg, rll)
entries.append(entry)
inputDatabase(entries)
print(i)
count = cursor.rowcount
print (count, "Record inserted successfully into mobile table")
I am trying to bulk insert locations on wordpress. I have defined functions to check and adding terms and taxonomy
def checkTerm(term,con):
cur = con.cursor()
query = "SELECT term_id FROM wp_terms as t WHERE t.name = '%s'" % term
print query
cur.execute(query)
rows = cur.fetchall()
if rows: return rows[0][0]
else : return None
def addTerm(term,slug,con):
cur = con.cursor()
try:
query = "INSERT INTO `wp_terms` (`name`,`slug`,`term_group`) VALUES ('%s','%s',0)" % (term,slug)
print query
cur.execute(query)
con.commit()
rows = checkTerm(term,con)
if rows: return rows[0][0]
else : return None
except:
return None
def checkTaxonomy(term_id,con):
cur = con.cursor()
query = "SELECT tt.term_taxonomy_id,tt.parent FROM wp_term_taxonomy AS tt INNER JOIN wp_terms AS t ON tt.term_id = t.term_id WHERE tt.taxonomy = 'project_location' AND t.term_id = '%s'" % term_id
print query
cur.execute(query)
rows = cur.fetchall()
if rows: return rows
else : return None
def addTaxonomy(term_id,taxonomy,description,parent,count,con):
cur = con.cursor()
query = "INSERT INTO `wp_term_taxonomy` (`term_id`,`taxonomy`,`description`,`parent`,`count`) VALUES ('%s','%s','%s','%s','%s')" % (term_id,taxonomy,description,parent,count)
print query
cur.execute(query)
con.commit()
rows = checkTaxonomy(term_id,con)
if rows: return rows
else: return None
I store cities in dictionary of dicionaries
df = pd.read_table('./Argentina.csv',sep='\t',header=None,engine='python')
for line in xrange(len(df)):
stringa = str(df[17][line])
location = str(df[1][line])
population = int(df[14][line])
if population < limit_pop: continue
string_state = stringa.split("/")
country = string_state[1]
state = string_state[2]
if not country in states:
states[country] = {}
if not state in states[country]:
states[country][state] = [location]
else :
states[country][state].append(location)
Then I try to insert terms and taxonomies in the wordpress db
con = mdb.connect('localhost', 'root', 'mypassword, 'Wordpress')
for country in states:
country_id = checkTerm(country.replace("_"," "),con)
if not country_id:
country_id = addTerm(country.replace("_"," "),country,con)
taxonomy = checkTaxonomy(country_id,con)
if not taxonomy:
taxonomy = addTaxonomy(country_id,'project_location','','0','0',con)
parent = dict((y, x) for x, y in taxonomy)
if not 0 in parent:
taxonomy = addTaxonomy(country_id,'project_location','','0','0',con)
for state in states[country]:
state_id = checkTerm(state.replace("_"," "),con)
if not state_id:
state_id = addTerm(state.replace("_"," "),state,con)
taxonomy = checkTaxonomy(state_id,con)
if not taxonomy:
taxonomy = addTaxonomy(state_id,'project_location','',country_id,'0',con)
parent = dict((y, x) for x, y in taxonomy)
if not country_id in parent:
taxonomy = addTaxonomy(state_id,'project_location','',country_id,'0',con)
for location in states[country][state]:
location_id=checkTerm(location.replace("_"," "),con)
if not location_id:
location_id = addTerm(location.replace("_"," "),location,con)
taxonomy = checkTaxonomy(location_id,con)
if not taxonomy:
taxonomy = addTaxonomy(location_id,'project_location','',state_id,'0',con)
parent = dict((y, x) for x, y in taxonomy)
if not state_id in parent:
taxonomy = addTaxonomy(location_id,'project_location','',state_id,'0',con)
When I try to execute the script I found this behaviour
SELECT term_id FROM wp_terms as t WHERE t.name = 'Argentina'
INSERT INTO `wp_terms` (`name`,`slug`,`term_group`) VALUES ('Argentina','Argentina',0)
SELECT term_id FROM wp_terms as t WHERE t.name = 'Argentina'
SELECT tt.term_taxonomy_id,tt.parent FROM wp_term_taxonomy AS tt INNER JOIN wp_terms AS t ON tt.term_id = t.term_id WHERE tt.taxonomy = 'project_location' AND t.term_id = 'None'
INSERT INTO `wp_term_taxonomy` (`term_id`,`taxonomy`,`description`,`parent`,`count`) VALUES ('None','project_location','','0','0')
SELECT tt.term_taxonomy_id,tt.parent FROM wp_term_taxonomy AS tt INNER JOIN wp_terms AS t ON tt.term_id = t.term_id WHERE tt.taxonomy = 'project_location' AND t.term_id = 'None'
And the script stop with the following error
./import.py:59: Warning: Truncated incorrect DOUBLE value: 'None'
cur.execute(query)
./import.py:69: Warning: Incorrect integer value: 'None' for column 'term_id' at row 1
cur.execute(query)
Traceback (most recent call last):
File "./import.py", line 115, in <module>
parent = dict((y, x) for x, y in taxonomy)
TypeError: 'NoneType' object is not iterable
This means that the insert statements are not executed. I don't understand. I con.commit() the query but it is still not executed. Where is the problem?
Solution:
I changed
import MySQLdb as mdb
to
import mysql.connector
and
con = mdb.connect(host='localhost',user='root', password='passowrd',database= 'Wordpress');
to
con = mysql.connector.connect(host='localhost',user='root', password='password',database= 'Wordpress',autocommit=False,buffered=False);
i use a function calculate servers prices so i made a function which retrieve a defalut prices of a server components and calculate server price for each server exsit in my DB but i try to run this function, i get this error:
function.py
import MySQLdb
def calculations_metric (param) :
db = MySQLdb.connect("localhost", "root", "aqw", "PFE_Project")
cursor = db.cursor()
sql = "SELECT * FROM examples_calculationsmetric"
cursor.execute(sql)
results = cursor.fetchall()
for row in results:
RAM_prices = int(row[1])
Core_prices = int(row[2])
HHD_SATA_prices =int(row[3])
HHD_SSD_prices =int(row[4])
CPU_priority = int(row[5])
Avaibility = int(row[6])
db.close()
db1 = MySQLdb.connect("localhost", "root", "aqw", "PFE_Project")
cursor1 = db1.cursor()
sql1 = "SELECT * FROM examples_servercomponents WHERE id ='%d'" %(param)
cursor1.execute(sql1)
results1 = cursor1.fetchall()
for row in results1:
if row[6] == 'SATA':
Core_price = int(row[2]) * Core_prices # the error is here
Priority_price = int(row[3]) * CPU_priority
RAM_price = int(row[4]) * RAM_prices
HDD_price = int(row[5]) * HHD_SATA_prices
Availibility_price = int(row[7])*Avaibility
elif row[6] == 'SSD':
Core_price = int(row[2]) * Core_prices
Priority_price = int(row[3]) * CPU_priority
RAM_price = int(row[4]) * RAM_prices
HDD_price = int(row[5]) * HHD_SSD_prices
Availibility_price = int(row[7])*Avaibility
price = Core_price + Priority_price + RAM_price + HDD_price + Availibility_price
db1.close()
return price
i don't get what is the error so if can anyone help i will be so greatful
When your SELECT * FROM examples_calculationsmetric doesn't return any results, Core_prices is never set (nor are the other variables in that loop).
Python names do not exist until assigned to, so if results is an empty list, the names inside the for loop never get assigned to and thus do not exist by the time you loop over results1 later on.
You could set default values for those names as a work-around:
RAM_prices = 0
Core_prices = 0
HHD_SATA_prices = 0
HHD_SSD_prices = 0
CPU_priority = 0
Avaibility = 0
to at least ensure that they are defined.