I am trying to create a function that reads from my CSV file and inserts the rows into a SQL table.
This is my code:
def transaction_import(path):
    with open(path, 'r') as f:
        reader = csv.reader(f)
        columns = next(reader)
        query = 'insert into transactions({0}) values ({1})'
        query = query.format(','.join(columns), ','.join('?' * len(columns)))
        cursor = conn.cursor()
        for data in reader:
            cursor.execute(query, data)
transactions = transaction_import('../data/budapest.csv')

c.execute("select * from transactions")
transactions = c.fetchall()
for row in transactions:
    print(row)
What I want to do is read several sets of transactions from different CSVs. All of them have the same structure and column names, e.g.:

transactions = transaction_import('../Data/Source/ny.csv')
transactions = transaction_import('../Data/Source/london.csv')
When I run it I get this error:

File "/Users/.../main.py", line 82, in transaction_import
    cursor.execute(query, (data,))
sqlite3.ProgrammingError: Incorrect number of bindings supplied. The current statement uses 4, and there are 1 supplied.
The code you posted calls cursor.execute(query, data), but the traceback shows cursor.execute(query, (data,)). Wrapping data in a one-element tuple supplies a single binding where the statement needs four; pass the row directly, as in cursor.execute(query, data).
Also, Google is your friend, see: sqlite3.ProgrammingError: Incorrect number of bindings supplied. The current statement uses 1, and there are 74 supplied
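For reference, a minimal corrected sketch of the import function, assuming an sqlite3 connection named conn as in the question (the database filename is a placeholder):

import csv
import sqlite3

conn = sqlite3.connect('transactions.db')  # placeholder filename

def transaction_import(path):
    with open(path, 'r', newline='') as f:
        reader = csv.reader(f)
        columns = next(reader)  # the header row supplies the column names
        query = 'insert into transactions({0}) values ({1})'.format(
            ','.join(columns), ','.join('?' * len(columns)))
        cursor = conn.cursor()
        for data in reader:
            cursor.execute(query, data)  # data is a list: one binding per column
        conn.commit()  # persist the inserts before reading them back

transaction_import('../Data/Source/ny.csv')
transaction_import('../Data/Source/london.csv')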
Is it possible to download data to a CSV file with the cx_Oracle module so that floating-point numbers have a comma instead of a dot?
I need this functionality to properly load the downloaded CSV file into another table in the Oracle database. When I try to load such a CSV file with floating-point numbers, I get an error: cx_Oracle.DatabaseError: ORA-01722: invalid number
I have already solved the problem using the pandas library.
My question: is there a solution that does not use pandas DataFrames?
def load_csv():
    conn = cx_Oracle.connect(user=db_user, password=db_userpwd, dsn=dsn, encoding="UTF-8")
    cursor = conn.cursor()
    cursor.execute("select * from tablename")
    result_set = cursor.fetchall()
    with open(table_name['schemat'] + "__" + table_name['tabela'] + ".csv", "w") as csv_file:
        csv_writer = csv.writer(csv_file, delimiter='|', lineterminator="\n", quoting=csv.QUOTE_NONNUMERIC)
        for row in result_set:
            csv_writer.writerow(row)
    #df = pandas.read_sql("select * from tablename", conn)
    #df.to_csv(table_name['schemat']+"__"+table_name['tabela']+".csv", index=False, encoding='utf-8', decimal=',', sep='|', header=False)
    cursor.close()
    conn.close()
def export_csv():
    # Open connection to Oracle DB
    conn = cx_Oracle.connect(user=db_user, password=db_userpwd, dsn=dsn, encoding="UTF-8")
    # Open cursor to Oracle DB
    cursor = conn.cursor()
    batch_size = 1
    with open(table_name['schemat'] + "__" + table_name['tabela'] + ".csv", 'r') as csv_file:
        csv_reader = csv.reader(csv_file, delimiter='|')
        sql = sql_insert
        data = []
        for line in csv_reader:
            data.append([i for i in line])
            if len(data) % batch_size == 0:
                cursor.executemany(sql, data)
                data = []
        if data:
            cursor.executemany(sql, data)
    conn.commit()
    cursor.close()
    conn.close()
I tried to set it up by changing the session, but unfortunately it doesn't work for me:
# -*- coding: utf-8 -*-
import csv
import os
import sys
import time
import decimal
import pandas as pd
import cx_Oracle

dsn = "(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=xxx)" \
      "(PORT=xxx))(CONNECT_DATA=(SERVICE_NAME = xxx)))"
db_user = "xxx"
db_userpwd = "xxx"

def init_session(conn, requested_tag):
    cursor = conn.cursor()
    cursor.execute("alter session set nls_numeric_characters = ', '")
    cursor.execute("select to_number(5/2) from dual")
    dual, = cursor.fetchone()
    print("dual=", repr(dual))

pool = cx_Oracle.SessionPool(user=db_user, password=db_userpwd,
                             dsn=dsn, session_callback=init_session, encoding="UTF-8")

with pool.acquire() as conn:
    # Open cursor to Oracle DB
    cursor = conn.cursor()
    cursor.execute("select value from nls_session_parameters where parameter = 'NLS_NUMERIC_CHARACTERS'")
    nls_session_parameters, = cursor.fetchone()
    print("nls_session_parameters=", repr(nls_session_parameters))
    #qryString = "select * from tablename"
    #df = pd.read_sql(qryString, conn)
    #df.to_csv(table_name['schemat']+"__"+table_name['tabela']+".csv", index=False, encoding='utf-8', decimal=',')
    cursor.execute("select * from tablename")
    result_set = cursor.fetchall()
    #result, = cursor.fetchone()
    #print("result is", repr(result))
    with open(table_name['schemat'] + "__" + table_name['tabela'] + ".csv", "w") as csv_file:
        csv_writer = csv.writer(csv_file, delimiter='|', lineterminator="\n")
        for row in result_set:
            csv_writer.writerow(row)
I would be grateful for any suggestions on how I can get the data into a CSV file without the pandas library.
Example:
problematic result: 123.45
correct result: 123,45
Another, possibly simpler option:
Create an output type handler that tells Oracle to fetch the value as a string. Then replace the period with a comma:
import cx_Oracle as oracledb

def output_type_handler(cursor, name, default_type, size, precision, scale):
    if default_type == oracledb.DB_TYPE_NUMBER:
        return cursor.var(str, arraysize=cursor.arraysize,
                          outconverter=lambda s: s.replace(".", ","))

conn = oracledb.connect("user/password@host:port/service_name")
conn.outputtypehandler = output_type_handler
with conn.cursor() as cursor:
    cursor.execute("select * from TestNumbers")
    for row in cursor:
        print(row)
Put the output type handler on the cursor if you only want to do this for one query instead of all queries.
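For example, a minimal sketch of the per-cursor variant, reusing the output_type_handler defined above (TestNumbers is the sample table from that snippet):

with conn.cursor() as cursor:
    cursor.outputtypehandler = output_type_handler  # applies to this cursor only
    cursor.execute("select * from TestNumbers")
    for row in cursor:
        print(row)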
You can do it with a TO_CHAR(<numeric_value>, '999999999D99999999999', 'NLS_NUMERIC_CHARACTERS=''.,''') conversion, such as:
cursor.execute("""
    SELECT TRIM(TO_CHAR(5/2, '999999999D99999999999',
                        'NLS_NUMERIC_CHARACTERS=''.,'''))
      FROM dual
""")
result_set = cursor.fetchall()
with open(table_name['schemat'] + "__" + table_name['tabela'] + ".csv", "w") as csv_file:
    csv_writer = csv.writer(csv_file, delimiter='|', lineterminator="\n")
    for row in result_set:
        csv_writer.writerow(row)
By the way, switching ''.,'' to '',.'' swaps the decimal and group characters and yields 2,50000000000, with the comma as the decimal separator.
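Applied to a real query, the same idea might look like the following sketch (the table and column names are hypothetical, not from the question):

# hypothetical table and column names, for illustration only
cursor.execute("""
    SELECT id,
           TRIM(TO_CHAR(amount, '999999999D99999999999',
                        'NLS_NUMERIC_CHARACTERS='',.''')) AS amount
      FROM transactions
""")
for row in cursor:
    print(row)  # amount arrives as a string with a comma decimal separator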
Since you're writing to a text file, and presumably also want to avoid any Oracle-decimal-to-Python-binary precision issues, fetching as a string like Anthony showed has advantages. If you want to move the cost of the decimal separator conversion to the DB, you can combine his solution and yours by adding this to your original code:
def output_type_handler(cursor, name, default_type, size, precision, scale):
    if default_type == cx_Oracle.NUMBER:
        return cursor.var(str, arraysize=cursor.arraysize)
and then after you open the cursor (and before executing), add the handler:
cursor.outputtypehandler = output_type_handler
Since the DB does the conversion to string, the value of NLS_NUMERIC_CHARACTERS is respected and you get commas as the decimal separator.
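Putting the pieces together, a sketch of how that might slot into the original load_csv (the alter session value comes from the question's init_session, and table_name is the question's dict):

conn = cx_Oracle.connect(user=db_user, password=db_userpwd, dsn=dsn, encoding="UTF-8")
cursor = conn.cursor()
cursor.execute("alter session set nls_numeric_characters = ', '")  # comma as decimal separator
cursor.outputtypehandler = output_type_handler  # numbers now arrive as strings
cursor.execute("select * from tablename")
with open(table_name['schemat'] + "__" + table_name['tabela'] + ".csv", "w") as csv_file:
    csv_writer = csv.writer(csv_file, delimiter='|', lineterminator="\n")
    for row in cursor:
        csv_writer.writerow(row)  # numeric columns are written as '123,45'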
I am trying to populate a MySQL database table from a CSV file using PyMySQL. The CSV file has approximately 948,000 rows. The script works fine, but only about 840,000 rows appear in the database; I don't know where the rest go.
I am guessing this has something to do with the connection.commit() method, so I have tried committing at the end of the script as well as after every 10,000 INSERT queries, but nothing works so far. Any ideas what I might be doing wrong?
I have attached the relevant code snippet below:
with gzip.open(temp_file_path, "rt", encoding="utf-8") as f:
    reader = csv.reader(f)
    for row in reader:
        # if num % 10000 == 0:
        #     conn.commit()
        print("[+] Processing row: ", num)
        sql = "INSERT INTO `{0}`({1}) VALUES({2})".format(table_name, ", ".join(columns),
                                                          ", ".join(["%s"] * len(columns)))
        result = cursor.execute(sql, row)
        if result == 1:
            num += 1
        else:
            print("Not inserted!")
conn.commit()
cursor.close()
conn.close()
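For reference, a sketch of the commit-every-N-rows pattern the question describes, batched with executemany; the identifiers (temp_file_path, columns, table_name, cursor, conn) come from the snippet above, and the batch size is arbitrary:

import csv
import gzip

BATCH = 10000
sql = "INSERT INTO `{0}`({1}) VALUES({2})".format(
    table_name, ", ".join(columns), ", ".join(["%s"] * len(columns)))

with gzip.open(temp_file_path, "rt", encoding="utf-8") as f:
    reader = csv.reader(f)
    batch = []
    for row in reader:
        batch.append(row)
        if len(batch) == BATCH:
            cursor.executemany(sql, batch)  # returns the number of affected rows
            conn.commit()
            batch = []
    if batch:
        cursor.executemany(sql, batch)  # flush the final partial batch
        conn.commit()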
I am trying to insert data in chunks from CSV files in a folder, but I cannot get the SQLite insert query right. I was able to perform it without the lists, so I know that the data is correct.
However, when I use the lists I get the error: sqlite3.ProgrammingError: Incorrect number of bindings supplied. The current statement uses 10, and there are 65 supplied.
Any ideas?
import csv, sqlite3, time, os

def chunks(data, rows=10000):
    data = list(data)
    for i in range(0, len(data), rows):
        yield data[i:i+rows]

if __name__ == "__main__":
    datab = 'MYDB'
    con = sqlite3.connect(datab + '.db')
    con.text_factory = str
    cur = con.cursor()
    koko = 'C:\\MYFOLDER\\'
    print(koko)
    directory = koko
    print(directory)
    for file in os.listdir(directory):
        for searchfile, csvfile, csvcolumn, tablecolumn, table, valuemark, valcsvcolumn in zip(
                ['USR02_FINAL.csv'],
                ['USR02_FINAL.csv'],
                [['SYS,MANDT, BNAME, GLTGV, GLTGB, USTYP, CLASS, UFLAG, ERDAT, TRDAT']],
                [['SYS,MANDT, BNAME, GLTGV2, GLTGB2, USTYP, CLASS, UFLAG, ERDAT2, TRDAT2']],
                ['USR_02_ALL_RAW2'],
                [['?,?,?,?,?,?,?,?,?,?']],
                [['SYS,MANDT, BNAME, GLTGV, GLTGB, USTYP, CLASS, UFLAG, ERDAT, TRDAT']]):
            if file.endswith(searchfile):
                fileinsert = directory + '\\' + csvfile
                csvData = csv.reader(open(fileinsert, "rt"))
                divData = chunks(csvData)  # divide into 10000 rows each
                for chunk in divData:
                    cur.execute('BEGIN TRANSACTION')
                    for csvcolumn in chunk:
                        print(searchfile, csvfile, csvcolumn, tablecolumn, table, valuemark, valcsvcolumn)
                        cur.execute("""INSERT OR IGNORE INTO """ + table + """ (""" + ', '.join(tablecolumn) + """) VALUES (""" + ', '.join(valuemark) + """)""", (', '.join(valcsvcolumn)))
                    cur.execute('COMMIT')
Look at the loops:

for chunk in divData:
    # ...
    for csvcolumn in chunk:
        # ...
        ...join(valcsvcolumn)

You only use csvcolumn in the print, not in the insert statement; the insert binds ', '.join(valcsvcolumn), which is an unrelated value. That joined value is a single 65-character string, and sqlite3 treats a string as a sequence of individual characters, hence "65 supplied" against the 10 placeholders. The row you actually want to bind is csvcolumn. This is most likely the problem.
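A sketch of the corrected inner loop under that reading (csvcolumn is one parsed CSV row, which must have exactly ten fields to match the ten placeholders):

insert_sql = ("INSERT OR IGNORE INTO " + table +
              " (" + ', '.join(tablecolumn) + ")" +
              " VALUES (" + ', '.join(valuemark) + ")")
for csvcolumn in chunk:
    # csvcolumn is already a list of field values: one binding per placeholder
    cur.execute(insert_sql, csvcolumn)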
I've created a Python script that sets up a SQLite database and creates a table. Now I'm trying to read values into the table from a .txt file, as follows:
import sqlite3

conn = sqlite3.connect('mydatabase.db')
c = conn.cursor()

c.execute('''CREATE TABLE mytable
             (var1 TEXT,
              var2 REAL)''')

c.execute('separator "," ')
c.execute('import records.txt myTable ')

conn.commit()

for row in c.execute('SELECT * FROM myTable'):
    print(row)

conn.close()
The records.txt looks like:
item1, 8.8
item2, 9.1
When I run the Python code from the command line I get:

c.execute('separator "," ')
sqlite3.OperationalError: near "separator": syntax error

How do I use the separator statement here? And will the same problem occur for the import statement?
How to get this code working?
I don't think this is possible with the Python sqlite3 module. The .separator command (for me it only works with the leading dot), as well as .import, are features of the sqlite3 CLI front end.
If you want to use these, you could use subprocess to invoke the commands:
import subprocess

p = subprocess.Popen(["sqlite3", "mydatabase.db"], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
p.communicate(b"""
CREATE TABLE mytable
    (var1 TEXT,
     var2 REAL);
.separator ","
.import records.txt myTable
""")
c.execute('separator "," ')
should be replaced with:
sql_statement = ''' INSERT INTO mytable(var1, var2) VALUES (?, ?) '''
values = ('text', 343.4)
c.execute(sql_statement, values)
The question marks are the placeholders for the values you want to insert. Note that you should pass these values in a tuple; if you want to insert a single value into your database, the values argument should look like this:
values = (text,)
A working solution:

import sqlite3

# create database and initialize cursor
conn = sqlite3.connect('mydatabase.db')
c = conn.cursor()

# create table if not exists
c.execute('''CREATE TABLE IF NOT EXISTS mytable(var1 TEXT, var2 REAL)''')

sql_insert = ''' INSERT INTO mytable(var1, var2) VALUES (?, ?) '''
with open('records.txt', 'r') as fr:
    for line in fr.readlines():
        # parse records.txt: strip the newline and split on the comma
        line = line.replace('\n', '').split(',')
        t, f = line
        c.execute(sql_insert, (t, float(f)))
    conn.commit()

sql_select = ''' SELECT * FROM mytable '''
for row in c.execute(sql_select):
    print(row)

conn.close()
The solution above will work fine. Just to make it a little more readable and easier to debug you could also go like this:
import sqlite3

conn = sqlite3.connect('mydatabase.db')
c = conn.cursor()
c.execute('''CREATE TABLE mytable (var1 TEXT, var2 REAL)''')

with open("records.txt", "r") as f:
    rows = f.readlines()

for row in rows:
    fields = row.split(',')
    c.execute(f"INSERT INTO mytable (var1, var2) "
              f"VALUES ('{fields[0]}','{fields[1]}')")
conn.commit()

for row in c.execute('SELECT * FROM myTable'):
    print(row)

conn.close()
This way you open the text file with Python and read the lines. For each line you then separate the fields and put them into the INSERT statement right away. Quick and easy!
Good luck!
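One caveat worth adding: interpolating field values straight into the SQL string is open to SQL injection and breaks on values containing quotes. A parameterized sketch of the same loop:

for row in rows:
    fields = row.strip().split(',')
    # placeholders let sqlite3 handle quoting and type conversion safely
    c.execute("INSERT INTO mytable (var1, var2) VALUES (?, ?)", (fields[0], fields[1]))
conn.commit()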
Found an example using cx_Oracle; this example shows all the information of Cursor.description:
import cx_Oracle
from pprint import pprint

connection = cx_Oracle.Connection("%s/%s@%s" % (dbuser, dbpasswd, oracle_sid))
cursor = cx_Oracle.Cursor(connection)
sql = "SELECT * FROM your_table"
cursor.execute(sql)
data = cursor.fetchall()

print("(name, type_code, display_size, internal_size, precision, scale, null_ok)")
pprint(cursor.description)
pprint(data)

cursor.close()
connection.close()
What I wanted to see was just the list of column names (element 0 of each Cursor.description tuple), so I changed the code:
import cx_Oracle
import pprint

connection = cx_Oracle.Connection("%s/%s@%s" % (dbuser, dbpasswd, oracle_sid))
cursor = cx_Oracle.Cursor(connection)
sql = "SELECT * FROM your_table"
cursor.execute(sql)
data = cursor.fetchall()

col_names = []
for i in range(0, len(cursor.description)):
    col_names.append(cursor.description[i][0])

pp = pprint.PrettyPrinter(width=1024)
pp.pprint(col_names)
pp.pprint(data)

cursor.close()
connection.close()
I think there must be better ways to print out the column names. Please suggest alternatives to a Python beginner. :-)
You can use list comprehension as an alternative to get the column names:
col_names = [row[0] for row in cursor.description]
Since cursor.description returns a list of 7-element tuples, you can take the 0th element of each tuple, which is the column name.
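As a small usage sketch, those names can also be zipped with each fetched row to build dicts (your_table is the placeholder table from the examples above):

cursor.execute("SELECT * FROM your_table")
col_names = [d[0] for d in cursor.description]
for row in cursor:
    record = dict(zip(col_names, row))  # e.g. {'ID': 1, 'NAME': 'foo'}
    print(record)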
Here is the code:
import csv
import sys
import cx_Oracle

db = cx_Oracle.connect('user/pass@host:1521/service_name')
SQL = "select * from dual"
print(SQL)

cursor = db.cursor()
f = open(r"C:\dual.csv", "w")  # raw string so the backslash is taken literally
writer = csv.writer(f, lineterminator="\n", quoting=csv.QUOTE_NONNUMERIC)
r = cursor.execute(SQL)

# this takes the column names
col_names = [row[0] for row in cursor.description]
writer.writerow(col_names)

for row in cursor:
    writer.writerow(row)
f.close()
The SQLAlchemy source code is a good starting point for robust methods of database introspection. Here is how SQLAlchemy reflects table names from Oracle:
SELECT table_name FROM all_tables
WHERE nvl(tablespace_name, 'no tablespace') NOT IN ('SYSTEM', 'SYSAUX')
AND OWNER = :owner
AND IOT_NAME IS NULL
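A sketch of running that reflection query through cx_Oracle with a bind variable (the credentials and schema name are placeholders):

import cx_Oracle

connection = cx_Oracle.connect("user/password@host:1521/service_name")  # placeholder credentials
cursor = connection.cursor()
cursor.execute("""
    SELECT table_name FROM all_tables
    WHERE nvl(tablespace_name, 'no tablespace') NOT IN ('SYSTEM', 'SYSAUX')
    AND OWNER = :owner
    AND IOT_NAME IS NULL
""", owner="MYSCHEMA")  # :owner bound to the schema being inspected
table_names = [row[0] for row in cursor]
print(table_names)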