I'm trying to perform bulk extracts/loads from Oracle to MySQL using cx_Oracle and SQLAlchemy.
I found this example online and it works well for most data types, but it fails for BLOB data types:
https://vbaoverall.com/transfer-data-from-oracle-to-mysql-using-sqlalchemy-python/
I have about 43 tables and about 12 of them have BLOB data types.
import cx_Oracle
import pandas as pd
from sqlalchemy import create_engine
import pymysql
import warnings
warnings.filterwarnings('ignore')
# list out all 43 tables:
table_list = [
    "FILE",
    "ATTACHMENT",
    "DOCUMENTS",
    "USERS",
    "INFO",
    "ONE",
    "TWO",
    "THREE",
    "FOUR",
    "...."
]
# Set Oracle connection
dsn_tns = cx_Oracle.makedsn('source.example.com', '1530', service_name='test')
oracle_connection = cx_Oracle.connect(user='root', password='toot', dsn=dsn_tns)
# Open Oracle cursor
cursor = oracle_connection.cursor()
# Set MySQL connection and disable foreign key checks
mysql_engine = create_engine("mysql+pymysql://root:toot@target.example.com:3306/target")
mysql_engine.execute("SET FOREIGN_KEY_CHECKS=0")
# Loop through the tables:
for table in table_list:
    # select from Oracle
    sql = "SELECT * FROM " + table
    # read into a pandas DataFrame
    data = pd.read_sql(sql, oracle_connection)
    # insert into MySQL
    mysql_engine.execute("TRUNCATE TABLE " + table)
    data.to_sql(table, con=mysql_engine, if_exists='append', index=False, chunksize=10000)
    print("{}: successfully inserted {} rows.".format(table, data.shape[0]))
# Re-enable foreign key checks
mysql_engine.execute("SET FOREIGN_KEY_CHECKS=1")
# Close connections
oracle_connection.close()
mysql_engine.dispose()
Here's the error I'm getting:
return "'%s'" % escape_string(str(value), mapping)
TypeError: __str__ returned non-string (type bytes)
Thanks to @Gord Thompson, I found out I just needed to specify dtype=:
import cx_Oracle
import pandas as pd
from sqlalchemy import create_engine
import sqlalchemy
import pymysql
import warnings
warnings.filterwarnings('ignore')
table_list = [
    "FILE",
    "ATTACHMENT",
    "DOCUMENTS",
    "USERS",
    "INFO",
    "ONE",
    "TWO",
    "THREE",
    "FOUR",
    "...."
]
# Set Oracle connection
dsn_tns = cx_Oracle.makedsn('source.example.com', '1530', service_name='test')
oracle_connection = cx_Oracle.connect(user='root', password='toot', dsn=dsn_tns)
# Open Oracle cursor
cursor = oracle_connection.cursor()
# Set MySQL connection and disable foreign key checks
mysql_engine = create_engine("mysql+pymysql://root:toot@target.example.com:3306/target")
mysql_engine.execute("SET FOREIGN_KEY_CHECKS=0")
for table in table_list:
    # select from Oracle
    sql = "SELECT * FROM " + table
    # read into a pandas DataFrame
    data = pd.read_sql(sql, oracle_connection)
    # map the BLOB column to an explicit SQLAlchemy type
    dtype = {}
    if table == "ATTACHMENT":
        dtype['FILE_CONTENT'] = sqlalchemy.types.PickleType
    # insert into MySQL
    mysql_engine.execute("TRUNCATE TABLE " + table)
    data.to_sql(table, con=mysql_engine, if_exists='append', index=False, chunksize=10000, dtype=dtype)
    print("{}: successfully inserted {} rows.".format(table, data.shape[0]))
# Re-enable foreign key checks
mysql_engine.execute("SET FOREIGN_KEY_CHECKS=1")
# Close connections
oracle_connection.close()
mysql_engine.dispose()
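A possible generalization (my own sketch, not part of the accepted fix above): since about 12 of the 43 tables contain BLOBs, a small helper can build the dtype mapping automatically instead of hard-coding table and column names. The helper name and the use of sqlalchemy.types.LargeBinary are my assumptions; it presumes the BLOB columns come back from Oracle as Python bytes objects, as the original TypeError suggests.
import sqlalchemy

def blob_dtype_map(df):
    """Hypothetical helper: map every column whose values are bytes to
    sqlalchemy.types.LargeBinary so to_sql binds them as MySQL BLOBs."""
    mapping = {}
    for col in df.columns:
        non_null = df[col].dropna()
        if not non_null.empty and isinstance(non_null.iloc[0], bytes):
            mapping[col] = sqlalchemy.types.LargeBinary
    return mapping

# inside the table loop, instead of the hard-coded ATTACHMENT check:
# data.to_sql(table, con=mysql_engine, if_exists='append',
#             index=False, chunksize=10000, dtype=blob_dtype_map(data))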
Related
import mysql.connector
import pymongo
import json
from pymongo import MongoClient
mysql_host="**"
mysql_database="**"
mysql_schema = "**"
mysql_user="**"
mysql_password="**"
mongodb_host = "**"
mongodb_dbname = "**"
mysqldb = mysql.connector.connect(
    host="**",
    database="**",
    user="**",
    password="***"
)
if (mysqldb.is_connected()):
    print("successfully Connected")
    mycursor = mysqldb.cursor(dictionary=True)
    mycursor.execute("SELECT Name from MYSQLTABLE;")
    Ndata = mycursor.fetchall()
    print(Ndata)
    mycursor.execute("SELECT location from MYSQLTABLE;")
    Ldata = mycursor.fetchall()
    print(Ldata)
myclient = pymongo.MongoClient(mongodb_host)
mydb = myclient[mongodb_dbname]
mycol = mydb["CASES"]
if len(Ndata) > 0:
    if len(Ldata) > 0:
        if len(iddata) > 0:
            x = mycol.insert_many([{"Name": Ndata, "location": Ldata}])
            print(len(x.inserted_ids))
I have written the above code, but the data migrated this way ends up as an array; I want to migrate the data as strings.
You can also suggest any other easy way to migrate data from MySQL to MongoDB using Python with proper mapping (selected MySQL columns to selected MongoDB fields).
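One possible approach for the string-per-row requirement (a sketch under the assumption that each MySQL row should become one MongoDB document with plain string fields; the table, collection, and placeholder credentials are reused from the code above): select both columns in a single query and insert one document per row, instead of inserting the whole fetchall() lists as single fields.
import mysql.connector
import pymongo

mysqldb = mysql.connector.connect(host="**", database="**", user="**", password="***")
mycursor = mysqldb.cursor(dictionary=True)
# fetch both columns together so the values of each row stay paired
mycursor.execute("SELECT Name, location FROM MYSQLTABLE;")
rows = mycursor.fetchall()

myclient = pymongo.MongoClient("**")
mycol = myclient["**"]["CASES"]

if rows:
    # one document per MySQL row, values cast to plain strings
    docs = [{"Name": str(row["Name"]), "location": str(row["location"])} for row in rows]
    result = mycol.insert_many(docs)
    print("migrated {} documents".format(len(result.inserted_ids)))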
OK, I have tried several kinds of solutions recommended on this site and others, but I can't get it to work the way I would like.
I get an XML response which I normalize and then save to a CSV. This first part works fine.
Instead of saving to a CSV I would like to save into an existing table in an Access database. Two problems with the second part below:
I would like to use an existing table instead of creating a new one.
The result is not separated by ";" into different columns; everything ends up in the same column, not separated.
import requests
import pandas as pd
import pyodbc

response = requests.get(u, headers=h).json()
dp = pd.json_normalize(response, 'Units')
response_list.append(dp)
export = pd.concat(response_list)
export.to_csv(r'C:\Users\username\Documents\Python Scripts\Test\Test2_' + str(now) + '.csv', index=False, sep=';', encoding='utf-8')

access_path = r"C:\Users\username\Documents\Python Scripts\Test\Test_db.accdb"
conn = pyodbc.connect("DRIVER={{Microsoft Access Driver (*.mdb, *.accdb)}};DBQ={};"
                      .format(access_path))

strSQL = "SELECT * INTO projects2 FROM [text;HDR=Yes;FMT=sep(;);" + \
         "Database=C:\\Users\\username\\Documents\\Python Scripts\\Test].Testdata.csv;"
cur = conn.cursor()
cur.execute(strSQL)
conn.commit()
conn.close()
If you already have the data in a well-formed pandas DataFrame then you don't really need to dump it to a CSV file; you can use the sqlalchemy-access dialect to push the data directly into an Access table using pandas' to_sql() method:
from pprint import pprint
import urllib
import pandas as pd
import sqlalchemy as sa
connection_string = (
    r"DRIVER={Microsoft Access Driver (*.mdb, *.accdb)};"
    r"DBQ=C:\Users\Public\Database1.accdb;"
    r"ExtendedAnsiSQL=1;"
)
connection_uri = f"access+pyodbc:///?odbc_connect={urllib.parse.quote_plus(connection_string)}"
engine = sa.create_engine(connection_uri)

with engine.begin() as conn:
    # existing data in table
    pprint(
        conn.execute(sa.text("SELECT * FROM user_table")).fetchall(), width=30
    )
    """
    [('gord', 'gord@example.com'),
     ('jennifer', 'jennifer@example.com')]
    """

# DataFrame to insert
df = pd.DataFrame(
    [
        ("newdev", "newdev@example.com"),
        ("newerdev", "newerdev@example.com"),
    ],
    columns=["username", "email"],
)
df.to_sql("user_table", engine, index=False, if_exists="append")

with engine.begin() as conn:
    # updated table
    pprint(
        conn.execute(sa.text("SELECT * FROM user_table")).fetchall(), width=30
    )
    """
    [('gord', 'gord@example.com'),
     ('jennifer', 'jennifer@example.com'),
     ('newdev', 'newdev@example.com'),
     ('newerdev', 'newerdev@example.com')]
    """
(Disclosure: I am currently the maintainer of the sqlalchemy-access dialect.)
Solved with the following code
SE_export_Tuple = list(zip(SE_export.Name, SE_export.URL, SE_export.ImageUrl, ......, SE_export.ID))
print(SE_export_Tuple)

access_path = r"C:\Users\username\Documents\Python Scripts\Test\Test_db.accdb"
conn = pyodbc.connect("DRIVER={{Microsoft Access Driver (*.mdb, *.accdb)}};DBQ={};"
                      .format(access_path))
cursor = conn.cursor()
mySql_insert_query = "INSERT INTO Temp_table (UnitName,URL,ImageUrl,.......,ID) VALUES (?,?,?,......,?)"
cursor.executemany(mySql_insert_query, SE_export_Tuple)
conn.commit()
conn.close()
However, when I add many fields I get an error at "executemany", saying:
cursor.executemany(mySql_insert_query,SE_export_Tuple)
Error: ('HY004', '[HY004] [Microsoft][ODBC Microsoft Access Driver]Invalid SQL data type (67) (SQLBindParameter)')
I have been trying to insert data from a DataFrame in Python into a table that already exists in SQL Server. The DataFrame has 90K rows and I want the fastest possible way to insert the data into the table. I only have read, write, and delete permissions on the server and cannot create any tables.
Below is the code, which inserts the data but is very slow. Please advise.
import pandas as pd
import xlsxwriter
import pyodbc

df = pd.read_excel(r"Url path\abc.xlsx")

conn = pyodbc.connect('Driver={ODBC Driver 11 for SQL Server};'
                      'SERVER=Server Name;'
                      'Database=Database Name;'
                      'UID=User ID;'
                      'PWD=Password;'
                      'Trusted_Connection=no;')
cursor = conn.cursor()

# Deleting existing data in SQL table:
cursor.execute("DELETE FROM database.schema.TableName")
conn.commit()

# Inserting data into SQL table:
for index, row in df.iterrows():
    cursor.execute("INSERT INTO [Table Name]([A],[B],[C]) values (?,?,?)", row['A'], row['B'], row['C'])
conn.commit()
cursor.close()
conn.close()
To insert data much faster, try using sqlalchemy and df.to_sql. This requires you to create an engine using sqlalchemy, and to make things faster, use the option fast_executemany=True:
import urllib
import sqlalchemy

connect_string = urllib.parse.quote_plus(f'DRIVER={{ODBC Driver 11 for SQL Server}};Server=<Server Name>,<port>;Database=<Database name>')
engine = sqlalchemy.create_engine(f'mssql+pyodbc:///?odbc_connect={connect_string}', fast_executemany=True)

with engine.connect() as connection:
    df.to_sql(<table name>, connection, index=False)
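One caveat to the snippet above (my assumption, not something the answer states): to_sql() defaults to if_exists='fail' and raises a ValueError when the target table already exists. Since the question says the table is pre-existing and cannot be recreated, the last line likely needs to become:
with engine.connect() as connection:
    # 'append' keeps the existing table (and your permissions on it) and just
    # bulk-inserts the DataFrame rows; chunksize is optional for large frames
    df.to_sql(<table name>, connection, index=False, if_exists='append', chunksize=1000)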
Here is the script and hope this works for you.
import pandas as pd
import pyodbc as pc
connection_string = "Driver=SQL Server;Server=localhost;Database={0};Trusted_Connection=Yes;"
cnxn = pc.connect(connection_string.format("DataBaseNameHere"), autocommit=True)
cur=cnxn.cursor()
df = pd.read_csv("your_filepath_and_filename_here.csv").fillna('')
query = 'insert into TableName({0}) values ({1})'
query = query.format(','.join(df.columns), ','.join('?' * len(df.columns)))
cur.fast_executemany = True
cur.executemany(query, df.values.tolist())
cnxn.close()
This should do what you want...very generic example...
# Insert from DataFrame to table in SQL Server
import time
import pandas as pd
import pyodbc

# create timer
start_time = time.time()

df = pd.read_csv("C:\\your_path\\CSV1.csv")

conn_str = (
    r'DRIVER={SQL Server Native Client 11.0};'
    r'SERVER=Excel-PC\SQLEXPRESS;'
    r'DATABASE=NORTHWND;'
    r'Trusted_Connection=yes;'
)
cnxn = pyodbc.connect(conn_str)
cursor = cnxn.cursor()

for index, row in df.iterrows():
    cursor.execute('INSERT INTO dbo.Table_1([Name],[Address],[Age],[Work]) values (?,?,?,?)',
                   row['Name'],
                   row['Address'],
                   row['Age'],
                   row['Work'])
cnxn.commit()
cursor.close()
cnxn.close()

# see total time to do insert
print("%s seconds ---" % (time.time() - start_time))
Try that and post back if you have additional questions/issues/concerns.
For one thing, replace df.iterrows() with df.apply(), or better, remove the loop entirely for something much more efficient.
Try populating a temp table with one index or none, then inserting it into your target table all at once.
That might speed things up because the indexes don't have to be updated after each insert (see the sketch below).
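Here is a rough sketch of that temp-table idea (my own illustration, not from the original answer: it reuses the column names and connection string from the generic example above, and the #staging table and its column types are hypothetical). The rows are bulk-loaded into an unindexed local temp table, then moved into the indexed target in a single set-based INSERT ... SELECT.
import pandas as pd
import pyodbc

conn_str = (
    r'DRIVER={SQL Server Native Client 11.0};'
    r'SERVER=Excel-PC\SQLEXPRESS;'
    r'DATABASE=NORTHWND;'
    r'Trusted_Connection=yes;'
)
df = pd.read_csv("C:\\your_path\\CSV1.csv")

cnxn = pyodbc.connect(conn_str)
cursor = cnxn.cursor()
# optional: speeds up the staging insert; leave at the default if your
# driver has trouble binding parameters for the temp table
cursor.fast_executemany = True

# hypothetical unindexed staging table; local temp tables live in tempdb,
# so they do not require CREATE TABLE rights in the user database
cursor.execute("CREATE TABLE #staging ([Name] nvarchar(255), [Address] nvarchar(255), [Age] int, [Work] nvarchar(255))")
cursor.executemany(
    "INSERT INTO #staging ([Name],[Address],[Age],[Work]) VALUES (?,?,?,?)",
    df[["Name", "Address", "Age", "Work"]].values.tolist(),
)

# one set-based insert into the real (indexed) table
cursor.execute("INSERT INTO dbo.Table_1 ([Name],[Address],[Age],[Work]) "
               "SELECT [Name],[Address],[Age],[Work] FROM #staging")
cnxn.commit()
cnxn.close()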
I am trying to store a pickled nested dictionary in Postgresql (I am aware that this is a quick & dirty method and won't be able to access dictionary contents from Postgresql - usually bad practice)
# boilerplate, preamble and upstream work
import pickle
import psycopg2

''' Inputs: nd = dictionary to be pickled '''
pickled = pickle.dumps(nd)

connection = psycopg2.connect(user="-----",
                              password="----",
                              host="----",
                              port="----",
                              database="----")
name = 'database1'
print('Connected...')
cursor = connection.cursor()
print(connection.get_dsn_parameters(),"\n")
cursor.execute("CREATE TABLE thetable (name TEXT, ablob BYTEA)")
print('Created Table...')
cursor.execute("INSERT INTO thetable VALUES(%s)",(psycopg2.Binary(pickled),))
connection.commit()
print('Added Data...')
cursor.close()
connection.close()
print('Connection closed...')
When it comes to data retrieval, I am having many issues importing the data from Postgres - essentially the data needs to be read back, unpickled into the dictionary, and visualised. I have tried:
import pickle
import psycopg2
from io import BytesIO

connection = psycopg2.connect(user="----",
                              password="----",
                              host="----",
                              port="----",
                              database="----")
cursor = connection.cursor()
cursor.execute("SELECT ablob FROM thetable")
result, = cursor.fetchone()
cursor.close()
connection.rollback()
result = BytesIO(result)
print(pickle.load(result))
This follows this link: https://www.oreilly.com/library/view/python-cookbook/0596001673/ch08s08.html, and I also consulted "Insert an image in postgresql database" and "saving python object in postgres table with pickle", but I have been unable to get the pickled dictionary back.
Any advice in achieving this is greatly appreciated!
When your CREATE TABLE lists two fields, you have to list in INSERT which ones you want to fill, unless you fill them all.
import psycopg2
import pickle
dict = {
    "foo": "bar"
}
p = pickle.dumps(dict)
connection = psycopg2.connect(database = "test")
cursor = connection.cursor()
cursor.execute("CREATE TABLE thetable (name TEXT, ablob BYTEA)")
cursor.execute("INSERT INTO thetable VALUES(%s,%s)",('test',p))
connection.commit()
cursor.close()
connection.close()
and reading
import psycopg2
import pickle
connection = psycopg2.connect(database = "test")
cursor = connection.cursor()
cursor.execute("SELECT ablob FROM thetable WHERE name='test';")
result = cursor.fetchone()
print(pickle.loads(result[0]))
cursor.close()
connection.close()
I am trying to run the following code to create a Teradata table using the teradata python library:
import teradata
import pandas as pd
udaExec = teradata.UdaExec(appName="Hello", version="1.0",
                           logConsole=False)
session = udaExec.connect(method="odbc", system="tdprod",
                          username="xxx", password="xxx")
sqlStr = "CREATE SET TABLE \"TEST123\" \
          (col1 INTEGER) PRIMARY INDEX (col1);"
result = pd.read_sql(sqlStr, session)
I am receiving the following error:
File "..\pandas\io\sql.py", line 1436, in read_query
columns = [col_desc[0] for col_desc in cursor.description]
TypeError: 'NoneType' object is not iterable
Any idea on how to solve this?
Your SQL (sqlStr) is DDL (CREATE TABLE), so it will NOT deliver any result set that can be placed into a pandas DataFrame by pd.read_sql.
If you just want to create the table, you don't need pandas:
session.execute(sqlStr)
If you want to read from the table "TEST123":
sqlStr = "SELECT col1 FROM Test123;";
result = pd.read_sql(sqlStr, self.session);
or alternatively:
result = pd.read_sql_table("Test123", session)
To add on to hhoeck's answer, it is good practice to use a context manager; otherwise you risk leaving sessions unclosed.
import teradata
import pandas as pd
udaExec = teradata.UdaExec(appName="Hello", version="1.0",
                           logConsole=False)
with udaExec.connect(method="odbc", system="tdprod",
                     username="xxx", password="xxx") as session:
    sqlStr = "CREATE SET TABLE \"TEST123\" \
              (col1 INTEGER) PRIMARY INDEX (col1);"
    # Create table
    session.execute(sqlStr)
    # Read the table back into result (note: use a SELECT here, not the CREATE statement)
    result = pd.read_sql("SELECT col1 FROM TEST123;", session)