Mapping Excel columns to table columns in a database with Python

JSON file:
"mappingdef": [
    {
        "src": "A",
        "dest": "id"
    },
    {
        "src": "B",
        "dest": "expense_type"
    },
    {
        "src": "C",
        "dest": "balance"
    },
    {
        "src": "D",
        "dest": "debit"
    },
    {
        "src": "E",
        "dest": "credit"
    },
    {
        "src": "F",
        "dest": "total_balance"
    }
]
My Python script:
# changing excel column names
df.columns = ["A", "B", "C", "D", "E", "F"]
# fetching data from the dataframe
for row in range(df.shape[0]):
    col_A = str(df.at[row, "A"])
    col_B = str(df.at[row, "B"])
    col_C = float(df.at[row, "C"])
    col_D = float(df.at[row, "D"])
    col_E = float(df.at[row, "E"])
    col_F = float(df.at[row, "F"])
# query to insert data into the database
query2 = """
    INSERT INTO ocean_street_apartments(
        id,
        expense_type,
        balance,
        debit,
        credit,
        total_balance)
    values (%s, %s, %s, %s, %s, %s)
"""
I have this table definition info in JSON, where src is the Excel column and dest is the database table column name. I want to read an Excel file through pandas and map each Excel column (src) to its database table column (dest). I am working in Python.
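For reference, a minimal sketch of the mapping idea (assuming the JSON above is stored in tabledef.json under a top-level "mappingdef" key, that the workbook has exactly those six columns, and that a database connection with cursor and conn is already open; the Excel file name is hypothetical):

import json
import pandas as pd

with open("tabledef.json") as f:
    mapping = json.load(f)["mappingdef"]    # assumed file/key layout

src_cols = [m["src"] for m in mapping]      # Excel column labels
dest_cols = [m["dest"] for m in mapping]    # database column names

df = pd.read_excel("trial_balance.xlsx")    # hypothetical file name
df.columns = src_cols                       # relabel the Excel columns A..F
df = df.rename(columns=dict(zip(src_cols, dest_cols)))

placeholders = ", ".join(["%s"] * len(dest_cols))
query = "INSERT INTO ocean_street_apartments ({}) VALUES ({})".format(
    ", ".join(dest_cols), placeholders)

cursor.executemany(query, df[dest_cols].itertuples(index=False, name=None))
conn.commit()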

Assuming it is a JSON file, it is presumably an API GET response.
Things I am assuming you know how to do:
1) Fetch the GET response; what is returned is an array of object descriptions for every file.
2) Create a script to download these and move them into a DataFrame.
Now you have a list of direct links to the CSV files, which can be read directly with pandas.read_csv(url).
If the data is problematic, transform it first.
Then load the DataFrame directly into a SQL database using pandas.DataFrame.to_sql.
The code below shows how to connect to a SQLite database.
import os
import sqlite3

import pandas as pd
from tqdm import tqdm

def upload_to_sql(filenames, db_name, debug=False):
    """Given a list of paths, upload their contents to a database."""
    conn = sqlite3.connect(f"{db_name}.db")
    if debug:
        print("Uploading into database")
    for i, file_path in tqdm(list(enumerate(filenames))):
        dat = pd.read_csv(file_path)
        # rename labels (factor_dataframe is defined elsewhere)
        filename = os.path.basename(file_path).split('.')[0]
        dat = factor_dataframe(dat, filename)
        # write records to the sql database
        if i == 0:  # if first entry and the table already exists, replace it
            dat.to_sql(db_name, con=conn, index=False, if_exists='replace')
        else:       # otherwise append to the current table given db_name
            dat.to_sql(db_name, con=conn, index=False, if_exists='append')

# upload into sql database
upload_to_sql(download_urls, 'example', debug=True)
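The snippet references a factor_dataframe helper that is not shown. A hypothetical minimal stand-in (purely illustrative, not the original helper) could simply tag each frame with its source file:

def factor_dataframe(dat, filename):
    # Hypothetical stand-in: record which file each row came from
    # so the combined table keeps its provenance.
    dat = dat.copy()
    dat["source_file"] = filename
    return dat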

import psycopg2
import ijson
import pandas as pd

conn = psycopg2.connect(
    host="localhost",
    database="sea",
    user="postgres",
    password="hemant888")
cursor = conn.cursor()

chunk_size = 10
skiprows = 5
file_name = "Ocean Street Apartments Trial Balance 03-22.xlsx"

# create the table from the column definitions in the JSON file
# if it does not exist yet
cursor.execute("""
    SELECT COUNT(*)
    FROM information_schema.tables
    WHERE table_name = 'ocean_street_apartments'
""")
if cursor.fetchone()[0] != 1:
    columns = list()
    datatype = list()
    row = list()
    with open("tabledef.json", "r") as f:
        for record in ijson.items(f, "item"):
            for i in record["def"]["tabledef"]["columns"]:
                columns.append(i["name"])
                datatype.append(i["datatype"])
    for i in range(len(columns)):
        row.append("{col} {dt}".format(col=columns[i], dt=datatype[i]))
    query1 = "create table ocean_street_apartments(" + \
             ",".join(map(str, row)) + ")"
    cursor.execute(query1)
    conn.commit()

# read the Excel file in chunks and insert row by row,
# mapping src (Excel column) to dest (table column)
while True:
    df_chunk = pd.read_excel(file_name, skiprows=skiprows,
                             nrows=chunk_size)
    skiprows += chunk_size
    # when there is no data, we can break out of the loop
    if not df_chunk.shape[0]:
        break
    else:
        columns = list()
        columns_table = list()
        with open("tabledef.json", "r") as f:
            for record in ijson.items(f, "item"):
                for i in record["def"]["tabledef"]["mappingdef"]:
                    columns.append(i["src"])
                    columns_table.append(i["dest"])
        query2 = "INSERT INTO ocean_street_apartments(" + ",".join(
            map(str, columns_table)) + ") values (%s, %s, %s, %s, %s, %s)"
        df_chunk.columns = columns
        for row in range(df_chunk.shape[0]):
            values = tuple(str(df_chunk.at[row, col])
                           for col in df_chunk.columns)
            cursor.execute(query2, values)
        conn.commit()

conn.close()
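As a possible refinement (not part of the original answer), the per-row cursor.execute inside each chunk could be batched into a single cursor.executemany call, which usually cuts the number of round trips to the server. A sketch, reusing the names from the loop above:

rows = [tuple(str(df_chunk.at[r, c]) for c in df_chunk.columns)
        for r in range(df_chunk.shape[0])]
cursor.executemany(query2, rows)
conn.commit()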

Related

How to migrate a table which contains a JSON column from AWS RedShift to BigQuery in Python?

I want to migrate data tables from an AWS database to BigQuery. I have a specific table named sampletable which includes id, user_id and log. log is a JSON field that contains a dictionary of keys and their respective values.
'reason': {
    'id': 5,
    'name': 'Sample name',
    'contact': {
        'number': 123,
        'address': None
    }
},
'subreason': {
    'id': 80,
    'name': 'Sample name',
    'is_active': True,
    'created_at': '2022-07-18T18:33:28.911Z',
    'deleted_at': None,
    'complaint_id': 5,
},
This is the function that loads the data from the table to BigQuery:
def load_data(table_id, data):
    print("load_data::Writing records to table", table_id)
    job_config = bigquery.LoadJobConfig(
        write_disposition="WRITE_APPEND",
        schema=[
            bigquery.SchemaField("id", "INT64"),
            bigquery.SchemaField("user_id", "INT64"),
            bigquery.SchemaField("log", "JSON"),
        ],
    )
    try:
        start = time.time()
        job = client.load_table_from_dataframe(
            data, table_id, job_config=job_config
        )
        job.result()
        end = time.time()
        print("load_data::Time taken for writing " + str(data.shape[0]) + " records: ", end - start, "s")
    except Exception as e:
        print("load_data::exception", e)
        print("load_data::Could not establish connection with Google BigQuery. Terminating program")
        conn.close()
        sys.exit()
However, an exception arises: "cannot mix list and non-list, non-null values".
I tried changing the schema in this way:
schema=[
    bigquery.SchemaField("id", "INT64"),
    bigquery.SchemaField("user_id", "INT64"),
    bigquery.SchemaField("log", "RECORD", fields=[
        bigquery.SchemaField("reason", "RECORD", fields=[
            bigquery.SchemaField("id", "INT64"),
            bigquery.SchemaField("name", "STRING"),
            bigquery.SchemaField("contact", "RECORD", fields=[
                bigquery.SchemaField("number", "STRING"),
                bigquery.SchemaField("address", "STRING"),
            ]),
        ]),
        bigquery.SchemaField("subreason", "RECORD", fields=[
            bigquery.SchemaField("id", "INT64"),
            bigquery.SchemaField("name", "STRING"),
            bigquery.SchemaField("is_active", "BOOLEAN"),
            bigquery.SchemaField("created_at", "TIMESTAMP"),
            bigquery.SchemaField("deleted_at", "TIMESTAMP"),
            bigquery.SchemaField("complaint_id", "INT64"),
        ]),
    ]),
]
However, I get the exception " with type dict: was expecting tuple of (key, value) pair "
Can anyone guide me in this issue as I am new to data migration of JSON columns in tables? What is the proper way to modify the schema to accept the JSON columns for migration?
You can try the approach below.
In this approach, you load the data as the JSON data type in BigQuery. However, the JSON file needs a manual adjustment, since BigQuery accepts newline-delimited JSON for data ingestion. See the sample updated JSON file below.
{"log":{"reason":{"contact":{"address": null,"number": 123},"id": 5,"name": "Sample name"},"subreason": {"complaint_id": 5,"created_at": "2022-07-18T18:33:28.911Z","deleted_at": "None","id": 80,"is_active": true,"name": "Sample name"}}}
Notice that I wrapped the JSON under a single key named "log" and collapsed it onto one line to satisfy the newline-delimited JSON requirement.
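If adjusting the file by hand is impractical, a small sketch (not part of the original answer; the file names and input layout are assumptions) could perform the same reshaping programmatically, given the source records as Python dicts:

import json

def to_ndjson_line(log_dict):
    # Wrap the record under a single "log" key and emit it as one line,
    # as required for newline-delimited JSON ingestion.
    return json.dumps({"log": log_dict}, default=str)

with open("source_logs.json") as src, open("your_json_file.json", "w") as out:
    records = json.load(src)    # assumes a JSON array of log dicts
    for rec in records:
        out.write(to_ndjson_line(rec) + "\n")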
Below is the Python code I used to ingest the data:
table_id = "your-project.your-dataset.your-table"
file_path = "/path/of/your_json_file.json"

def load_table_file(file_path, table_id):
    # [START bigquery_load_from_file]
    from google.cloud import bigquery

    # Construct a BigQuery client object.
    client = bigquery.Client()

    # TODO(developer): Set table_id to the ID of the table to create.
    job_config = bigquery.LoadJobConfig(
        source_format=bigquery.SourceFormat.NEWLINE_DELIMITED_JSON,
        autodetect=True,
        #write_disposition="WRITE_APPEND",
        schema=[
            bigquery.SchemaField("log", "JSON"),
        ],
    )

    with open(file_path, "rb") as source_file:
        job = client.load_table_from_file(source_file, table_id, job_config=job_config)

    job.result()  # Waits for the job to complete.

    table = client.get_table(table_id)  # Make an API request.
    print(
        "Loaded {} rows and {} columns to {}".format(
            table.num_rows, len(table.schema), table_id
        )
    )
    # [END bigquery_load_from_file]
    return table

load_table_file(file_path, table_id)

JSON Response to pyodbc: Better way?

This works, but I have to do it for 30 of the 100 fields of the response. Is there a better way?
for record in data:
    record["lastName"] = record["lastName"].replace("'", "''")
    cursor.execute("Insert Into emp_temp (employeeId, firstName, lastName) values ('" + record["employeeId"] + "','" + record["firstName"] + "','" + record["lastName"] + "')")
cursor.commit()
cursor.close()
conn.close()
Assuming that json.loads() is giving you a simple list of dict objects, that is precisely the format that can be directly consumed by SQLAlchemy:
# https://stackoverflow.com/q/67129218/2144390
import json
import sqlalchemy as sa

response_text = '''\
[{"employeeId": 1, "firstName": "Gord", "lastName": "Thompson"},
{"employeeId": 2, "firstName": "Bob", "lastName": "Loblaw"}]'''

data = json.loads(response_text)
print(type(data))     # <class 'list'>
print(type(data[0]))  # <class 'dict'>

engine = sa.create_engine("mssql+pyodbc://@mssqlLocal64")
emp_temp = sa.Table("emp_temp", sa.MetaData(), autoload_with=engine)

with engine.begin() as conn:
    conn.execute(emp_temp.insert(), data)

# check results
with engine.begin() as conn:
    results = conn.execute(sa.text("SELECT * FROM emp_temp")).fetchall()
print(results)
# [(1, 'Gord', 'Thompson'), (2, 'Bob', 'Loblaw')]

What are the possible ways for JSON data processing using SQL, Elasticsearch, or preprocessing in Python?

I have a case study where I need to take data from a REST API, do some analysis on it using aggregate functions, joins, etc., and use the JSON response data to plot some retail graphs.
Approaches followed so far:
Read the data from JSON, store it in Python variables, and insert it with SQL queries. This is obviously a costly operation, because an insert is issued for every JSON line read. For 33k rows it takes more than 20 minutes, which is inefficient.
This could be handled in Elasticsearch for faster processing, but complex operations like joins are not available there.
If anybody can suggest the best approach (like preprocessing or post-processing in Python) for handling such scenarios, it would be helpful.
Thanks in advance.
SQL script:
def store_data(AccountNo):
    db = MySQLdb.connect(host=HOST, user=USER, passwd=PASSWD, db=DATABASE, charset="utf8")
    cursor = db.cursor()
    insert_query = "INSERT INTO cstore (AccountNo) VALUES (%s)"
    cursor.execute(insert_query, (AccountNo,))
    db.commit()
    cursor.close()
    db.close()
    return

def on_data(file_path):
    # This is the meat of the script...it connects to your mongoDB and stores the tweet
    try:
        # Decode the JSON from Twitter
        testFile = open(file_path)
        datajson = json.load(testFile)
        # print(len(datajson))
        # grab the wanted data from the Tweet
        for i in range(len(datajson)):
            for cosponsor in datajson[i]:
                AccountNo = cosponsor['AccountNo']
                store_data(AccountNo)
    except Exception as exc:  # handler assumed; the original snippet ends mid-try
        print(exc)
Edit 1: JSON added
{
    "StartDate": "1/1/18",
    "EndDate": "3/30/18",
    "Transactions": [
        {
            "CSPAccountNo": "41469300",
            "ZIP": "60098",
            "ReportDate": "2018-03-08T00:00:00",
            "POSCode": "00980030003",
            "POSCodeModifier": "0",
            "Description": "TIC TAC GUM WATERMEL",
            "ActualSalesPrice": 1.59,
            "TotalCount": 1,
            "Totalsales": 1.59,
            "DiscountAmount": 0,
            "DiscountCount": 0,
            "PromotionAmount": 0,
            "PromotionCount": 0,
            "RefundAmount": 0,
            "RefundCount": 0
        },
        {
            "CSPAccountNo": "41469378",
            "ZIP": "60098",
            "ReportDate": "2018-03-08T00:00:00",
            "POSCode": "01070080727",
            "POSCodeModifier": "0",
            "Description": "PAYDAY KS",
            "ActualSalesPrice": 2.09,
            "TotalCount": 1,
            "Totalsales": 2.09,
            "DiscountAmount": 0,
            "DiscountCount": 0,
            "PromotionAmount": 0,
            "PromotionCount": 0,
            "RefundAmount": 0,
            "RefundCount": 0
        }
    ]
}
I do not have your JSON file, so I do not know whether this runs as-is, but I would try something like the code below: read just the account information into a list and then write it to the database in one go with executemany. I expect a much better (shorter) execution time than 20 minutes.
def store_data(AccountNo):
    # AccountNo here is the list of row dicts built in on_data()
    db = MySQLdb.connect(host=HOST, user=USER, passwd=PASSWD, db=DATABASE, charset="utf8")
    cursor = db.cursor()
    # MySQLdb uses pyformat placeholders rather than the :name style
    insert_query = ("INSERT INTO cstore (AccountNo, ZIP, ReportDate) "
                    "VALUES (%(AccountNo)s, %(ZIP)s, %(ReportDate)s)")
    cursor.executemany(insert_query, AccountNo)
    db.commit()
    cursor.close()
    db.close()
    return

def on_data(file_path):
    # This is the meat of the script...it connects to your mongoDB and stores the tweet
    # declare an empty list for all the AccountNo rows
    accountno_list = list()
    try:
        # Decode the JSON from Twitter
        testFile = open(file_path)
        datajson = json.load(testFile)
        # print(len(datajson))
        # grab the wanted data from the Tweet
        # (the posted JSON is a single object, so index straight into 'Transactions')
        for row in datajson['Transactions']:
            values = dict()
            values['AccountNo'] = row['CSPAccountNo']
            values['ZIP'] = row['ZIP']
            values['ReportDate'] = row['ReportDate']
            # from here on you can populate the attributes you need in a similar way
            accountno_list.append(values)
    except Exception:
        pass
    store_data(accountno_list)

building json data from sql database cursor

Without knowing the structure of the JSON, how can I return a JSON object from the database query? All of the information is there, I just can't figure out how to build the object.
import MySQLdb
import json
db = MySQLdb.connect( host, user, password, db)
cursor = db.cursor()
cursor.execute( query )
rows = cursor.fetchall()
field_names = [i[0] for i in cursor.description]
json_string = json.dumps( dict(rows) )
print field_names[0]
print field_names[1]
print json_string
db.close()
count
severity
{"321": "7.2", "1": "5.0", "5": "4.3", "7": "6.8", "1447": "9.3", "176": "10.0"}
The json object would look like:
{"data":[{"count":"321","severity":"7.2"},{"count":"1","severity":"5.0"},{"count":"5","severity":"4.3"},{"count":"7","severity":"6.8"},{"count":"1447","severity":"9.3"},{"count":"176","severity":"10.0"}]}
The problem you are encountering happens because you turn the fetched rows directly into a dict, without their descriptions.
dict in python expects either another dict, or an iterable returning two-item tuples, where for each tuple the first item will be the key, and the second the value.
Since you only fetch two columns, you get the first one (count) as key, and the second (severity) as value for each fetched row.
What you want to do is also combine the descriptions, like so:
json_string = json.dumps([
    {description: value for description, value in zip(field_names, row)}
    for row in rows])
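To get the exact shape shown in the question, with a top-level "data" key, the same comprehension can simply be wrapped in a dict (a small variation, not part of the original answer):

json_string = json.dumps({
    "data": [dict(zip(field_names, row)) for row in rows]
})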
1- You can use pymysql's DictCursor:
import pymysql
connection = pymysql.connect(db="test")
cursor = connection.cursor(pymysql.cursors.DictCursor)
cursor.execute("SELECT ...")
row = cursor.fetchone()
print row["key"]
2- MySQLdb also includes DictCursor that you can use. You need to pass cursorclass=MySQLdb.cursors.DictCursor when making the connection.
import MySQLdb
import MySQLdb.cursors
connection = MySQLdb.connect(db="test",cursorclass=MySQLdb.cursors.DictCursor)
cursor = connection.cursor()
cursor.execute("SELECT ...")
row = cursor.fetchone()
print row["key"]
I got this to work using the collections module, although the code is confusing:
import MySQLdb
import json
import collections

db = MySQLdb.connect(host, user, passwd, db)
cursor = db.cursor()
cursor.execute(query)
rows = cursor.fetchall()

field_names = [i[0] for i in cursor.description]

objects_list = []
for row in rows:
    d = collections.OrderedDict()
    d[field_names[0]] = row[0]
    d[field_names[1]] = row[1]
    objects_list.append(d)

json_string = json.dumps(objects_list)
print json_string
db.close()
[{"count": 176, "severity": "10.0"}, {"count": 1447, "severity": "9.3"}, {"count": 321, "severity": "7.2"}, {"count": 7, "severity": "6.8"}, {"count": 1, "severity": "5.8"}, {"count": 1, "severity": "5.0"}, {"count": 5, "severity": "4.3"}]

Iterating rows with Pyodbc

I am using pyodbc to return a number of rows that are dumped into JSON and sent to a server. I would like to iterate over my SQL table and return all records. I am using cursor.fetchall() now, and the program returns one record, as shown below. When I use fetchone, an error is returned: AttributeError: 'unicode' object has no attribute 'SRNUMBER', and fetchmany returns one record as well. How do I successfully return all records? I am using Python 2.6.7.
Code:
import pyodbc
import json
import collections
import requests

connstr = 'DRIVER={SQL Server};SERVER=server;DATABASE=ServiceRequest; UID=SA;PWD=pwd'
conn = pyodbc.connect(connstr)

cursor = conn.cursor()
cursor.execute("""
    SELECT SRNUMBER, FirstName, LastName, ParentNumber
    FROM MYLA311 """)

rows = cursor.fetchone()

objects_list = []
for row in rows:
    d = collections.OrderedDict()
    d['SRNUMBER'] = row.SRNUMBER
    d['FirstName'] = row.FirstName
    d['LastName'] = row.LastName
    d['ParentNumber'] = row.ParentNumber
objects_list.append(d)

output = {"MetaData": {},
          "SRData": d}

print output
j = json.dumps(output)
print json.dumps(output, sort_keys=True, indent=4)
Output for fetchall and fetchmany:
{
"MetaData": {},
"SRData": {
"FirstName": "MyLAG",
"LastName": "ThreeEleven",
"ParentNumber": "021720151654176723",
"SRNUMBER": "1-3580171"
}
}
Use code from my answer here to build a list of dictionaries for the value of output['SRData'], then JSON encode the output dict as normal.
import pyodbc
import json

connstr = 'DRIVER={SQL Server};SERVER=server;DATABASE=ServiceRequest; UID=SA;PWD=pwd'
conn = pyodbc.connect(connstr)
cursor = conn.cursor()
cursor.execute("""SELECT SRNUMBER, FirstName, LastName, ParentNumber FROM MYLA311""")

# build list of column names to use as dictionary keys from sql results
columns = [column[0] for column in cursor.description]

results = []
for row in cursor.fetchall():
    results.append(dict(zip(columns, row)))

output = {"MetaData": {}, "SRData": results}
print(json.dumps(output, sort_keys=True, indent=4))
For starters, the line
objects_list.append(d)
needs to be inside the for loop, not outside.
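Putting both fixes together, a corrected version of the question's loop (using fetchall so every record is collected, and appending inside the loop) would look roughly like this:

rows = cursor.fetchall()

objects_list = []
for row in rows:
    d = collections.OrderedDict()
    d['SRNUMBER'] = row.SRNUMBER
    d['FirstName'] = row.FirstName
    d['LastName'] = row.LastName
    d['ParentNumber'] = row.ParentNumber
    objects_list.append(d)  # append each record inside the loop

output = {"MetaData": {}, "SRData": objects_list}
print json.dumps(output, sort_keys=True, indent=4)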
