Parse JSON output from API file into CSV - python

I am currently trying to convert a JSON output from an API request to a CSV format so i can store the results into our database. Here is my current code for reference:
# NOTE(review): question code reproduced with review comments — the thread
# below discusses the defects flagged here.
import pyodbc
import csv
#import urllib2
import json
import collections
import requests
#import pprint
#import functools

print ("Connecting via ODBC")
conn = pyodbc.connect('DSN=DSN', autocommit=True)
print ("Connected!\n")
cur = conn.cursor()
sql = """SELECT DATA"""
cur.execute(sql)
#df = pandas.read_sql_query(sql, conn)
#df.to_csv('TEST.csv')
#print('CSV sheet is ready to go!')
rows = cur.fetchall()

# Build one ordered dict per DB row so the JSON keys keep column order.
obs_list = []
for row in rows:
    d = collections.OrderedDict()
    d['addressee'] = row.NAME
    d['street'] = row.ADDRESS
    d['city'] = row.CITY
    d['state'] = row.STATE
    d['zipcode'] = row.ZIP
    obs_list.append(d)

obs_file = 'TEST.json'
with open(obs_file, 'w') as file:
    json.dump(obs_list, file)

print('Run through API')
url = 'https://api.smartystreets.com/street-address?'
headers = {'content-type': 'application/json'}
# NOTE(review): the script writes TEST.json above but posts test1.json here —
# presumably the same data; confirm the intended file name.
with open('test1.json', 'r') as run:
    dict_run = run.readlines()
dict_ready = (''.join(dict_run))
r = requests.post(url , data=dict_ready, headers=headers)
ss_output = r.text

output = 'output.json'
with open(output,'w') as of:
    # BUG: r.text is already JSON text; json.dump() re-encodes it, so
    # output.json ends up holding one long quoted string, not a list.
    json.dump(ss_output, of)
print('I think it works')

# NOTE(review): handle `f` is never closed — prefer a with-block.
f = open('output.json')
data = json.load(f)
# BUG: because of the double encoding above, `data` is a str, and indexing a
# str with 'analysis' raises TypeError: string indices must be integers.
# Even with correctly saved data, the top level is a LIST of records, so this
# would need e.g. [rec['analysis'] for rec in data] instead.
data_1 = data['analysis']
data_2 = data['metadata']
data_3 = data['components']

entity_data = open('TEST.csv','w')
csvwriter = csv.writer(entity_data)
count = 0
count2 = 0
count3 = 0
# For each of the three nested sections: write the header once, then one
# row per record — all into the same CSV file.
for ent in data_1:
    if count == 0:
        header = ent.keys()
        csvwriter.writerow(header)
        count += 1
    csvwriter.writerow(ent.values())
for ent_2 in data_2:
    if count2 == 0:
        header2 = ent_2.keys()
        csvwriter.writerow(header2)
        count2 += 1
    csvwriter.writerow(ent_2.values())
for ent_3 in data_3:
    if count3 == 0:
        header3 = ent_3.keys()
        csvwriter.writerow(header3)
        count3 += 1
    csvwriter.writerow(ent_3.values())
entity_data.close()
Sample output from API:
[
{
"input_index": 0,
"candidate_index": 0,
"delivery_line_1": "1 Santa Claus Ln",
"last_line": "North Pole AK 99705-9901",
"delivery_point_barcode": "997059901010",
"components": {
"primary_number": "1",
"street_name": "Santa Claus",
"street_suffix": "Ln",
"city_name": "North Pole",
"state_abbreviation": "AK",
"zipcode": "99705",
"plus4_code": "9901",
"delivery_point": "01",
"delivery_point_check_digit": "0"
},
"metadata": {
"record_type": "S",
"zip_type": "Standard",
"county_fips": "02090",
"county_name": "Fairbanks North Star",
"carrier_route": "C004",
"congressional_district": "AL",
"rdi": "Commercial",
"elot_sequence": "0001",
"elot_sort": "A",
"latitude": 64.75233,
"longitude": -147.35297,
"precision": "Zip8",
"time_zone": "Alaska",
"utc_offset": -9,
"dst": true
},
"analysis": {
"dpv_match_code": "Y",
"dpv_footnotes": "AABB",
"dpv_cmra": "N",
"dpv_vacant": "N",
"active": "Y",
"footnotes": "L#"
}
},
{
"input_index": 1,
"candidate_index": 0,
"delivery_line_1": "Loop land 1",
"last_line": "North Pole AK 99705-9901",
"delivery_point_barcode": "997059901010",
"components": {
"primary_number": "1",
"street_name": "Lala land",
"street_suffix": "Ln",
"city_name": "North Pole",
"state_abbreviation": "AK",
"zipcode": "99705",
"plus4_code": "9901",
"delivery_point": "01",
"delivery_point_check_digit": "0"
},
"metadata": {
"record_type": "S",
"zip_type": "Standard",
"county_fips": "02090",
"county_name": "Fairbanks North Star",
"carrier_route": "C004",
"congressional_district": "AL",
"rdi": "Commercial",
"elot_sequence": "0001",
"elot_sort": "A",
"latitude": 64.75233,
"longitude": -147.35297,
"precision": "Zip8",
"time_zone": "Alaska",
"utc_offset": -9,
"dst": true
},
"analysis": {
"dpv_match_code": "Y",
"dpv_footnotes": "AABB",
"dpv_cmra": "N",
"dpv_vacant": "N",
"active": "Y",
"footnotes": "L#"
}
}
]
After storing the API output the trouble is trying to parse the returned output (Sample output) into a CSV format. The code im using to try to do this:
import csv
import json

def _section(records, key):
    """Collect the nested *key* dict from every record in *records*."""
    return [rec[key] for rec in records]

def json_sections_to_csv(json_path='output.json', csv_path='TEST.csv'):
    """Convert the API's JSON output (a LIST of records) into one CSV.

    Fixes the original TypeError: the top-level JSON is a list, so each
    nested group must be gathered per record — data['analysis'] indexed the
    wrong level. For each nested section (analysis, metadata, components)
    a header row is written once, followed by one row per record.
    """
    with open(json_path) as f:
        data = json.load(f)
    # newline='' prevents blank interleaved rows on Windows.
    with open(csv_path, 'w', newline='') as out:
        writer = csv.writer(out)
        for key in ('analysis', 'metadata', 'components'):
            rows = _section(data, key)
            if not rows:
                continue
            writer.writerow(rows[0].keys())
            for entry in rows:
                writer.writerow(entry.values())

if __name__ == '__main__':
    json_sections_to_csv()
returns the following error: TypeError: string indices must be integers. As someone kindly pointed out in the comments, it appears I am iterating over keys instead of the individual dictionaries, and this is where I get stuck because I'm not sure what to do. From my understanding the JSON is split into 3 different arrays with a JSON object for each, but that does not appear to be the case according to the structure. I apologize for the length of the code, but I want some resemblance of context for what I am trying to accomplish.

Consider pandas's json_normalize() method to flatten nested items into tabular df structure:
import json

import pandas as pd

def json_to_csv(json_path='Output.json', csv_path='Output.csv'):
    """Flatten a list of nested JSON records into a flat CSV.

    Nested dicts (components, metadata, analysis) become dot-prefixed
    columns, e.g. 'metadata.latitude'.
    """
    with open(json_path) as f:
        data = json.load(f)
    # pd.json_normalize supersedes the deprecated
    # pandas.io.json.json_normalize import used originally.
    df = pd.json_normalize(data)
    df.to_csv(csv_path)

if __name__ == '__main__':
    json_to_csv()
Do note the components, metadata, and analysis become period-separated prefixes to corresponding values. If not needed, consider renaming columns.

You are saving request's result.text with json. result.text is a string so upon rereading it through json you get the same one long string instead of a list. Try to write result.text as is:
# Persist the response body verbatim — it is already JSON text, so no
# json.dump() round-trip is needed.
output = 'output.json'
with open(output, 'w') as handle:
    handle.write(ss_output)
That's the cause of the TypeError: string indices must be integers error you mention.
The rest of your code has multiple issues.
The data in json is a list of dicts so to get ,say , data_1 you need list comprehension like this:
data_1 = [x['analysis'] for x in data]
You write three types of rows into the same csv file: components, metadata and analyzis. That's really odd.
Probably you have to rewrite the second half of the code: open three csv_writers one per data type, then iterate over data items and write their fields into corresponding csv_writer.

Related

Converting text file to json

I have text file and I want to convert it to JSON:
red|2022-09-29|03:15:00|info 1
blue|2022-09-29|10:50:00|
yellow|2022-09-29|07:15:00|info 2
so i type a script to convert this file into JSON:
import json

filename = 'input_file.txt'
dict1 = {}
fields =['name', 'date', 'time', 'info']
with open(filename) as fh:
    l = 1
    for line in fh:
        # Split each pipe-delimited line into at most 5 parts.
        description = list( line.strip().split("|", 4))
        print(description)
        # Synthetic "name<N>" key — this is what makes the output a dict
        # instead of the desired list.
        sno ='name'+str(l)
        i = 0
        dict2 = {}
        # Pair field names with the split values by index.
        while i<len(fields):
            dict2[fields[i]]= description[i]
            i = i + 1
        dict1[sno]= dict2
        l = l + 1
out_file = open("json_file.json", "w")
json.dump(dict1, out_file, indent = 4)
out_file.close()
and output looks like this:
{
"name1": {
"name": "red",
"date": "2022-09-29",
"time": "03:15:00",
"info": "info 1"
},
"name2": {
"name": "blue",
"date": "2022-09-29",
"time": "10:50:00",
"info": ""
},
"name3": {
"name": "yellow",
"date": "2022-09-29",
"time": "07:15:00",
"info": "info 2"
}
}
As you can see I do so, but now I want to change the structure of this JSON file. How can I change it to make my output look like this:
[
{"name":"red", "date": "2022-09-29", "time": "03:15:00", "info":"info 1"},
{"name":"blue", "date": "2022-09-29", "time": "10:50:00", "info":""},
{"name":"yellow", "date": "2022-09-29", "time": "07:15:00", "info":"info 2"}
]
If you see your required json output, it is a list and not a dict like you have right now. So using a list(data) instead of dict(dict1) should give the correct output.
Following updated code should generate the json data in required format -
import json

# Column names for the pipe-delimited input.
FIELDS = ['name', 'date', 'time', 'info']

def convert_txt_to_json(in_path='input_file.txt', out_path='json_file.json'):
    """Read pipe-delimited lines and write them out as a JSON list of dicts.

    Returns the list of records. The original's `l`/`sno` counters were dead
    code once the output became a list, so they are removed; the manual
    index-while loop is replaced by dict(zip(...)).
    """
    records = []
    with open(in_path) as fh:
        for line in fh:
            parts = line.strip().split('|', 4)
            print(parts)
            # zip pairs each field name with its value; a short line simply
            # yields fewer keys instead of raising IndexError.
            records.append(dict(zip(FIELDS, parts)))
    with open(out_path, 'w') as out_file:
        json.dump(records, out_file, indent=4)
    return records

if __name__ == '__main__':
    convert_txt_to_json()
I would use pandas, it allows you to solve your problem in one statement and avoid reinventing a wheel:
import pandas as pd

# Read the pipe-delimited file, blank out missing info cells, then dump the
# rows as a JSON array of objects.
table = pd.read_table("input_file.txt", sep="|", header=None,
                      names=["name", "date", "time", "info"])
table.fillna("").to_json("json_file.json", orient="records")

Scrape specific Json data to a csv

I am trying to scrape some json data. The first few rows ae as follows and all the latter is in the same format.
Json data:
{
"data": [
{
"date": "2011-10-07",
"f(avg(output_total)/number(100000000))": 50
},
{
"date": "2011-10-08",
"f(avg(output_total)/number(100000000))": 50
},
{
"date": "2011-10-12",
"f(avg(output_total)/number(100000000))": 50
},
{
"date": "2011-10-13",
"f(avg(output_total)/number(100000000))": 54.0515120216902
},.......]
I would like to scrape the date along with its relevant value (for the above, 2011-10-07 and 50, 2011-10-08 and 50, etc.) into a CSV file which contains two columns (date and value).
How can I proceed? Is this possible with Python?
This is how I grabbed the json data:
import os
import requests

# Blockchair endpoint: per-day average transaction output.
url = 'https://api.blockchair.com/litecoin/transactions?a=date,f(avg(output_total)/number(100000000))'
proxies = {}
resp = requests.get(url=url, proxies=proxies)
print(resp.content)
pandas allows you to solve this one in a few lines:
import pandas as pd

# `json_data` is the decoded API payload; its "data" key holds the records.
frame = pd.DataFrame(json_data['data'])
frame.columns = ["date", "value"]
frame.to_csv("data.csv", index=False)
# NOTE(review): naming this variable `json` shadows the stdlib json module —
# any later `json.loads(...)` call in the same scope would fail.
json = {
    "data": [
        {
            "date": "2011-10-07",
            "f(avg(output_total)/number(100000000))": 50
        },
        {
            "date": "2011-10-08",
            "f(avg(output_total)/number(100000000))": 50
        },
        {
            "date": "2011-10-12",
            "f(avg(output_total)/number(100000000))": 50
        },
        {
            "date": "2011-10-13",
            "f(avg(output_total)/number(100000000))": 54.0515120216902
        }]}
Step 1: Convert json into a Pandas Dataframe
df = pd.DataFrame(json['data'])
Step 2: Filter Df based on conditions ( e.g >>> value = 50)
df_filtered = df[(df["f(avg(output_total)/number(100000000))"] == 50)]
Step 3: Save df into csv file and choose the location where you like to store the CSV file on your computer.
df_filtered.to_csv(r'C:\user\foo\output.csv', index = False)
if you wish to include the index, then simply remove index = False
You can do like this.
Iterate over the JSON string, extract the data you need and then write that data to CSV file.
import json
import csv

# Output layout: a header row followed by one row per data point.
fields = ['Date', 'Value']
filename = 'test.csv'

# Inline sample of the API response.
s = """
{
"data":[
{
"date":"2011-10-07",
"f(avg(output_total)/number(100000000))":50
},
{
"date":"2011-10-08",
"f(avg(output_total)/number(100000000))":50
},
{
"date":"2011-10-12",
"f(avg(output_total)/number(100000000))":50
},
{
"date":"2011-10-13",
"f(avg(output_total)/number(100000000))":54.0515120216902
}
]
}
"""
payload = json.loads(s)
with open(filename, 'w', newline='') as handle:
    writer = csv.writer(handle)
    writer.writerow(fields)
    # Dict insertion order yields (date, value) for every record.
    writer.writerows(record.values() for record in payload['data'])
test.csv
Date Value
07-10-11 50
08-10-11 50
12-10-11 50
13-10-11 54.05151202
If you just want a CSV file without relying on any additional Python modules (such as pandas) then it's very simple:
import requests

CSV = 'blockchair.csv'
url = 'https://api.blockchair.com/litecoin/transactions?a=date,f(avg(output_total)/number(100000000))'

with requests.Session() as session:
    response = session.get(url)
    response.raise_for_status()

# Fixed: the file handle was named `csv`, shadowing the stdlib csv module
# name — confusing even though csv is not imported here. `out` avoids it,
# and join() replaces the manual index/comma bookkeeping.
with open(CSV, 'w') as out:
    out.write('Date,Value\n')
    for d in response.json()['data']:
        out.write(','.join(str(v) for v in d.values()))
        out.write('\n')
You can try this:
import requests
import csv
import pandas as pd

url = 'https://api.blockchair.com/litecoin/transactions?a=date,f(avg(output_total)/number(100000000))'

payload = requests.get(url=url).json()
records = payload.get('data', [])

# --- pandas route ---
frame = pd.DataFrame(records)
frame.rename(columns={'f(avg(output_total)/number(100000000))': 'value'}, inplace=True)
# Keep only rows whose value is >= 50.
kept = frame[frame["value"] >= 50]
kept.to_csv('res_values_1.csv', sep=',', encoding='utf-8', index = False)

# --- plain csv route ---
with open('res_values_2.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['date', 'value'])
    for record in records:
        row = list(record.values())
        if row[1] >= 50:
            writer.writerow(row)
CSV Output:
date,value
2011-10-07,50.0
2011-10-08,50.0
2011-10-12,50.0
2011-10-13,54.0515120216902
.
.
.
2021-10-05,346.12752821011594
2021-10-06,293.5061907016782
2021-10-07,333.17665010641673
2021-10-08,332.2437737707938

how to extract columns for dictionary that do not have keys

So I have tried resources on how to transform a dict into a DataFrame, but the problem is that this is a strangely shaped dict.
It is not like key: {}, key: {}, etc.
The data has lots of items, but the goal is to extract only the stuff inside the dict {} — and, if possible, the dates too as a plus.
data:
id,client,source,status,request,response,queued,created_at,updated_at
54252,sdf,https://asdasdadadad,,"{
"year": "2010",
"casa": "aca",
"status": "p",
"Group": "57981",
}",,1,"2020-05-02 11:06:17","2020-05-02 11:06:17"
54252,msc-lp,https://discover,,"{
"year": "27",
"casa": "Na",
"status": "p",
"Group": "57981",
}"
my attempts:
#attempt 1
# NOTE(review): splits on whitespace and keeps only 2-token lines whose
# first token is numeric, so the embedded multi-line JSON in the request
# column is never parsed — this cannot recover the nested fields.
with open('data.csv') as fd:
    pairs = (line.split(None) for line in fd)
    res = {int(pair[0]):pair[1] for pair in pairs if len(pair) == 2 and pair[0].isdigit()}
#attempt 2
import json
# reading the JSON data using json.load()
# NOTE(review): the input is a CSV with one embedded JSON column, not a JSON
# document, so json.load() fails here; `file` also shadows a builtin name,
# and `pd` is used without being imported in this snippet.
file = 'data.json'
with open(file) as train_file:
    dict_train = json.load(train_file)
# converting json dataset from dictionary to dataframe
train = pd.DataFrame.from_dict(dict_train, orient='index')
train.reset_index(level=0, inplace=True)
#attempt 3
df = pd.read_csv("data.csv")
# NOTE(review): melt() expects "index" and "Date" columns this CSV does not
# have, and the multi-line quoted JSON cells break the default parsing.
df = df.melt(id_vars=["index", "Date"], var_name="variables",value_name="values")
Nothening works due the data be weird shaped
Expected output:
All the items inside of the dictionary, every key will be one column at df
Date year casa status Group
2020-05-02 11:06:17 2010 aca p 57981
2020-05-02 11:06:17 27 Na p 57981
Format data into a valid csv stucture:
id,client,source,status,request,response,queued,created_at,updated_at
54252,sdf,https://asdasdadadad,,'{ "ag": "2010", "ca": "aca", "ve": "p", "Group": "57981" }',,1,"2020-05-02 11:06:17","2020-05-02 11:06:17"
54252,msc-lp,https://discover,,'{ "ag": "27", "ca": "Na", "ve": "p", "Group": "57981" }',,1,"2020-05-02 11:06:17","2020-05-02 11:06:17"
This should work for the worst-case scenario as well,
check it out.
import json
import pandas as pd

def parse_column(data):
    """Best-effort JSON decode for an embedded cell value.

    Returns the parsed object, or None (after printing the error) when the
    cell does not contain valid JSON.
    """
    try:
        parsed = json.loads(data)
    except Exception as exc:  # deliberately broad: any bad cell becomes None
        print(exc)
        return None
    return parsed
# quotechar="'" lets the single-quoted, internally double-quoted JSON cells
# survive CSV parsing; the converter decodes the request column into dicts.
df =pd.read_csv('tmp.csv',converters={"request":parse_column}, quotechar="'")

i there a simple way to get these json values using python 3

EDIT: used the suggested solution, now the json has more headers, as shown bellow.
I have input file as Json,
{
"systems-under-test": [{
"type": "url",
"sytems": [
"www.google.com",
"www.google.com",
"www.google.com"
]
},
{
"type": "api",
"sytems": [
"api.com",
"api.fr"
]
},
{
"type": "ip",
"sytems": [
"172.168 .1 .1",
"172.168 .1 .0"
]
}
],
"headers-configuration": [{
"custom": true,
"headers-custom-configuration": {
"headers": [{
"header-name": "x - frame - options",
"ignore": false,
"expected-value": ["deny", "sameorigin"]
},
{
"header-name": "content-security-policy",
"ignore": false,
"expected-value": []
}
]
}
}],
"header-results": []
}
after using the suggested solution by, I created dict that stores each header information and added these dict to a list.
For a single header in the JSON it works fine, but when I have multiple headers it's not working.
def load_header_settings2(self):
    """Broken first attempt — works only when "headers" is a single dict."""
    header_custom_setting = []
    newDict = {}
    path = self.validate_path()
    with open(path) as json_file:
        data = load(json_file)
    config = data["headers-configuration"][0]
    if config["custom"]:
        headers = config["headers-custom-configuration"]["headers"]
        # BUG: in the updated JSON "headers" is a LIST of dicts, so these
        # key lookups raise TypeError — the list must be iterated.
        headers_name = headers["header-name"]
        ignore = headers["ignore"]
        expected_values = headers["expected-value"]
        newDict["header name"] = headers_name
        newDict["ignore"] = ignore
        newDict["expected value"] = expected_values
        header_custom_setting.append(newDict)
        # BUG: clear() empties the very dict just appended — the returned
        # list ends up holding an empty dict.
        newDict.clear()
    for i in header_custom_setting:
        print(i)
    return header_custom_setting
can someone help?
This has been solved like this — is this a safe way to reach my goal? @RoadRunner
def load_header_settings2(self):
    """Load the custom header settings from the JSON config file.

    Returns a list with one dict (name / ignore flag / expected values)
    per configured header; an empty list when "custom" is false.
    """
    from json import load  # local import so the method is self-contained

    header_custom_setting = []
    path = self.validate_path()
    with open(path) as json_file:
        data = load(json_file)
    config = data["headers-configuration"][0]
    if config["custom"]:
        for header in config["headers-custom-configuration"]["headers"]:
            # BUG FIX: build a fresh dict per header. The original reused a
            # single dict object, so every list entry aliased the same
            # (last-assigned) header data.
            header_custom_setting.append({
                "header name": header["header-name"],
                "ignore": header["ignore"],
                "expected value": header["expected-value"],
            })
    for i in header_custom_setting:
        print(i)
    return header_custom_setting
You could get both header name and values like this, making sure we check "custom" is set to true before proceeding:
from json import load

with open("data.json") as json_file:
    data = load(json_file)
config = data["headers-configuration"][0]
# Only proceed when the custom flag is set.
if config["custom"]:
    headers = config["headers-custom-configuration"]["headers"]
    # NOTE(review): assumes "headers" is a single dict; in the edited JSON it
    # is a LIST of dicts, so these subscripts raise TypeError — iterate the
    # list instead (see the follow-up code above).
    headers_name = headers["header-name"]
    print(headers_name)
    expected_values = headers["expected-value"]
    print(expected_values)
Output:
x - frame - options
['deny', 'sameorigin']
As for concatenating the headers and values, you could iterate the headers and values and combine them together into a string:
# Emit "<header name> <value>" once per expected value (old-style %-format).
for value in expected_values:
    print("%s %s" % (headers_name, value))
Or using f-strings:
# Same output using an f-string.
for value in expected_values:
    print(f"{headers_name} {value}")
Which will give you:
x - frame - options deny
x - frame - options sameorigin

convert csv file to multiple nested json format

I have written a code to convert csv file to nested json format. I have multiple columns to be nested hence assigning separately for each column. The problem is I'm getting 2 fields for the same column in the json output.
import csv
import json
from collections import OrderedDict

csv_file = 'data.csv'
json_file = csv_file + '.json'

def main(input_file):
    """Read a pipe-delimited CSV and dump its rows as nested JSON."""
    csv_rows = []
    with open(input_file, 'r') as csvfile:
        reader = csv.DictReader(csvfile, delimiter='|')
        for row in reader:
            # NOTE(review): the trailing commas on the next two lines make
            # the values 1-tuples — that is why the output shows
            # "RAWID": [1] instead of "RAWID": 1.
            row['TYPE'] = 'REVIEW', # adding new key, value
            row['RAWID'] = 1,
            row['CUSTOMER'] = {
                "ID": row['CUSTOMER_ID'],
                "NAME": row['CUSTOMER_NAME']
            }
            row['CATEGORY'] = {
                "ID": row['CATEGORY_ID'],
                "NAME": row['CATEGORY']
            }
            del (row["CUSTOMER_NAME"], row["CATEGORY_ID"],
                 row["CATEGORY"], row["CUSTOMER_ID"]) # deleting since fields occurring twice
            csv_rows.append(row)
    with open(json_file, 'w') as f:
        json.dump(csv_rows, f, sort_keys=True, indent=4, ensure_ascii=False)
        f.write('\n')
The output is as below:
[
{
"CATEGORY": {
"ID": "1",
"NAME": "Consumers"
},
"CATEGORY_ID": "1",
"CUSTOMER_ID": "41",
"CUSTOMER": {
"ID": "41",
"NAME": "SA Port"
},
"CUSTOMER_NAME": "SA Port",
"RAWID": [
1
]
}
]
I'm getting 2 entries for the fields I have assigned using row[''].
Is there any other way to get rid of this? I want only one entry for a particular field in each record.
Also how can I convert the keys to lower case after reading from csv.DictReader(). In my csv file all the columns are in upper case and hence I'm using the same to assign. But I want to convert all of them to lower case.
In order to convert the keys to lower case, it would be simpler to generate a new dict per row. BTW, it should be enough to get rid of the duplicate fields:
for row in reader:
    # Build a fresh, lower-cased record instead of mutating `row`, so the
    # original upper-case fields never reach the output.
    # FIX: the module is `collections`, not `collection` (NameError as posted).
    orow = collections.OrderedDict()
    # NOTE(review): the trailing commas keep these values as 1-tuples to
    # match the question code's behavior ("rawid": [1] in the JSON output).
    orow['type'] = 'REVIEW', # adding new key, value
    orow['rawid'] = 1,
    orow['customer'] = {
        "id": row['CUSTOMER_ID'],
        "name": row['CUSTOMER_NAME']
    }
    orow['category'] = {
        "id": row['CATEGORY_ID'],
        "name": row['CATEGORY']
    }
    csv_rows.append(orow)

Categories

Resources