Identify location of item in json file - python

Suppose I have the following JSON file. With data1["tenants"][1]['name'] I can select uniquename2. Is there a way to find that index '1' by looping over the document?
{
"tenants": [{
"key": "identifier",
"name": "uniquename",
"image": "url",
"match": [
"identifier"
],
"tags": [
"tag1",
"tag2"
]
},
{
"key": "identifier",
"name": "uniquename2",
"image": "url",
"match": [
"identifier1",
"identifier2"
],
"tags": ["tag"]
}
]
}
In short: data1["tenants"][1]['name'] = uniquename2 and data1["tenants"][0]['name'] = uniquename.
How can I find out which index corresponds to which name? So if I have uniquename2, how do I get the index that corresponds to it?

You can iterate over the tenants to map each index to its name:
data = {
"tenants": [{
"key": "identifier",
"name": "uniquename",
"image": "url",
"match": [
"identifier"
],
"tags": [
"tag1",
"tag2"
]
},
{
"key": "identifier",
"name": "uniquename2",
"image": "url",
"match": [
"identifier1",
"identifier2"
],
"tags": ["tag"]
}
]
}
for index, tenant in enumerate(data['tenants']):
    print(index, tenant['name'])
OUTPUT
0 uniquename
1 uniquename2
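If you need to look names up repeatedly, the same loop can be turned into a name-to-index dictionary built once up front (a minimal sketch, assuming the names really are unique):
# Build a name -> index lookup in one pass over the same data dict as above.
name_to_index = {tenant['name']: index for index, tenant in enumerate(data['tenants'])}
print(name_to_index['uniquename2'])  # 1
print(name_to_index['uniquename'])   # 0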

Assuming you have already turned your JSON into a dictionary, this is how you can get the index of the first occurrence of a name in your list (this relies on the names actually being unique):
data = {
"tenants": [{
"key": "identifier",
"name": "uniquename",
"image": "url",
"match": [
"identifier"
],
"tags": [
"tag1",
"tag2"
]
},
{
"key": "identifier",
"name": "uniquename2",
"image": "url",
"match": [
"identifier1",
"identifier2"
],
"tags": ["tag"]
}
]
}
def index_of(tenants, tenant_name):
    try:
        return tenants.index(
            next(
                tenant for tenant in tenants
                if tenant["name"] == tenant_name
            )
        )
    except StopIteration:
        raise ValueError(
            f"tenants list does not have tenant by name {tenant_name}."
        )
index_of(data["tenants"], "uniquename") # 0
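For the other cases (a quick usage sketch of the same function):
index_of(data["tenants"], "uniquename2")    # 1
index_of(data["tenants"], "no-such-name")   # raises ValueError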

Related

Best way to parse a JSON to store in SQL database (SQL stored procedure/Python)

I have a table of overly complex JSON files I'm trying to convert to tabular format to store in a SQL database. I'm pulling the JSON from the QuickBooks Online API, and the format is messy to say the least (we're talking seven levels of nesting for some parts of it).
The format resembles the snippet below. Currently I am using a bunch of OPENJSON calls plus CROSS APPLYs to dig down to the innermost ColData and then work my way back up, but it looks like some of the ColData entries get skipped over that way.
Are there better ways, using either Python (since I pull the JSON in Python before sending it to the SQL database to parse) or SQL, to convert it to tabular format besides manually chaining OPENJSON with CROSS APPLY?
The goal is to get all of the ColData entries into a SQL table...
Thanks!
{
"Header": {
"ReportName": "BalanceSheet",
"Option": [
{
"Name": "AccountingStandard",
"Value": "GAAP"
},
{
"Name": "NoReportData",
"Value": "false"
}
],
"DateMacro": "this calendar year-to-date",
"ReportBasis": "Accrual",
"StartPeriod": "2016-01-01",
"Currency": "USD",
"EndPeriod": "2016-10-31",
"Time": "2016-10-31T09:42:21-07:00",
"SummarizeColumnsBy": "Total"
},
"Rows": {
"Row": [
{
"Header": {
"ColData": [
{
"value": "ASSETS"
},
{
"value": ""
}
]
},
"Rows": {
"Row": [
{
"Header": {
"ColData": [
{
"value": "Current Assets"
},
{
"value": ""
}
]
},
"Rows": {
"Row": [
{
"Header": {
"ColData": [
{
"value": "Bank Accounts"
},
{
"value": ""
}
]
},
"Rows": {
"Row": [
{
"ColData": [
{
"id": "35",
"value": "Checking"
},
{
"value": "1350.55"
}
],
"type": "Data"
},
{
"ColData": [
{
"id": "36",
"value": "Savings"
},
{
"value": "800.00"
}
],
"type": "Data"
}
]
},
"type": "Section",
"group": "BankAccounts",
"Summary": {
"ColData": [
{
"value": "Total Bank Accounts"
},
{
"value": "2150.55"
}
]
}
},
{
"Header": {
"ColData": [
{
"value": "Accounts Receivable"
},
{
"value": ""
}
]
},
"Rows": {
"Row": [
{
"ColData": [
{
"id": "84",
"value": "Accounts Receivable (A/R)"
},
{
"value": "6383.12"
}
],
"type": "Data"
}
]
},
"type": "Section",
"group": "AR",
"Summary": {
"ColData": [
{
"value": "Total Accounts Receivable"
},
{
"value": "6383.12"
}
]
}
},
{
"Header": {
"ColData": [
{
"value": "Other current assets"
},
{
"value": ""
}
]
},
"Rows": {
"Row": [
{
"ColData": [
{
"id": "81",
"value": "Inventory Asset"
},
{
"value": "596.25"
}
],
"type": "Data"
},
{
"ColData": [
{
"id": "4",
"value": "Undeposited Funds"
},
{
"value": "2117.52"
}
],
"type": "Data"
}
]
},
"type": "Section",
"group": "OtherCurrentAssets",
"Summary": {
"ColData": [
{
"value": "Total Other current assets"
},
{
"value": "2713.77"
}
]
}
}
]
},
"type": "Section",
"group": "CurrentAssets",
"Summary": {
"ColData": [
{
"value": "Total Current Assets"
},
{
"value": "11247.44"
}
]
}
},
{
"Header": {
"ColData": [
{
"value": "Fixed Assets"
},
{
"value": ""
}
]
},
"Rows": {
"Row": [
{
"Header": {
"ColData": [
{
"id": "37",
"value": "Truck"
},
{
"value": ""
}
]
},
"Rows": {
"Row": [
{
"ColData": [
{
"id": "38",
"value": "Original Cost"
},
{
"value": "13495.00"
}
],
"type": "Data"
}
]
},
"type": "Section",
"Summary": {
"ColData": [
{
"value": "Total Truck"
},
{
"value": "13495.00"
}
]
}
}
]
},
"type": "Section",
"group": "FixedAssets",
"Summary": {
"ColData": [
{
"value": "Total Fixed Assets"
},
{
"value": "13495.00"
}
]
}
}
]
},
"type": "Section",
"group": "TotalAssets",
"Summary": {
"ColData": [
{
"value": "TOTAL ASSETS"
},
{
"value": "24742.44"
}
]
}
},
{
"Header": {
"ColData": [
{
"value": "LIABILITIES AND EQUITY"
},
{
"value": ""
}
]
},
"Rows": {
"Row": [
{
"Header": {
"ColData": [
{
"value": "Liabilities"
},
{
"value": ""
}
]
},
"Rows": {
"Row": [
{
"Header": {
"ColData": [
{
"value": "Current Liabilities"
},
{
"value": ""
}
]
},
"Rows": {
"Row": [
{
"Header": {
"ColData": [
{
"value": "Accounts Payable"
},
{
"value": ""
}
]
},
"Rows": {
"Row": [
{
"ColData": [
{
"id": "33",
"value": "Accounts Payable (A/P)"
},
{
"value": "1984.17"
}
],
"type": "Data"
}
]
},
"type": "Section",
"group": "AP",
"Summary": {
"ColData": [
{
"value": "Total Accounts Payable"
},
{
"value": "1984.17"
}
]
}
},
{
"Header": {
"ColData": [
{
"value": "Credit Cards"
},
{
"value": ""
}
]
},
"Rows": {
"Row": [
{
"ColData": [
{
"id": "41",
"value": "Mastercard"
},
{
"value": "157.72"
}
],
"type": "Data"
}
]
},
"type": "Section",
"group": "CreditCards",
"Summary": {
"ColData": [
{
"value": "Total Credit Cards"
},
{
"value": "157.72"
}
]
}
},
{
"Header": {
"ColData": [
{
"value": "Other Current Liabilities"
},
{
"value": ""
}
]
},
"Rows": {
"Row": [
{
"ColData": [
{
"id": "89",
"value": "Arizona Dept. of Revenue Payable"
},
{
"value": "4.55"
}
],
"type": "Data"
},
{
"ColData": [
{
"id": "90",
"value": "Board of Equalization Payable"
},
{
"value": "401.98"
}
],
"type": "Data"
},
{
"ColData": [
{
"id": "43",
"value": "Loan Payable"
},
{
"value": "4000.00"
}
],
"type": "Data"
}
]
},
"type": "Section",
"group": "OtherCurrentLiabilities",
"Summary": {
"ColData": [
{
"value": "Total Other Current Liabilities"
},
{
"value": "4406.53"
}
]
}
}
]
},
"type": "Section",
"group": "CurrentLiabilities",
"Summary": {
"ColData": [
{
"value": "Total Current Liabilities"
},
{
"value": "6548.42"
}
]
}
},
{
"Header": {
"ColData": [
{
"value": "Long-Term Liabilities"
},
{
"value": ""
}
]
},
"Rows": {
"Row": [
{
"ColData": [
{
"id": "44",
"value": "Notes Payable"
},
{
"value": "25000.00"
}
],
"type": "Data"
}
]
},
"type": "Section",
"group": "LongTermLiabilities",
"Summary": {
"ColData": [
{
"value": "Total Long-Term Liabilities"
},
{
"value": "25000.00"
}
]
}
}
]
},
"type": "Section",
"group": "Liabilities",
"Summary": {
"ColData": [
{
"value": "Total Liabilities"
},
{
"value": "31548.42"
}
]
}
},
{
"Header": {
"ColData": [
{
"value": "Equity"
},
{
"value": ""
}
]
},
"Rows": {
"Row": [
{
"ColData": [
{
"id": "34",
"value": "Opening Balance Equity"
},
{
"value": "-9337.50"
}
],
"type": "Data"
},
{
"ColData": [
{
"id": "2",
"value": "Retained Earnings"
},
{
"value": "91.25"
}
],
"type": "Data"
},
{
"ColData": [
{
"value": "Net Income"
},
{
"value": "2440.27"
}
],
"type": "Data",
"group": "NetIncome"
}
]
},
"type": "Section",
"group": "Equity",
"Summary": {
"ColData": [
{
"value": "Total Equity"
},
{
"value": "-6805.98"
}
]
}
}
]
},
"type": "Section",
"group": "TotalLiabilitiesAndEquity",
"Summary": {
"ColData": [
{
"value": "TOTAL LIABILITIES AND EQUITY"
},
{
"value": "24742.44"
}
]
}
}
]
},
"Columns": {
"Column": [
{
"ColType": "Account",
"ColTitle": "",
"MetaData": [
{
"Name": "ColKey",
"Value": "account"
}
]
},
{
"ColType": "Money",
"ColTitle": "Total",
"MetaData": [
{
"Name": "ColKey",
"Value": "total"
}
]
}
]
}
}
Here is what I tried to get the ColData (unsuccessfully, I might add). I think it might be a little too contrived to do in SQL, but I'm not sure whether I should continue this way or whether there's a better way in Python:
declare @json nvarchar(max)
SELECT @json = json FROM QboApiRawJSONData WHERE ID = 2
--Outer layer of JSON breaks into 3 parts - header, columns, rows
SELECT * FROM OPENJSON(@json)
WITH
(
Rows nvarchar(max) AS JSON
) as MainLayer
CROSS APPLY OPENJSON (MainLayer.Rows)
WITH
(
Row nvarchar(max) AS JSON
) as SecondaryLayer
CROSS APPLY OPENJSON (SecondaryLayer.Row)
WITH
(
Rows nvarchar(max) AS JSON
) As ThirdLayer
CROSS APPLY OPENJSON (ThirdLayer.Rows)
WITH
(
Row nvarchar(max) AS JSON
) as FourthLayer
CROSS APPLY OPENJSON (FourthLayer.Row)
WITH
(
Rows nvarchar(max) AS JSON
) as FifthLayer
CROSS APPLY OPENJSON (FifthLayer.Rows)
WITH
(
Row nvarchar(max) AS JSON
) as SixthLayer
CROSS APPLY OPENJSON (SixthLayer.Row)
WITH
(
Rows nvarchar(max) AS JSON
) as SeventhLayer
CROSS APPLY OPENJSON (SeventhLayer.Rows)
WITH
(
Row nvarchar(max) AS JSON
) as EighthLayer
CROSS APPLY OPENJSON (EighthLayer.Row)
WITH
(
Rows nvarchar(max) AS JSON
) as LayerNine
---Things get funky here
CROSS APPLY OPENJSON (LayerNine.Rows)
WITH
(
Row nvarchar(max) AS JSON
) as LayerTen
CROSS APPLY OPENJSON (LayerTen.Row)
WITH
(
Rows nvarchar(max) AS JSON
) as LayerEleven
CROSS APPLY OPENJSON (LayerEleven.Rows)
WITH
(
Row nvarchar(max) AS JSON
) as LayerTwelve
--21 items in last col
There is JSON support for SQL Server:
https://learn.microsoft.com/en-us/sql/relational-databases/json/json-data-sql-server?view=sql-server-ver15
There is also a JSON storage approach described here: https://learn.microsoft.com/en-us/sql/relational-databases/json/store-json-documents-in-sql-tables?view=sql-server-ver15. Although it will be fairly complex in your case, I recommend storing the raw JSON in a log table and then following the tutorial above to see if that solves your issue.
Use the QuickBooks API to get the JSON, then refer to this guide:
https://learn.microsoft.com/en-us/sql/relational-databases/json/json-data-sql-server?view=sql-server-ver15
and this guide:
https://learn.microsoft.com/en-us/sql/relational-databases/json/convert-json-data-to-rows-and-columns-with-openjson-sql-server?view=sql-server-ver15
You can also consider setting up something like AWS Lambda or Google Cloud Functions if you need something more automated.
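On the Python side, the Rows/Row/ColData nesting is uniform enough that a short recursive walk can pull every ColData entry out before the data ever reaches SQL. This is only a sketch, not QuickBooks tooling; the names raw_json, report, and flatten_coldata are placeholders for illustration:
def flatten_coldata(node, path=()):
    """Recursively collect every ColData entry together with the section path above it."""
    rows = []
    if isinstance(node, dict):
        if "ColData" in node:
            cols = node["ColData"]  # e.g. [{"id": "35", "value": "Checking"}, {"value": "1350.55"}]
            name = cols[0].get("value", "")
            amount = cols[1].get("value", "") if len(cols) > 1 else ""
            rows.append({"path": " > ".join(path), "name": name, "value": amount})
        for key, child in node.items():
            if key != "ColData":
                rows.extend(flatten_coldata(child, path + (key,)))
    elif isinstance(node, list):
        for child in node:
            rows.extend(flatten_coldata(child, path))
    return rows

# report = json.loads(raw_json)            # the QuickBooks response shown above
# flat = flatten_coldata(report["Rows"])   # list of dicts: path, name, value
# Each dict can then go into pandas.DataFrame(flat) or a parameterized INSERT.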

Flattening dict/list object - Python vs Sql?

I have some dict/list values. Typically I would store them as JSON and flatten via SQL, but I'm wondering whether anyone has a perspective on doing it in Python instead. In general, what is the most efficient way to get this data onto one row, where the values for dimensions & metricHeaderEntries become columns and the respective row values fall in line under them?
BTW, this is an example Google Analytics API export, so it may help others. The goal is to get all the values for each dimension and metric onto one row.
{
"reports": [
{
"columnHeader": {
"dimensions": [
"DimA",
"DimAB",
"DimAC",
"DimAD",
"DimAE"
],
"metricHeader": {
"metricHeaderEntries": [
{
"name": "METRICAB",
"type": "INTEGER"
},
{
"name": "METRICABAC",
"type": "INTEGER"
},
{
"name": "METRICABACD",
"type": "INTEGER"
},
{
"name": "METRICABADCE",
"type": "VARCHAR"
},
{
"name": "METRICABADEEW",
"type": "FLOAT"
},
{
"name": "METRICABADEEFG",
"type": "FLOAT"
}
]
}
},
"data": {
"rows": [
{
"dimensions": [
"(test)",
"2019022",
"(not set)",
"d",
"/search"
],
"metrics": [
{
"values": [
"0",
"1",
"1",
"100000.0",
"61.0",
"16.0"
]
}
]
},
{
"dimensions": [
"(test)",
"20190222",
"(not set)",
"asdf",
"/adf"
],
"metrics": [
{
"values": [
"30",
"133",
"31",
"330.0",
"31.0",
"32.0"
]
}
]
}
]
},
"nextPageToken": "331000"
}
]
}
Desired Output:
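For what it's worth, a minimal Python sketch of the flattening: the column header already carries the dimension and metric names, so each row's values can be zipped against them and handed to pandas. The variable name report_json (the parsed JSON above) is an assumption for illustration:
import pandas as pd

records = []
for report in report_json["reports"]:
    header = report["columnHeader"]
    dim_names = header["dimensions"]
    metric_names = [m["name"] for m in header["metricHeader"]["metricHeaderEntries"]]
    for row in report["data"]["rows"]:
        record = dict(zip(dim_names, row["dimensions"]))                 # dimension columns
        record.update(zip(metric_names, row["metrics"][0]["values"]))   # metric columns
        records.append(record)

df = pd.DataFrame(records)  # one row per GA row, one column per dimension/metric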

Fast way of adding fields to a nested dict

I need help improving my code.
I've got a nested dict with many levels:
{
"11": {
"FacLC": {
"immty": [
"in_mm",
"in_mm"
],
"moood": [
"in_oo",
"in_oo"
]
}
},
"22": {
"FacLC": {
"immty": [
"in_mm",
"in_mm",
"in_mm"
]
}
}
}
And I want to add additional fields on every level, so my output looks like this:
[
{
"id": "",
"name": "11",
"general": [
{
"id": "",
"name": "FacLC",
"specifics": [
{
"id": "",
"name": "immty",
"characteristics": [
{
"id": "",
"name": "in_mm"
},
{
"id": "",
"name": "in_mm"
}
]
},
{
"id": "",
"name": "moood",
"characteristics": [
{
"id": "",
"name": "in_oo"
},
{
"id": "",
"name": "in_oo"
}
]
}
]
}
]
},
{
"id": "",
"name": "22",
"general": [
{
"id": "",
"name": "FacLC",
"specifics": [
{
"id": "",
"name": "immty",
"characteristics": [
{
"id": "",
"name": "in_mm"
},
{
"id": "",
"name": "in_mm"
},
{
"id": "",
"name": "in_mm"
}
]
}
]
}
]
}
]
I managed to write a four-times-nested for loop, which I find inefficient and inelegant:
my_new_dict = []  # the transformed output list
for main_name, general in my_dict.items():
    generals = []
    for general_name, specific in general.items():
        specifics = []
        for specific_name, characteristics in specific.items():
            characteristics_dicts = []
            for characteristic in characteristics:
                characteristics_dicts.append({
                    "id": "",
                    "name": characteristic,
                })
            specifics.append({
                "id": "",
                "name": specific_name,
                "characteristics": characteristics_dicts,
            })
        generals.append({
            "id": "",
            "name": general_name,
            "specifics": specifics,
        })
    my_new_dict.append({
        "id": "",
        "name": main_name,
        "general": generals,
    })
I am wondering if there is a more compact and efficient solution.
In the past I created a function to do this. Basically, you call this function every time you need to add new fields to a nested dict, regardless of how many levels the nested dict has. You only have to pass in the 'full path', which I call the 'key_map',
like ['node1', 'node1a', 'node1apart3']:
def insert_value_using_map(_nodes_list_to_be_appended, _keys_map, _value_to_be_inserted):
    for _key in _keys_map[:-1]:
        _nodes_list_to_be_appended = _nodes_list_to_be_appended.setdefault(_key, {})
    _nodes_list_to_be_appended[_keys_map[-1]] = _value_to_be_inserted
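A quick usage sketch of the function above (the tree variable and the key path are made-up examples):
tree = {}
insert_value_using_map(tree, ['node1', 'node1a', 'node1apart3'], 'some_value')
# tree is now {'node1': {'node1a': {'node1apart3': 'some_value'}}}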

Manipulating data from json to reflect a single value from each entry

Setup:
This data set has 50 "issues"; within these "issues" I have captured the data that I need to put into my PostgreSQL database. But "components" is where I have trouble. I can get a list of all the "name" values under "components", but I only want one instance of "name" per "issue", and some issues have two. Some are empty, and I would like to return null for those.
Here is some sample data that should suffice:
{
"issues": [
{
"key": "1",
"fields": {
"components": [],
"customfield_1": null,
"customfield_2": null
}
},
{
"key": "2",
"fields": {
"components": [
{
"name": "Testing"
}
],
"customfield_1": null,
"customfield_2": null
}
},
{
"key": "3",
"fields": {
"components": [
{
"name": "Documentation"
},
{
"name": "Manufacturing"
}
],
"customfield_1": null,
"customfield_2": 5
}
}
]
}
I am looking to return (just for the component name piece):
['null', 'Testing', 'Documentation']
I set up the other data for entry into the db like so:
values = list((item['key'],
               # components list,
               item['fields']['customfield_1'],
               item['fields']['customfield_2']) for item in data_story['issues'])
I am wondering if there is a way to insert the created components list where I have commented "components list" above.
Just to recap: I want only one component name for each issue, null or not, and to be able to put it in the values variable with the rest of the data. Also, the first name in components will work for each "issue".
Here's what I would do, assuming that we are working with a data variable:
values = [(x['fields']['components'][0]['name'] if len(x['fields']['components']) != 0 else 'null') for x in data['issues']]
Let me know if you have any queries.
Use if/else inside a list comprehension.
Example code:
results = [ (x['fields']['components'][0]['name'] if 'components' in x['fields'] and len(x['fields']['components']) > 0 else 'null') for x in data['issues'] ]
The full sample code:
import json
data = json.loads('''{ "issues": [
{
"key": "1",
"fields": {
"components": [],
"customfield_1": null,
"customfield_2": null
}
},
{
"key": "2",
"fields": {
"components": [
{
"name": "Testing"
}
],
"customfield_1": null,
"customfield_2": null
}
},
{
"key": "3",
"fields": {
"components": [
{
"name": "Documentation"
},
{
"name": "Manufacturing"
}
],
"customfield_1": null,
"customfield_2": 5
}
}
]
}''')
results = [ (x['fields']['components'][0]['name'] if 'components' in x['fields'] and len(x['fields']['components']) > 0 else 'null') for x in data['issues'] ]
print(results)
The output is ['null', u'Testing', u'Documentation'].
If you just want to delete all but one of the names from the list, then you can do that this way:
issues={
"issues": [
{
"key": "1",
"fields": {
"components": [],
"customfield_1": "null",
"customfield_2": "null"
}
},
{
"key": "2",
"fields": {
"components": [
{
"name": "Testing"
}
],
"customfield_1": "null",
"customfield_2": "null"
}
},
{
"key": "3",
"fields": {
"components": [
{
"name": "Documentation"
},
{
"name": "Manufacturing"
}
],
"customfield_1": "null",
"customfield_2": 5
}
}
]
}
Data^
componentlist = []
for i in range(len(issues["issues"])):
    x = issues["issues"][i]["fields"]["components"]
    if len(x) == 0:
        x = "null"
        componentlist.append(x)
    else:
        x = issues["issues"][i]["fields"]["components"][0]
        componentlist.append(x)
print(componentlist)
>>>['null', {'name': 'Testing'}, {'name': 'Documentation'}]
Or, if you just want the values, and not the dictionary keys:
else:
    x = issues["issues"][i]["fields"]["components"][0]["name"]
    componentlist.append(x)
['null', 'Testing', 'Documentation']

Convert Nested JSON to Excel using Python

I want to convert nested JSON to an Excel file using Python. I've nearly done it as per the requirements, but I want to achieve the Excel format shown below.
JSON
[
{
"url": "https://www.amazon.com/Best-Sellers-Appliances-Cooktops/zgbs/appliances/3741261",
"subCategory": [
],
"title": "Cooktops"
},
{
"url": "https://www.amazon.com/Best-Sellers-Appliances-Dishwashers/zgbs/appliances/3741271",
"subCategory": [
{
"url": "https://www.amazon.com/Best-Sellers-Appliances-Built-Dishwashers/zgbs/appliances/3741281",
"subCategory": [
],
"title": "Built-In Dishwashers"
},
{
"url": "https://www.amazon.com/Best-Sellers-Appliances-Portable-Countertop-Dishwashers/zgbs/appliances/3741301",
"subCategory": [
],
"title": "Portable & Countertop Dishwashers"
}
],
"title": "Dishwashers"
},
{
"url": "https://www.amazon.com/Best-Sellers-Appliances-Freezers/zgbs/appliances/3741331",
"subCategory": [
{
"url": "https://www.amazon.com/Best-Sellers-Appliances-Chest-Freezers/zgbs/appliances/3741341",
"subCategory": [
],
"title": "Chest Freezers"
},
{
"url": "https://www.amazon.com/Best-Sellers-Appliances-Upright-Freezers/zgbs/appliances/3741351",
"subCategory": [
],
"title": "Upright Freezers"
}
],
"title": "Freezers"
},
{
"url": "https://www.amazon.com/Best-Sellers-Appliances-Ice-Makers/zgbs/appliances/2399939011",
"subCategory": [
],
"title": "Ice Makers"
},
{
"url": "https://www.amazon.com/Best-Sellers-Appliances-Range-Hoods/zgbs/appliances/3741441",
"subCategory": [
],
"title": "Range Hoods"
},
{
"url": "https://www.amazon.com/Best-Sellers-Appliances-Ranges/zgbs/appliances/3741411",
"subCategory": [
{
"url": "https://www.amazon.com/Best-Sellers-Appliances-Drop-Ranges/zgbs/appliances/3741421",
"subCategory": [
],
"title": "Drop-In Ranges"
},
{
"url": "https://www.amazon.com/Best-Sellers-Appliances-Freestanding-Ranges/zgbs/appliances/3741431",
"subCategory": [
],
"title": "Freestanding Ranges"
},
{
"url": "https://www.amazon.com/Best-Sellers-Appliances-Slide-Ranges/zgbs/appliances/2399946011",
"subCategory": [
],
"title": "Slide-In Ranges"
}
],
"title": "Ranges"
},
{
"url": "https://www.amazon.com/Best-Sellers-Appliances-Refrigerators/zgbs/appliances/3741361",
"subCategory": [
],
"title": "Refrigerators"
},
{
"url": "https://www.amazon.com/Best-Sellers-Appliances-Wall-Ovens/zgbs/appliances/3741481",
"subCategory": [
{
"url": "https://www.amazon.com/Best-Sellers-Appliances-Combination-Microwave-Wall-Ovens/zgbs/appliances/3741491",
"subCategory": [
],
"title": "Combination Microwave & Wall Ovens"
},
{
"url": "https://www.amazon.com/Best-Sellers-Appliances-Double-Wall-Ovens/zgbs/appliances/3741501",
"subCategory": [
],
"title": "Double Wall Ovens"
},
{
"url": "https://www.amazon.com/Best-Sellers-Appliances-Single-Wall-Ovens/zgbs/appliances/3741511",
"subCategory": [
],
"title": "Single Wall Ovens"
}
],
"title": "Wall Ovens"
},
{
"url": "https://www.amazon.com/Best-Sellers-Appliances-Warming-Drawers/zgbs/appliances/2399955011",
"subCategory": [
],
"title": "Warming Drawers"
},
{
"url": "https://www.amazon.com/Best-Sellers-Appliances-Washers-Dryers/zgbs/appliances/2383576011",
"subCategory": [
{
"url": "https://www.amazon.com/Best-Sellers-Appliances-Clothes-Dryers/zgbs/appliances/13397481",
"subCategory": [
],
"title": "Dryers"
},
{
"url": "https://www.amazon.com/Best-Sellers-Appliances-Clothes-Washing-Machines/zgbs/appliances/13397491",
"subCategory": [
],
"title": "Washers"
},
{
"url": "https://www.amazon.com/Best-Sellers-Appliances-Combination-Washers-Dryers/zgbs/appliances/13755271",
"subCategory": [
],
"title": "All-in-One Combination Washers & Dryers"
},
{
"url": "https://www.amazon.com/Best-Sellers-Appliances-Stacked-Washer-Dryer-Units/zgbs/appliances/2399957011",
"subCategory": [
],
"title": "Stacked Washer & Dryer Units"
}
],
"title": "Washers & Dryers"
},
{
"url": "https://www.amazon.com/Best-Sellers-Appliances-Wine-Cellars/zgbs/appliances/3741521",
"subCategory": [
{
"url": "https://www.amazon.com/Best-Sellers-Appliances-Built-Wine-Cellars/zgbs/appliances/3741551",
"subCategory": [
],
"title": "Built-In Wine Cellars"
},
{
"url": "https://www.amazon.com/Best-Sellers-Appliances-Freestanding-Wine-Cellars/zgbs/appliances/3741541",
"subCategory": [
],
"title": "Freestanding Wine Cellars"
},
{
"url": "https://www.amazon.com/Best-Sellers-Appliances-Furniture-Style-Wine-Cellars/zgbs/appliances/3741561",
"subCategory": [
],
"title": "Furniture-Style Wine Cellars"
},
{
"url": "https://www.amazon.com/Best-Sellers-Appliances-Small-Wine-Cellars/zgbs/appliances/3741531",
"subCategory": [
],
"title": "Small Wine Cellars"
},
{
"url": "https://www.amazon.com/Best-Sellers-Appliances-Wine-Cellar-Cooling-Systems/zgbs/appliances/3741581",
"subCategory": [
],
"title": "Wine Cellar Cooling Systems"
},
{
"url": "https://www.amazon.com/Best-Sellers-Appliances-Wine-Rooms/zgbs/appliances/3741571",
"subCategory": [
],
"title": "Wine Rooms"
}
],
"title": "Wine Cellars"
},
{
"url": "https://www.amazon.com/Best-Sellers-Appliances-Home-Appliance-Warranties/zgbs/appliances/2242350011",
"subCategory": [
],
"title": "Appliance Warranties"
}
]
I'm traversing all subCategories like this:
row = 1
def TraverseJSONTree(jsonObject, count=0):
    global row  # declare before first use of the module-level row counter
    title = jsonObject.get('title')
    url = jsonObject.get('url')
    print('Title: ' + title + ' , Position: ' + str(count))
    worksheet.write_string(row, count, title)
    worksheet.write_string(row, 6, url)
    row += 1
    subCategories = jsonObject.get('subCategory', [])
    for category in subCategories:
        TraverseJSONTree(category, count + 1)

for jsonObject in json.loads(jsonArray):
    TraverseJSONTree(jsonObject)
Expected Result
row = 1
def TraverseJSONTree(jsonObject, main_title=None, count=0):
    global row  # declare before first use of the module-level row counter
    if main_title is None:
        main_title = title = jsonObject.get('title')
    else:
        title = jsonObject.get('title')
    url = jsonObject.get('url')
    print('Title: ' + title + ' , Position: ' + str(count))
    if main_title is not None:
        worksheet.write_string(row, 0, main_title)  # keep the top-level title in column 0
    worksheet.write_string(row, count, title)
    worksheet.write_string(row, 6, url)
    row += 1
    subCategories = jsonObject.get('subCategory', [])
    for category in subCategories:
        TraverseJSONTree(category, main_title, count + 1)

for jsonObject in json.loads(jsonArray):
    TraverseJSONTree(jsonObject)
This returns your expected output: the main_title check means that when an object has subcategories, the original (top-level) title is also written to column 0 of the Excel sheet, while the rest remains the same.
Modification :
The simplest way to do this would be to use the csv module. Say we have the whole JSON in the variable a:
import csv

fieldnames = ['Category1', 'Category1.1', 'url']
csvfile = open("category.csv", 'wb')
csvfilewriter = csv.DictWriter(csvfile, fieldnames=fieldnames, dialect='excel', delimiter=',')
csvfilewriter.writeheader()
for b in a:
    # one row for the top-level category itself (no subcategory)
    data = []
    data.append(b['title'])
    data.append("")
    data.append(b['url'])
    csvfilewriter.writerow(dict(zip(fieldnames, data)))
    for i in xrange(len(b['subCategory'])):
        # one row per subcategory, repeating the parent title
        data = []
        data.append(b['title'])
        data.append(b['subCategory'][i]['title'])
        data.append(b['subCategory'][i]['url'])
        csvfilewriter.writerow(dict(zip(fieldnames, data)))
You will have the desired CSV in the same location. This handles only two levels (a category and its direct subcategories, i.e. 1 and 1.1), because that is all the sample data contains; if you need more levels, repeat the same pattern. (I know it's not the most efficient way; I couldn't think of a better one in such a short time.)
You can also use the pandas module to convert the dictionary:
import pandas as pd
pd.DataFrame.from_dict(dictionary_element)
Then do this for each dictionary in that JSON, merge the resulting frames, and save the result to a CSV file.
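As a rough sketch of that pandas route for this particular shape (the variable name categories for the parsed JSON list and the output file name are assumptions; to_excel needs an Excel engine such as openpyxl installed):
import pandas as pd

# `categories` holds the parsed JSON list shown above.
rows = []
for cat in categories:
    rows.append({"Category1": cat["title"], "Category1.1": "", "url": cat["url"]})
    for sub in cat["subCategory"]:
        rows.append({"Category1": cat["title"], "Category1.1": sub["title"], "url": sub["url"]})

pd.DataFrame(rows).to_excel("categories.xlsx", index=False)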
