Nested and escaped JSON payload to flattened dictionary - python

I'm looking for any suggestions to resolve an issue I'm facing. It might seem like a simple problem, but after a few days of trying to find an answer, I don't think it is anymore.
I'm receiving data (StringType) in the following JSON-like format, and there is a requirement to turn it into a flat key-value dictionary. Here is a payload sample:
s = """{"status": "active", "name": "{\"first\": \"John\", \"last\": \"Smith\"}", "street_address": "100 \"Y\" Street"}"""
and the desired output should look like this:
{'status': 'active', 'name_first': 'John', 'name_last': 'Smith', 'street_address': '100 "Y" Street'}
The issue is that I can't find a way to turn the original string (s) into a dictionary. Once I can achieve that, the flattening part works perfectly fine.
import json
import collections
import ast
#############################################################
# Flatten complex structure into a flat dictionary
#############################################################
def flatten_dictionary(dictionary, parent_key=False, separator='_', value_to_str=True):
    """
    Turn a nested complex json into a flattened dictionary
    :param dictionary: The dictionary to flatten
    :param parent_key: The string to prepend to dictionary's keys
    :param separator: The string used to separate flattened keys
    :param value_to_str: Force all returned values to string type
    :return: A flattened dictionary
    """
    items = []
    for key, value in dictionary.items():
        new_key = str(parent_key) + separator + key if parent_key else key
        try:
            value = json.loads(value)
        except BaseException:
            value = value
        if isinstance(value, collections.MutableMapping):
            if not value.items():
                items.append((new_key, None))
            else:
                items.extend(flatten_dictionary(value, new_key, separator).items())
        elif isinstance(value, list):
            if len(value):
                for k, v in enumerate(value):
                    items.extend(flatten_dictionary({str(k): (str(v) if value_to_str else v)}, new_key).items())
            else:
                items.append((new_key, None))
        else:
            items.append((new_key, (str(value) if value_to_str else value)))
    return dict(items)
# Data sample; string and dictionary
s = """{"status": "active", "name": "{\"first\": \"John\", \"last\": \"Smith\"}", "street_address": "100 \"Y\" Street"}"""
d = {"status": "active", "name": "{\"first\": \"John\", \"last\": \"Smith\"}", "street_address": "100 \"Y\" Street"}
# Works for dictionary type
print(flatten_dictionary(d))
# Doesn't work for string type, for any of the below methods
e = eval(s)
# a = ast.literal_eval(s)
# j = json.loads(s)
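For reference, the outer payload is itself valid JSON as long as the backslashes actually survive into the runtime string (e.g. when the data arrives from a file or over the network, rather than being typed as a non-raw Python literal, which swallows the \" escapes). A minimal sketch, assuming a raw-string version of the sample:
import json

raw = r'{"status": "active", "name": "{\"first\": \"John\", \"last\": \"Smith\"}", "street_address": "100 \"Y\" Street"}'
outer = json.loads(raw)
# outer['name'] is still a JSON-encoded string; the inner json.loads call in
# flatten_dictionary picks it up on the next pass.
print(flatten_dictionary(outer))
# {'status': 'active', 'name_first': 'John', 'name_last': 'Smith', 'street_address': '100 "Y" Street'}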

Try:
import json
import re
def jsonify(s):
    s = s.replace('"{', '{').replace('}"', '}')
    s = re.sub(r'street_address":\s+"(.+)"(.+)"(.+)"', r'street_address": "\1\2\3"', s)
    return json.loads(s)
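For instance, with the s from the question (where the non-raw triple-quoted literal has already swallowed the backslashes, so the inner quotes are unescaped at runtime), this version parses but drops the quotes around Y:
print(jsonify(s))
# {'status': 'active', 'name': {'first': 'John', 'last': 'Smith'}, 'street_address': '100 Y Street'}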
If you must keep the quotes around Y, try:
def jsonify(s):
    s = s.replace('"{', '{').replace('}"', '}')
    search = re.search(r'street_address":\s+"(.+)"(.+)"(.+)"', s)
    if search:
        s = re.sub(r'street_address":\s+"(.+)"(.+)"(.+)"', r'street_address": "\1\2\3"', s)
    dict_version = json.loads(s)
    dict_version['street_address'] = dict_version['street_address'].replace(search.group(2), '"' + search.group(2) + '"')
    return dict_version
A more generalized attempt:
def jsonify(s):
    pattern = r'(?<=[,}])\s*"(.[^\{\}:,]+?)":\s+"([^\{\}:,]+?)"([^\{\}:,]+?)"([^\{\}:,]+?)"([,\}])'
    s = s.replace('"{', '{').replace('}"', '}')
    search = re.search(pattern, s)
    matches = []
    if search:
        matches = re.findall(pattern, s)
        s = re.sub(pattern, r'"\1": "\2\3\4"\5', s)
    dict_version = json.loads(s)
    for match in matches:
        dict_version[match[0]] = dict_version[match[0]].replace(match[2], '"' + match[2] + '"')
    return dict_version
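Combining this with the flatten_dictionary from the question should then give the desired flat result:
print(flatten_dictionary(jsonify(s)))
# {'status': 'active', 'name_first': 'John', 'name_last': 'Smith', 'street_address': '100 "Y" Street'}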

Related

Parse nested json to csv using Python Pandas

I have a json in below format:
{"MainName":[{"col1":"12345","col2":"False","col3":"190809","SubName1":{"col4":30.00,"SubName2":{"col5":"19703","col6":"USD"}},"col7":"7372267","SubName3":[{"col8":"345337","col9":"PC"}],"col10":"10265","col11":"29889004","col12":"calculated","col13":"9218","SubName4":{"col14":1,"SubName5":{"col15":"1970324","col16":"integer"}},"col17":"434628","col18":"2020-02-06T13:47:40.000-0800","col19":"754878037","SubName6":{"col20":30.00,"SubName7":{"col21":"19703248","col22":"USD"}}},{"col1":"12345","col2":"False","col3":"190809","SubName1":{"col4":30.00,"SubName2":{"col5":"19703","col6":"USD"}},"col7":"7372267","SubName3":[{"col8":"345337","col9":"PC"}],"col10":"10265","col11":"29889004","col12":"calculated","col13":"9218","SubName4":{"col14":1,"SubName5":{"col15":"1970324","col16":"integer"}},"col17":"434628","col18":"2020-02-06T13:47:40.000-0800","col19":"754878037","SubName6":{"col20":30.00,"SubName7":{"col21":"19703248","col22":"USD"}}}],"skip":0,"top":2,"next":"/v1/APIName?skip=2&top=2"}
I want to convert it into csv with below format:
MainName_col1,MainName_col2,MainName_col3,MainName_SubName1_col4,MainName_SubName1_SubName2_col5,MainName_SubName1_SubName2_col6,MainName_col7,MainName_SubName3_col8,MainName_SubName3_col9,MainName_col10,MainName_col11,MainName_col12,MainName_col13,MainName_SubName4_col14,MainName_SubName4_SubName5_col15,MainName_SubName4_SubName5_col16,MainName_col17,MainName_col18,MainName_col19,MainName_SubName6_col20,MainName_SubName6_SubName7_col21,MainName_SubName6_SubName7_col22
12345,False,190809,30.0,19703,USD,7372267,345337,PC,10265,29889004,calculated,9218,1,1970324,integer,434628,2020-02-06T13:47:40.000-0800,754878037,30.0,19703248,USD
12345,False,190809,30.0,19703,USD,7372267,345337,PC,10265,29889004,calculated,9218,2,123453,integer,434628,2020-02-06T13:47:40.000-0800,754878037,30.0,19703248,USD
Kindly help me out in this.
Use the function below to flatten your JSON data.
dc = {"MainName":[{"col1":"12345","col2":False,"col3":"190809","SubName1":{"col4":30.00,"SubName2":{"col5":"19703","col6":"USD"}},"col7":"7372267","SubName3":[{"col8":"345337","col9":"PC"}],"col10":"10265","col11":"29889004","col12":"calculated","col13":"9218","SubName4":{"col14":1,"SubName5":{"col15":"1970324","col16":"integer"}},"col17":"434628","col18":"2020-02-06T13:47:40.000-0800","col19":"754878037","SubName6":{"col20":30.00,"SubName7":{"col21":"19703248","col22":"USD"}}}],"skip":0,"top":1,"next":"/v1/APIName?skip=1&top=1"}
def flatten(root: str, dict_obj: dict):
    flat = {}
    for i in dict_obj.keys():
        val = dict_obj[i]
        if not isinstance(val, dict) and not isinstance(val, list):
            flat[f'{root}_{i}'] = val
        else:
            if isinstance(val, list):
                val = val[-1]
            flat.update(flatten(f'{root}_{i}', val))
    return flat
flatten('MainName', dc['MainName'][0])
It will give you the expected output; then use it the way you want.
{'MainName_col1': '12345',
'MainName_col2': False,
'MainName_col3': '190809',
'MainName_SubName1_col4': 30.0,
'MainName_SubName1_SubName2_col5': '19703',
'MainName_SubName1_SubName2_col6': 'USD',
'MainName_col7': '7372267',
'MainName_SubName3_col8': '345337',
'MainName_SubName3_col9': 'PC',
'MainName_col10': '10265',
'MainName_col11': '29889004',
'MainName_col12': 'calculated',
'MainName_col13': '9218',
'MainName_SubName4_col14': 1,
'MainName_SubName4_SubName5_col15': '1970324',
'MainName_SubName4_SubName5_col16': 'integer',
'MainName_col17': '434628',
'MainName_col18': '2020-02-06T13:47:40.000-0800',
'MainName_col19': '754878037',
'MainName_SubName6_col20': 30.0,
'MainName_SubName6_SubName7_col21': '19703248',
'MainName_SubName6_SubName7_col22': 'USD'}
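To get from that flat dict to the CSV shown in the question, one option (a sketch reusing the flatten helper above, and assuming every element of dc['MainName'] flattens to the same set of keys) is csv.DictWriter:
import csv

rows = [flatten('MainName', item) for item in dc['MainName']]
with open('output.csv', 'w', newline='') as f:
    writer = csv.DictWriter(f, fieldnames=list(rows[0].keys()))
    writer.writeheader()   # header row: MainName_col1, MainName_col2, ...
    writer.writerows(rows)  # one CSV row per element of dc['MainName']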
As I understand it, your dc will look like the one below:
dc = {"MainName":[{"col1":"12345","col2":"False","col3":"190809","SubName1":{"col4":30.00,"SubName2":{"col5":"19703","col6":"USD"}},"col7":"7372267","SubName3":[{"col8":"345337","col9":"PC"}],"col10":"10265","col11":"29889004","col12":"calculated","col13":"9218","SubName4":{"col14":1,"SubName5":{"col15":"1970324","col16":"integer"}},"col17":"434628","col18":"2020-02-06T13:47:40.000-0800","col19":"754878037","SubName6":{"col20":30.00,"SubName7":{"col21":"19703248","col22":"USD"}}},{"col1_a":"12345XX","col2_b":"False","col3_c":"190809","SubName1":{"col4_d":30.00,"SubName2":{"col5_e":"19703","col6_f":"USD"}},"col7_g":"7372267","SubName3":[{"col8_h":"345337","col9":"PC"}],"col10_i":"10265","col11_j":"29889004","col12_k":"calculated","col13_l":"9218","SubName4":{"col14_m":1,"SubName5":{"col15_n":"1970324","col16_o":"integer"}},"col17_p":"434628","col18_q":"2020-02-06T13:47:40.000-0800","col19_r":"754878037","SubName6":{"col20_s":30.00,"SubName7":{"col21_t":"19703248","col22_u":"USDZZ"}}}],"skip":0,"top":2,"next":"/v1/APIName?skip=2&top=2"}
I used the above answer to flatten everything into a single object:
def flatten(root: str, dict_obj: dict):
    flat = {}
    for i in dict_obj.keys():
        val = dict_obj[i]
        if not isinstance(val, dict) and not isinstance(val, list):
            flat[f'{root}_{i}'] = val
        else:
            if isinstance(val, list):
                val = val[-1]
            flat.update(flatten(f'{root}_{i}', val))
    return flat
keys_list = []
values_list = []
for i in range(len(dc['MainName'])):
    result = flatten('MainName', dc['MainName'][i])
    keys_list.append(list(result.keys()))
    values_list.append(list(result.values()))

for k in keys_list:
    for res in k:
        guestFile = open("sample.csv", "a")
        guestFile.write(res)
        guestFile.write(",")
        guestFile.close()

for v in values_list:
    for res in v:
        guestFile = open("sample.csv", "a")
        guestFile.write(str(res))
        guestFile.write(",")
        guestFile.close()
Check out my code at https://repl.it/#TamilselvanLaks/jsontocsvmul
Note: use the 'run' button to run the program; on the left side you can see sample.csv,
where all the keys appear just as you want them.
Please let me know if my answer meets your expectations.

Optimize code to handle large chunks of data

I have the following code:
import json
data_sample = [{
    "name": "John",
    "age": 30,
    "cars": [
        {
            "temp": {"sum": "20", "for": 12},
            "id": 30,
            "element": [{"model": "Taurus1", "doors": {"id": "1", "id2": 101}}, {"model": "T1", "doors": {"id": "2", "id2": 12}}, {"model": "As", "doors": {"id": "Mo", "id2": 4}}]
        },
        {
            "temp": {"sum": "10", "for": 12},
            "id": 31,
            "element": [{"model": "Taurus2", "doors": {"id": "2", "id2": 102}}, {"model": "T2", "doors": {"id": "5", "id2": 12}}, {"model": "Thing", "doors": {"id": "Fo", "id2": 4}}]
        },
        {
            "temp": {"sum": "20", "for": 10},
            "id": 32,
            "element": [{"model": "Taurus3", "doors": {"id": "3", "id2": 103}}, {"model": "T3", "doors": {"id": "15", "id2": 62}}, {"model": "By", "doors": {"id": "Log", "id2": 4}}]
        }
    ]
}]
def flat_list(z):
    x = []
    for i, data_obj in enumerate(z):
        if type(data_obj) is dict or type(data_obj) is list:
            x.extend([flatten_data(data_obj)])
        else:
            x.extend([data_obj])
    return x

def flatten_data(y):
    out = {}
    def flatten(x, name=''):
        if type(x) is dict:
            for a in x:
                flatten(x[a], name + a + '_')
        elif type(x) is list:
            out[name[:-1]] = flat_list(x)
        else:
            out[name[:-1]] = x
    flatten(y)
    return out

def generatejson(response2):
    # response2 is [(first data set), (second data set)]; convert it to a dictionary {0: (first data set), 1: (second data set)}
    sample_object = {i: data_response for i, data_response in enumerate(response2)}
    flat = {k: flatten_data(v) for k, v in sample_object.items()}
    return json.dumps(flat, sort_keys=True)
print(generatejson(data_sample))
This code takes data from the following format:
[(first data set), (second data set)]
and begins to look for nested dicts. If a nested dict is detected, the code flattens it into the parent level.
For example, the code detects that doors is a nested dict and converts it into flattened keys, as illustrated below.
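(Illustration based on the data_sample above.) An element such as
{"model": "Taurus1", "doors": {"id": "1", "id2": 101}}
is converted to
{"model": "Taurus1", "doors_id": "1", "doors_id2": 101}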
Note that it doesn't change the lists/arrays. They are not being flattened.
My issue:
On small amounts of data the code works great, but when handling a large number of sets (1000+) the performance is very poor, and it sometimes even crashes.
How can I improve and optimize the performance of this code?
The data_sample contains only 1 data set (I assume that's enough for checking).

create a dict by splitting a string in an ordered dict

I have an ordered dict that represents field definitions, i.e. Name, type, width, precision.
it looks like this:
OrderedDict([(u'CODE_MUN', 'str:8'), (u'CODE_DR_AN', 'str:8'),
(u'AREA', 'float:31.2'), (u'PERIMETER', 'float:31.4')])
I would like to create a dict for each item that would be like this:
{'name' : 'CODE_MUN', 'type': 'str', 'width': 8, 'precision':0} for fields without precision
and
{'name' : 'AREA', 'type': 'float', 'width': 31, 'precision':2 } for fields with precision
for keys, values in fieldsDict.iteritems():
    dict = {}
    dict['name'] = keys
    props = re.split(':.', values)
    dict['type'] = props[0]
    dict['width'] = props[1]
    dict['precision'] = props[2]
Of course, I get an IndexError when there is no precision defined. What would be the best way to achieve this?
Use a try-except block.
for keys, values in fieldsDict.iteritems():
    dict = {}
    dict['name'] = keys
    props = re.split(r'[:.]', values)  # split on ':' or '.'
    dict['type'] = props[0]
    dict['width'] = props[1]
    try:
        dict['precision'] = props[2]
    except IndexError:
        dict['precision'] = 0
You could also test for length using an if-else block. The methods are pretty close and I doubt this is a situation where it really matters, but for more on asking forgiveness vs permission you can see this question.
You have to check whether the precision is there or not.
from collections import OrderedDict
import re

fieldsDict = OrderedDict([(u'CODE_MUN', 'str:8'), (u'CODE_DR_AN', 'str:8'),
                          (u'AREA', 'float:31.2'), (u'PERIMETER', 'float:31.4')])

for keys, values in fieldsDict.iteritems():
    dict = {}
    dict['name'] = keys
    props = re.split(r'[:.]', values)  # split on ':' or '.'
    dict['type'] = props[0]
    dict['width'] = props[1]
    if len(props) == 3:
        dict['precision'] = props[2]
    else:
        dict['precision'] = 0
    print dict
This might help.
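If you also want width and precision as integers, as in the desired output, a compact Python 3 variant of the same idea is sketched below (it assumes every value of the fieldsDict above has the form 'type:width' or 'type:width.precision'):
fields = []
for name, spec in fieldsDict.items():
    ftype, _, size = spec.partition(':')        # 'float:31.2' -> 'float', '31.2'
    width, _, precision = size.partition('.')   # '31.2' -> '31', '2'; '8' -> '8', ''
    fields.append({'name': name, 'type': ftype,
                   'width': int(width), 'precision': int(precision or 0)})

print(fields[0])  # {'name': 'CODE_MUN', 'type': 'str', 'width': 8, 'precision': 0}
print(fields[2])  # {'name': 'AREA', 'type': 'float', 'width': 31, 'precision': 2}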

Search for a value in a nested dictionary python

Search for a value and get the parent dictionary names (keys):
Dictionary = {dict1: {
        'part1': {
            '.wbxml': 'application/vnd.wap.wbxml',
            '.rl': 'application/resource-lists+xml',
        },
        'part2': {
            '.wsdl': 'application/wsdl+xml',
            '.rs': 'application/rls-services+xml',
            '.xop': 'application/xop+xml',
            '.svg': 'image/svg+xml',
        },
        'part3': {...}, ...
    },
    dict2: {
        'part1': {
            '.dotx': 'application/vnd.openxmlformats-..',
            '.zaz': 'application/vnd.zzazz.deck+xml',
            '.xer': 'application/patch-ops-error+xml',
        },
        'part2': {...},
        'part3': {...}, ...
    }, ...
In the above dictionary I need to search for values like "image/svg+xml" (none of the values are repeated in the dictionary). How can I search for "image/svg+xml" so that it returns the parent keys as a dictionary, { dict1: "part2" }?
Please note: Solutions should work unmodified for both Python 2.7 and Python 3.3.
Here's a simple recursive version:
def getpath(nested_dict, value, prepath=()):
    for k, v in nested_dict.items():
        path = prepath + (k,)
        if v == value:  # found value
            return path
        elif hasattr(v, 'items'):  # v is a dict
            p = getpath(v, value, path)  # recursive call
            if p is not None:
                return p
Example:
print(getpath(dictionary, 'image/svg+xml'))
# -> ('dict1', 'part2', '.svg')
To yield multiple paths (Python 3 only solution):
def find_paths(nested_dict, value, prepath=()):
    for k, v in nested_dict.items():
        path = prepath + (k,)
        if v == value:  # found value
            yield path
        elif hasattr(v, 'items'):  # v is a dict
            yield from find_paths(v, value, path)

print(*find_paths(dictionary, 'image/svg+xml'))
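Since the question asks for something that runs unmodified on both Python 2.7 and 3.3, the multi-path generator can also be written without yield from (a sketch):
def find_paths(nested_dict, value, prepath=()):
    for k, v in nested_dict.items():
        path = prepath + (k,)
        if v == value:               # found value
            yield path
        elif hasattr(v, 'items'):    # v is a dict
            for p in find_paths(v, value, path):
                yield p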
This is an iterative traversal of your nested dicts that additionally keeps track of all the keys leading up to a particular point. Therefore as soon as you find the correct value inside your dicts, you also already have the keys needed to get to that value.
The code below will run as-is if you put it in a .py file. The find_mime_type(...) function returns the sequence of keys that will get you from the original dictionary to the value you want. The demo() function shows how to use it.
d = {'dict1': {'part1': {'.wbxml': 'application/vnd.wap.wbxml',
                         '.rl': 'application/resource-lists+xml'},
               'part2': {'.wsdl': 'application/wsdl+xml',
                         '.rs': 'application/rls-services+xml',
                         '.xop': 'application/xop+xml',
                         '.svg': 'image/svg+xml'}},
     'dict2': {'part1': {'.dotx': 'application/vnd.openxmlformats-..',
                         '.zaz': 'application/vnd.zzazz.deck+xml',
                         '.xer': 'application/patch-ops-error+xml'}}}

def demo():
    mime_type = 'image/svg+xml'
    try:
        key_chain = find_mime_type(d, mime_type)
    except KeyError:
        print('Could not find this mime type: {0}'.format(mime_type))
        exit()
    print('Found {0} mime type here: {1}'.format(mime_type, key_chain))
    nested = d
    for key in key_chain:
        nested = nested[key]
    print('Confirmation lookup: {0}'.format(nested))

def find_mime_type(d, mime_type):
    reverse_linked_q = list()
    reverse_linked_q.append((list(), d))
    while reverse_linked_q:
        this_key_chain, this_v = reverse_linked_q.pop()
        # finish search if found the mime type
        if this_v == mime_type:
            return this_key_chain
        # not found. keep searching
        # queue dicts for checking / ignore anything that's not a dict
        try:
            items = this_v.items()
        except AttributeError:
            continue  # this was not a nested dict. ignore it
        for k, v in items:
            reverse_linked_q.append((this_key_chain + [k], v))
    # if we haven't returned by this point, we've exhausted all the contents
    raise KeyError

if __name__ == '__main__':
    demo()
Output:
Found image/svg+xml mime type here: ['dict1', 'part2', '.svg']
Confirmation lookup: image/svg+xml
Here is a solution that works for a complex data structure of nested lists and dicts
import pprint
def search(d, search_pattern, prev_datapoint_path=''):
    output = []
    current_datapoint = d
    current_datapoint_path = prev_datapoint_path
    if type(current_datapoint) is dict:
        for dkey in current_datapoint:
            if search_pattern in str(dkey):
                c = current_datapoint_path
                c += "['" + dkey + "']"
                output.append(c)
            c = current_datapoint_path
            c += "['" + dkey + "']"
            for i in search(current_datapoint[dkey], search_pattern, c):
                output.append(i)
    elif type(current_datapoint) is list:
        for i in range(0, len(current_datapoint)):
            if search_pattern in str(i):
                c = current_datapoint_path
                c += "[" + str(i) + "]"
                output.append(c)  # record the matching path, not the bare index
            c = current_datapoint_path
            c += "[" + str(i) + "]"
            for i in search(current_datapoint[i], search_pattern, c):
                output.append(i)
    elif search_pattern in str(current_datapoint):
        c = current_datapoint_path
        output.append(c)
    output = filter(None, output)
    return list(output)
if __name__ == "__main__":
    d = {'dict1': {'part1': {'.wbxml': 'application/vnd.wap.wbxml',
                             '.rl': 'application/resource-lists+xml'},
                   'part2': {'.wsdl': 'application/wsdl+xml',
                             '.rs': 'application/rls-services+xml',
                             '.xop': 'application/xop+xml',
                             '.svg': 'image/svg+xml'}},
         'dict2': {'part1': {'.dotx': 'application/vnd.openxmlformats-..',
                             '.zaz': 'application/vnd.zzazz.deck+xml',
                             '.xer': 'application/patch-ops-error+xml'}}}

    d2 = {
        "items": {
            "item": [
                {
                    "id": "0001",
                    "type": "donut",
                    "name": "Cake",
                    "ppu": 0.55,
                    "batters": {
                        "batter": [
                            {"id": "1001", "type": "Regular"},
                            {"id": "1002", "type": "Chocolate"},
                            {"id": "1003", "type": "Blueberry"},
                            {"id": "1004", "type": "Devil's Food"}
                        ]
                    },
                    "topping": [
                        {"id": "5001", "type": "None"},
                        {"id": "5002", "type": "Glazed"},
                        {"id": "5005", "type": "Sugar"},
                        {"id": "5007", "type": "Powdered Sugar"},
                        {"id": "5006", "type": "Chocolate with Sprinkles"},
                        {"id": "5003", "type": "Chocolate"},
                        {"id": "5004", "type": "Maple"}
                    ]
                },
                ...
            ]
        }
    }

    pprint.pprint(search(d, 'svg+xml', 'd'))
>> ["d['dict1']['part2']['.svg']"]
    pprint.pprint(search(d2, '500', 'd2'))
>> ["d2['items']['item'][0]['topping'][0]['id']",
"d2['items']['item'][0]['topping'][1]['id']",
"d2['items']['item'][0]['topping'][2]['id']",
"d2['items']['item'][0]['topping'][3]['id']",
"d2['items']['item'][0]['topping'][4]['id']",
"d2['items']['item'][0]['topping'][5]['id']",
"d2['items']['item'][0]['topping'][6]['id']"]
Here are two similar quick and dirty ways of doing this type of operation. The function find_parent_dict1 uses a dict comprehension, but if you are uncomfortable with that then find_parent_dict2 uses the infamous nested for loops.
Dictionary = {'dict1': {'part1': {'.wbxml': '1', '.rl': '2'}, 'part2': {'.wbdl': '3', '.rs': '4'}},
              'dict2': {'part3': {'.wbxml': '5', '.rl': '6'}, 'part4': {'.wbdl': '1', '.rs': '10'}}}
value = '3'

def find_parent_dict1(Dictionary):
    for key1 in Dictionary.keys():
        item = {key1: key2 for key2 in Dictionary[key1].keys() if value in Dictionary[key1][key2].values()}
        if len(item) > 0:
            return item

find_parent_dict1(Dictionary)

def find_parent_dict2(Dictionary):
    for key1 in Dictionary.keys():
        for key2 in Dictionary[key1].keys():
            if value in Dictionary[key1][key2].values():
                print({key1: key2})

find_parent_dict2(Dictionary)
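With the sample Dictionary and value = '3' above, both functions should locate {'dict1': 'part2'}: find_parent_dict1 returns it, and find_parent_dict2 prints it.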
Traverses a nested dict looking for a particular value. When success is achieved the full key path to the value is printed. I left all the comments and print statements for pedagogical purposes (this isn't production code!)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 24 17:16:46 2022

#author: wellington
"""

class Tree(dict):
    """
    allows autovivification as in Perl hashes
    """
    def __missing__(self, key):
        value = self[key] = type(self)()
        return value

# tracking the key sequence when seeking the target
key_list = Tree()
# dict storing the target success result
success = Tree()

# example nested dict of dicts and lists
E = {
    'AA': {
        'BB': {
            'CC': {
                'DD': {
                    'ZZ': 'YY',
                    'WW': 'PP'
                },
                'QQ': {
                    'RR': 'SS'
                },
            },
            'II': {
                'JJ': 'KK'
            },
            'LL': ['MM', 'GG', 'TT']
        }
    }
}

def find_keys_from_value(data, target):
    """
    recursive function -
    given a value it returns all the keys in the path to that value within
    the dict "data"
    there are many paths and many false routes
    at the end of a given path if success has not been achieved
    the function discards keys to get back to the next possible path junction
    """
    print(f"the number of keys in the local dict is {len(data)}")
    key_counter = 0
    for key in data:
        key_counter += 1
        # if target has been located stop iterating through keys
        if success[target] == 1:
            break
        else:
            # eliminate prior key from path that did not lead to success
            if key_counter > 1:
                k_list.pop()
            # add key to new path
            k_list.append(key)
            print(f"printing k_list after append{k_list}")
            # if target located set success[target] = 1 and exit
            if key == target or data[key] == target:
                key_list[target] = k_list
                success[target] = 1
                break
            # if the target has not been located check to see if the value
            # associated with the new key is a dict and if so return to the
            # recursive function with the new dict as "data"
            elif isinstance(data[key], dict):
                print(f"\nvalue is dict\n {data[key]}")
                find_keys_from_value(data[key], target)
            # check to see if the value associated with the new key is a list
            elif isinstance(data[key], list):
                # print("\nv is list\n")
                # search through the list
                for i in data[key]:
                    # check to see if the list element is a dict
                    # and if so return to the recursive function with
                    # the new dict as "data
                    if isinstance(i, dict):
                        find_keys_from_value(i, target)
                    # check to see if each list element is the target
                    elif i == target:
                        print(f"list entry {i} is target")
                        success[target] = 1
                        key_list[target] = k_list
                    elif i != target:
                        print(f"list entry {i} is not target")
                        print(f"printing k_list before pop_b {k_list}")
                        print(f"popping off key_b {key}")
            # so if value is not a key and not a list and not the target then
            # discard the key from the key list
            elif data[key] != target:
                print(f"value {data[key]} is not target")
                print(f"printing k_list before removing key_before {k_list}")
                print(f"removing key_c {key}")
                k_list.remove(key)

# select target values
values = ["PP", "SS", "KK", "TT"]
success = {}
for target in values:
    print(f"\nlooking for target {target}")
    success[target] = 0
    k_list = []
    find_keys_from_value(E, target)
    print(f"\nprinting key_list for target {target}")
    print(f"{key_list[target]}\n")

print("\n****************\n\n")

Parsing json and searching through it

I have this code
import json
from pprint import pprint
json_data=open('bookmarks.json')
jdata = json.load(json_data)
pprint (jdata)
json_data.close()
How can I search through it for u'uri': u'http:'?
ObjectPath is a library that provides ability to query JSON and nested structures of dicts and lists. For example, you can search for all attributes called "foo" regardless how deep they are by using $..foo.
While the documentation focuses on the command line interface, you can perform the queries programmatically by using the package's Python internals. The example below assumes you've already loaded the data into Python data structures (dicts & lists). If you're starting with a JSON file or string you just need to use load or loads from the json module first.
import objectpath
data = [
{'foo': 1, 'bar': 'a'},
{'foo': 2, 'bar': 'b'},
{'NoFooHere': 2, 'bar': 'c'},
{'foo': 3, 'bar': 'd'},
]
tree_obj = objectpath.Tree(data)
tuple(tree_obj.execute('$..foo'))
# returns: (1, 2, 3)
Notice that it just skipped elements that lacked a "foo" attribute, such as the third item in the list. You can also do much more complex queries, which makes ObjectPath handy for deeply nested structures (e.g. finding where x has y that has z: $.x.y.z). I refer you to the documentation and tutorial for more information.
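For instance, starting from the bookmarks.json file mentioned in the question, loading and querying might look like this (a sketch using the same '$..' style of query as above, here for 'uri' attributes):
import json
import objectpath

with open('bookmarks.json') as f:
    data = json.load(f)

tree_obj = objectpath.Tree(data)
uris = tuple(tree_obj.execute('$..uri'))  # every 'uri' value anywhere in the structure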
As json.loads simply returns a dict, you can use the operators that apply to dicts:
>>> jdata = json.loads('{"uri": "http:", "foo", "bar"}')
>>> 'uri' in jdata # Check if 'uri' is in jdata's keys
True
>>> jdata['uri'] # Will return the value belonging to the key 'uri'
u'http:'
Edit: to give an idea regarding how to loop through the data, consider the following example:
>>> import json
>>> jdata = json.loads(open ('bookmarks.json').read())
>>> for c in jdata['children'][0]['children']:
...     print 'Title: {}, URI: {}'.format(c.get('title', 'No title'),
...                                       c.get('uri', 'No uri'))
...
Title: Recently Bookmarked, URI: place:folder=BOOKMARKS_MENU(...)
Title: Recent Tags, URI: place:sort=14&type=6&maxResults=10&queryType=1
Title: , URI: No uri
Title: Mozilla Firefox, URI: No uri
Inspecting the jdata data structure will allow you to navigate it as you wish. The pprint call you already have is a good starting point for this.
Edit2: Another attempt. This gets the file you mentioned in a list of dictionaries. With this, I think you should be able to adapt it to your needs.
>>> def build_structure(data, d=[]):
...     if 'children' in data:
...         for c in data['children']:
...             d.append({'title': c.get('title', 'No title'),
...                       'uri': c.get('uri', None)})
...             build_structure(c, d)
...     return d
...
>>> pprint.pprint(build_structure(jdata))
[{'title': u'Bookmarks Menu', 'uri': None},
{'title': u'Recently Bookmarked',
'uri': u'place:folder=BOOKMARKS_MENU&folder=UNFILED_BOOKMARKS&(...)'},
{'title': u'Recent Tags',
'uri': u'place:sort=14&type=6&maxResults=10&queryType=1'},
{'title': u'', 'uri': None},
{'title': u'Mozilla Firefox', 'uri': None},
{'title': u'Help and Tutorials',
'uri': u'http://www.mozilla.com/en-US/firefox/help/'},
(...)
}]
To then "search through it for u'uri': u'http:'", do something like this:
for c in build_structure(jdata):
    if c['uri'] and c['uri'].startswith('http:'):
        print 'Started with http'
Seems there's a typo (missing colon) in the JSON dict provided by jro.
The correct syntax would be:
jdata = json.loads('{"uri": "http:", "foo": "bar"}')
This cleared it up for me when playing with the code.
Functions to search through and print dicts, like JSON.
*made in python 3
Search:
def pretty_search(dict_or_list, key_to_search, search_for_first_only=False):
    """
    Give it a dict or a list of dicts and a dict key (to get values of);
    it will search through it and all contained dicts and arrays
    for all values of the dict key you gave, and will return a set of them,
    unless you specify search_for_first_only=True.
    :param dict_or_list:
    :param key_to_search:
    :param search_for_first_only:
    :return:
    """
    search_result = set()
    if isinstance(dict_or_list, dict):
        for key in dict_or_list:
            key_value = dict_or_list[key]
            if key == key_to_search:
                if search_for_first_only:
                    return key_value
                else:
                    search_result.add(key_value)
            if isinstance(key_value, dict) or isinstance(key_value, list) or isinstance(key_value, set):
                _search_result = pretty_search(key_value, key_to_search, search_for_first_only)
                if _search_result and search_for_first_only:
                    return _search_result
                elif _search_result:
                    for result in _search_result:
                        search_result.add(result)
    elif isinstance(dict_or_list, list) or isinstance(dict_or_list, set):
        for element in dict_or_list:
            if isinstance(element, list) or isinstance(element, set) or isinstance(element, dict):
                _search_result = pretty_search(element, key_to_search, search_for_first_only)
                if _search_result and search_for_first_only:
                    return _search_result
                elif _search_result:
                    for result in _search_result:
                        search_result.add(result)
    return search_result if search_result else None
Print:
def pretty_print(dict_or_list, print_spaces=0):
    """
    Give it a dict or a list of dicts and it will return
    a pretty, printable version of it.
    :param dict_or_list:
    :param print_spaces:
    :return:
    """
    pretty_text = ""
    if isinstance(dict_or_list, dict):
        for key in dict_or_list:
            key_value = dict_or_list[key]
            if isinstance(key_value, dict):
                key_value = pretty_print(key_value, print_spaces + 1)
                pretty_text += "\t" * print_spaces + "{}:\n{}\n".format(key, key_value)
            elif isinstance(key_value, list) or isinstance(key_value, set):
                pretty_text += "\t" * print_spaces + "{}:\n".format(key)
                for element in key_value:
                    if isinstance(element, dict) or isinstance(element, list) or isinstance(element, set):
                        pretty_text += pretty_print(element, print_spaces + 1)
                    else:
                        pretty_text += "\t" * (print_spaces + 1) + "{}\n".format(element)
            else:
                pretty_text += "\t" * print_spaces + "{}: {}\n".format(key, key_value)
    elif isinstance(dict_or_list, list) or isinstance(dict_or_list, set):
        for element in dict_or_list:
            if isinstance(element, dict) or isinstance(element, list) or isinstance(element, set):
                pretty_text += pretty_print(element, print_spaces + 1)
            else:
                pretty_text += "\t" * print_spaces + "{}\n".format(element)
    else:
        pretty_text += str(dict_or_list)
    if print_spaces == 0:
        print(pretty_text)
    return pretty_text
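A quick illustration of both helpers on a small made-up structure:
data = {"a": {"uri": "http://x"}, "b": [{"uri": "http://y"}]}
print(pretty_search(data, "uri"))  # -> {'http://x', 'http://y'} (a set; order may vary)
pretty_print(data)                 # prints an indented, human-readable view of the nested data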
You can use jsonpipe if you just need the output (and are more comfortable with the command line):
cat bookmarks.json | jsonpipe |grep uri
