explode dictionary keys in a list - python

I want to explode the keys in a Python dict such that if I have as input:
d = {"first":
{"second_a": 3, "second_b": 4},
"another": 2,
"anotherone": {"third_a": {"last": 3}}
}
I will get as output a list of the exploded keys:
["first.second_a",
"first.second_b",
"another",
"anotherone.third_a.last"
]
Do you know any utility function that does this?
Thank you!

If the nesting in your dictionary consists only of nested dictionaries (no lists of dicts, etc.), you can do, for example:
d = {
    "first": {"second_a": 3, "second_b": 4},
    "another": 2,
    "anotherone": {"third_a": {"last": 3}},
}

def flatten(d, prefix=""):
    for k, v in d.items():
        if isinstance(v, dict):
            yield from flatten(v, prefix + "." + k)
        else:
            yield (prefix + "." + k).strip(".")

print(list(flatten(d)))
Prints:
['first.second_a',
'first.second_b',
'another',
'anotherone.third_a.last']
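If you would rather not rely on the .strip(".") trick, a minimal variation of the same generator (only building the dotted key when a prefix already exists) could look like this:

def flatten(d, prefix=""):
    for k, v in d.items():
        key = prefix + "." + k if prefix else k
        if isinstance(v, dict):
            yield from flatten(v, key)
        else:
            yield key

print(list(flatten(d)))  # same output as above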

Related

Unpack nodes of a dict for each slash in the key

I have a dict:
a = {
    "group_a/category_a/metric_a": 5,
    "group_a/category_a/metric_b": 4,
    "group_a/category_b/metric_a": 3,
    "group_a/category_b/metric_b": 2,
    "group_b/category_a/metric_d": 1
}
I would like to unpack the nodes by creating a nested view, separated by each slash, where the outcome looks as follows:
b = {
    "group_a": {
        "category_a": {
            "metric_a": 5,
            "metric_b": 4
        },
        "category_b": {
            "metric_a": 3,
            "metric_b": 2
        }
    },
    "group_b": {
        "category_a": {
            "metric_d": 1
        }
    }
}
How can we go from a to b? I encountered this problem when trying to publish the dict above to Firebase, as a node like group_a/category_a/metric_a is not accepted, whereas a nested dict is allowed.
Loop through, split each key into a list of keys, create sub-dictionaries as required...
from pprint import pprint

a = {
    "group_a/category_a/metric_a": 5,
    "group_a/category_a/metric_b": 4,
    "group_a/category_b/metric_a": 3,
    "group_a/category_b/metric_b": 2,
    "group_b/category_a/metric_d": 1
}

b = {}
for k, v in a.items():
    dct = b
    keys = k.split("/")
    for key in keys[:-1]:
        if key not in dct:
            dct[key] = {}
        dct = dct[key]
    dct[keys[-1]] = v

pprint(b)
Gives:
{'group_a': {'category_a': {'metric_a': 5, 'metric_b': 4},
'category_b': {'metric_a': 3, 'metric_b': 2}},
'group_b': {'category_a': {'metric_d': 1}}}
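If it helps, the inner loop can be shortened with dict.setdefault, which creates a sub-dictionary only when it is missing; a sketch of the same approach:

b = {}
for k, v in a.items():
    dct = b
    keys = k.split("/")
    for key in keys[:-1]:
        dct = dct.setdefault(key, {})  # descend, creating the node if needed
    dct[keys[-1]] = v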

Turn dict with duplicate keys into list containing these keys

I receive a response I have no control over from an API. Using requests' response.json() will filter out duplicate keys, so I need to turn this response into a list where each duplicated key becomes an element of that list. What I get now:
{
    "user": {
        //...
    },
    "user": {
        //...
    },
    //...
}
What I need:
{
    "users": [
        {
            "user": {
                //...
            }
        },
        {
            "user": {
                //...
            }
        },
        //...
    ]
}
This way JSON won't filter out any of the results, and I can loop through users.
Okay, let me have a try using the method from Python json parser allow duplicate keys.
All we need to do is handle the pairs_list ourselves.
from json import JSONDecoder

def parse_object_pairs(pairs):
    return pairs

data = """
{"foo": {"key": 2, "key": 3}, "foo": 4, "foo": 23}
"""

decoder = JSONDecoder(object_pairs_hook=parse_object_pairs)
pairs_list = decoder.decode(data)
# the pairs_list is the real thing which we can use

aggre_key = 's'

def recusive_handle(pairs_list):
    dct = {}
    for k, v in pairs_list:
        if v and isinstance(v, list) and isinstance(v[0], tuple):
            v = recusive_handle(v)
        if k + aggre_key in dct:
            dct[k + aggre_key].append({k: v})
        elif k in dct:
            first_dict = {k: dct.pop(k)}
            dct[k + aggre_key] = [first_dict, {k: v}]
        else:
            dct[k] = v
    return dct

print(recusive_handle(pairs_list))
output:
{'foos': [{'foo': {'keys': [{'key': 2}, {'key': 3}]}}, {'foo': 4}, {'foo': 23}]}
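For what it's worth, the same pairs list can be obtained without constructing a JSONDecoder explicitly, since json.loads also accepts the hook; a minimal sketch, reusing parse_object_pairs and recusive_handle from above:

import json

pairs_list = json.loads(data, object_pairs_hook=parse_object_pairs)
print(recusive_handle(pairs_list))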

How to convert python nested dict to non-nested dict?

How to convert below src dict (nested dict)
{
    'a': {'b': 1, 'c': {'d': 2}},
    'b': 3,
    'c': {'d': 4, 'a': 5}
}
to dst dict (not nested) below?
{
    'a.b': 1,
    'a.c.d': 2,
    'b': 3,
    'c.d': 4,
    'c.a': 5
}
The src dict is nested; the dst dict is not.
Is there an easy method to do this conversion?
There is a Python package for flattening dictionaries that you can use:
https://pypi.org/project/flatten-dict/
Implementation:
from flatten_dict import flatten

nested = {'a': {'b': 1, 'c': {'d': 2}},
          'b': 3,
          'c': {'d': 4, 'a': 5}}

flat = flatten(nested, reducer=lambda k1, k2: k2 if k1 is None else k1 + '.' + k2)
print(flat)
# {'a.b': 1, 'a.c.d': 2, 'b': 3, 'c.d': 4, 'c.a': 5}
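If I remember correctly, recent versions of flatten-dict also ship a built-in "dot" reducer, so the lambda may not be necessary; verify against the version you have installed:

from flatten_dict import flatten

flat = flatten(nested, reducer="dot")
print(flat)
# {'a.b': 1, 'a.c.d': 2, 'b': 3, 'c.d': 4, 'c.a': 5}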
There are multiple ways. Here is one way to do it.
nested_dict = {
    'a': {
        'b': 1,
        'c': {
            'd': 2
        }
    },
    'b': 3,
    'c': {
        'd': 4,
        'a': 5
    },
}

flatten_dict = {}

def flatten_the_nested(nested_dict, parent_key=''):
    for key, value in nested_dict.items():
        new_key = parent_key + '.' + key if parent_key else key
        if isinstance(value, dict):
            flatten_the_nested(value, new_key)
        else:
            flatten_dict[new_key] = value
    return flatten_dict

print(flatten_the_nested(nested_dict, ''))
You will get the following result.
{'c.d': 4, 'c.a': 5, 'b': 3, 'a.b': 1, 'a.c.d': 2}
Or, if you want to use a library, you can use https://pypi.org/project/flatten-dict/
Well, it's not complicated. In just a few minutes I got the following:
def flatten(dic, prefix=""):
    if prefix != "":
        prefix = prefix + "."
    result = {}
    for k, v in dic.items():
        if isinstance(v, dict):
            for k1, v1 in flatten(v, prefix + k).items():
                result[k1] = v1
        else:
            result[prefix + k] = v
    return result
I have not thoroughly tested this algorithm, though.
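A quick sanity check against the dicts from the question (not exhaustive, just the happy path):

src = {'a': {'b': 1, 'c': {'d': 2}},
       'b': 3,
       'c': {'d': 4, 'a': 5}}
print(flatten(src))
# {'a.b': 1, 'a.c.d': 2, 'b': 3, 'c.d': 4, 'c.a': 5}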

Search nested dictionary values in a list and return whole nested dictionary that contains searched value [duplicate]

Assume I have this:
[
    {"name": "Tom", "age": 10},
    {"name": "Mark", "age": 5},
    {"name": "Pam", "age": 7}
]
and by searching "Pam" as name, I want to retrieve the related dictionary: {name: "Pam", age: 7}
How to achieve this ?
You can use a generator expression:
>>> dicts = [
... { "name": "Tom", "age": 10 },
... { "name": "Mark", "age": 5 },
... { "name": "Pam", "age": 7 },
... { "name": "Dick", "age": 12 }
... ]
>>> next(item for item in dicts if item["name"] == "Pam")
{'age': 7, 'name': 'Pam'}
If you need to handle the item not being there, then you can do what user Matt suggested in his comment and provide a default using a slightly different API:
next((item for item in dicts if item["name"] == "Pam"), None)
And to find the index of the item, rather than the item itself, you can enumerate() the list:
next((i for i, item in enumerate(dicts) if item["name"] == "Pam"), None)
This looks to me like the most Pythonic way:
people = [
    {'name': "Tom", 'age': 10},
    {'name': "Mark", 'age': 5},
    {'name': "Pam", 'age': 7}
]

filter(lambda person: person['name'] == 'Pam', people)
result (returned as a list in Python 2):
[{'age': 7, 'name': 'Pam'}]
Note: In Python 3, a filter object is returned, so the Python 3 solution would be:
list(filter(lambda person: person['name'] == 'Pam', people))
@Frédéric Hamidi's answer is great. In Python 3.x the syntax for .next() changed slightly. Thus a slight modification:
>>> dicts = [
...     { "name": "Tom", "age": 10 },
...     { "name": "Mark", "age": 5 },
...     { "name": "Pam", "age": 7 },
...     { "name": "Dick", "age": 12 }
... ]
>>> next(item for item in dicts if item["name"] == "Pam")
{'age': 7, 'name': 'Pam'}
As mentioned in the comments by @Matt, you can add a default value as such:
>>> next((item for item in dicts if item["name"] == "Pam"), False)
{'name': 'Pam', 'age': 7}
>>> next((item for item in dicts if item["name"] == "Sam"), False)
False
>>>
You can use a list comprehension:
def search(name, people):
    return [element for element in people if element['name'] == name]
I tested various methods to go through a list of dictionaries and return the dictionaries where key x has a certain value.
Results:
Speed: list comprehension > generator expression >> normal list iteration >>> filter.
All scale linearly with the number of dicts in the list (10x list size -> 10x time).
The number of keys per dictionary does not affect speed significantly, even for large numbers (thousands) of keys. Please see this graph I calculated: https://imgur.com/a/quQzv (method names are listed below).
All tests done with Python 3.6.4, W7x64.
from random import randint
from timeit import timeit

list_dicts = []
for _ in range(1000):  # number of dicts in the list
    dict_tmp = {}
    for i in range(10):  # number of keys for each dict
        dict_tmp[f"key{i}"] = randint(0, 50)
    list_dicts.append(dict_tmp)

def a():
    # normal iteration over all elements
    for dict_ in list_dicts:
        if dict_["key3"] == 20:
            pass

def b():
    # use 'generator'
    for dict_ in (x for x in list_dicts if x["key3"] == 20):
        pass

def c():
    # use 'list'
    for dict_ in [x for x in list_dicts if x["key3"] == 20]:
        pass

def d():
    # use 'filter'
    for dict_ in filter(lambda x: x['key3'] == 20, list_dicts):
        pass
Results:
1.7303 # normal list iteration
1.3849 # generator expression
1.3158 # list comprehension
7.7848 # filter
people = [
    {'name': "Tom", 'age': 10},
    {'name': "Mark", 'age': 5},
    {'name': "Pam", 'age': 7}
]

def search(name):
    for p in people:
        if p['name'] == name:
            return p

search("Pam")
Have you ever tried out the pandas package? It's perfect for this kind of search task and optimized too.
import pandas as pd

listOfDicts = [
    {"name": "Tom", "age": 10},
    {"name": "Mark", "age": 5},
    {"name": "Pam", "age": 7}
]

# Create a data frame; keys are used as column headers.
# Dict items with the same key are entered into the same respective column.
df = pd.DataFrame(listOfDicts)

# The pandas dataframe allows you to pick out specific values like so:
df2 = df[(df['name'] == 'Pam') & (df['age'] == 7)]

# Alternate syntax, same thing
df2 = df[(df.name == 'Pam') & (df.age == 7)]
I've added a little bit of benchmarking below to illustrate pandas' faster runtimes on a larger scale, i.e. 100k+ entries:
setup_large = 'dicts = [];\
[dicts.extend(({ "name": "Tom", "age": 10 },{ "name": "Mark", "age": 5 },\
{ "name": "Pam", "age": 7 },{ "name": "Dick", "age": 12 })) for _ in range(25000)];\
from operator import itemgetter;import pandas as pd;\
df = pd.DataFrame(dicts);'
setup_small = 'dicts = [];\
dicts.extend(({ "name": "Tom", "age": 10 },{ "name": "Mark", "age": 5 },\
{ "name": "Pam", "age": 7 },{ "name": "Dick", "age": 12 }));\
from operator import itemgetter;import pandas as pd;\
df = pd.DataFrame(dicts);'
method1 = '[item for item in dicts if item["name"] == "Pam"]'
method2 = 'df[df["name"] == "Pam"]'
import timeit
t = timeit.Timer(method1, setup_small)
print('Small Method LC: ' + str(t.timeit(100)))
t = timeit.Timer(method2, setup_small)
print('Small Method Pandas: ' + str(t.timeit(100)))
t = timeit.Timer(method1, setup_large)
print('Large Method LC: ' + str(t.timeit(100)))
t = timeit.Timer(method2, setup_large)
print('Large Method Pandas: ' + str(t.timeit(100)))
#Small Method LC: 0.000191926956177
#Small Method Pandas: 0.044392824173
#Large Method LC: 1.98827004433
#Large Method Pandas: 0.324505090714
To add just a tiny bit to @Frédéric Hamidi's answer.
In case you are not sure a key is in the list of dicts, something like this would help:
next((item for item in dicts if item.get("name") and item["name"] == "Pam"), None)
Simply using list comprehension:
[i for i in dct if i['name'] == 'Pam'][0]
Sample code:
dct = [
    {'name': 'Tom', 'age': 10},
    {'name': 'Mark', 'age': 5},
    {'name': 'Pam', 'age': 7}
]
print([i for i in dct if i['name'] == 'Pam'][0])
> {'age': 7, 'name': 'Pam'}
You can achieve this using Python's filter and next functions.
filter filters the given sequence and returns an iterator.
next accepts an iterator and returns the next element.
So you can find the element with:
my_dict = [
    {"name": "Tom", "age": 10},
    {"name": "Mark", "age": 5},
    {"name": "Pam", "age": 7}
]

next(filter(lambda obj: obj.get('name') == 'Pam', my_dict), None)
and the output is,
{'name': 'Pam', 'age': 7}
Note: The above code will return None in case the name we are searching for is not found.
One simple way, using a list comprehension: if l is the list
l = [
    {"name": "Tom", "age": 10},
    {"name": "Mark", "age": 5},
    {"name": "Pam", "age": 7}
]
then
[d['age'] for d in l if d['name']=='Tom']
def dsearch(lod, **kw):
    return filter(lambda i: all((i[k] == v for (k, v) in kw.items())), lod)

lod = [{'a': 33, 'b': 'test2', 'c': 'a.ing333'},
       {'a': 22, 'b': 'ihaha', 'c': 'fbgval'},
       {'a': 33, 'b': 'TEst1', 'c': 's.ing123'},
       {'a': 22, 'b': 'ihaha', 'c': 'dfdvbfjkv'}]

list(dsearch(lod, a=22))
[{'a': 22, 'b': 'ihaha', 'c': 'fbgval'},
 {'a': 22, 'b': 'ihaha', 'c': 'dfdvbfjkv'}]

list(dsearch(lod, a=22, b='ihaha'))
[{'a': 22, 'b': 'ihaha', 'c': 'fbgval'},
 {'a': 22, 'b': 'ihaha', 'c': 'dfdvbfjkv'}]

list(dsearch(lod, a=22, c='fbgval'))
[{'a': 22, 'b': 'ihaha', 'c': 'fbgval'}]
This is a general way of searching a value in a list of dictionaries:
def search_dictionaries(key, value, list_of_dictionaries):
    return [element for element in list_of_dictionaries if element[key] == value]
dicts = [
    {"name": "Tom", "age": 10},
    {"name": "Mark", "age": 5},
    {"name": "Pam", "age": 7}
]

from collections import defaultdict

dicts_by_name = defaultdict(list)
for d in dicts:
    dicts_by_name[d['name']] = d

print(dicts_by_name['Tom'])
# output
# >>>
# {'age': 10, 'name': 'Tom'}
names = [{'name':'Tom', 'age': 10}, {'name': 'Mark', 'age': 5}, {'name': 'Pam', 'age': 7}]
resultlist = [d for d in names if d.get('name', '') == 'Pam']
first_result = resultlist[0]
This is one way...
You can try this:
''' lst: list of dictionaries '''
lst = [{"name": "Tom", "age": 10}, {"name": "Mark", "age": 5}, {"name": "Pam", "age": 7}]
search = input("What name: ")  # Input name that needs to be searched (say 'Pam')
print([lst[i] for i in range(len(lst)) if lst[i]["name"] == search][0])  # Output
>>> {'age': 7, 'name': 'Pam'}
Put the accepted answer in a function for easy re-use:
def get_item(collection, key, target):
    return next((item for item in collection if item[key] == target), None)
Or also as a lambda
get_item_lambda = lambda collection, key, target : next((item for item in collection if item[key] == target), None)
Result
key = "name"
target = "Pam"
print(get_item(target_list, key, target))
print(get_item_lambda(target_list, key, target))
#{'name': 'Pam', 'age': 7}
#{'name': 'Pam', 'age': 7}
In case the key may not be in the target dictionary, use dict.get to avoid a KeyError:
def get_item(collection, key, target):
    return next((item for item in collection if item.get(key, None) == target), None)

get_item_lambda = lambda collection, key, target: next((item for item in collection if item.get(key, None) == target), None)
My first thought would be that you might want to consider creating a dictionary of these dictionaries ... if, for example, you were going to be searching it more than a small number of times.
However, that might be a premature optimization. What would be wrong with:
def get_records(key, store=()):
    '''Return a list of all records containing name==key from our store.'''
    assert key is not None
    return [d for d in store if d['name'] == key]
Most (if not all) implementations proposed here have two flaws:
They assume only one key to be passed for searching, while it may be interesting to have more for a complex dict.
They assume all keys passed for searching exist in the dicts, hence they don't deal correctly with the KeyError occurring when a key is missing.
An updated proposition:
def find_first_in_list(objects, **kwargs):
    return next((obj for obj in objects if
                 len(set(obj.keys()).intersection(kwargs.keys())) > 0 and
                 all([obj[k] == v for k, v in kwargs.items() if k in obj.keys()])),
                None)
Maybe not the most pythonic, but at least a bit more failsafe.
Usage:
>>> obj1 = find_first_in_list(list_of_dict, name='Pam', age=7)
>>> obj2 = find_first_in_list(list_of_dict, name='Pam', age=27)
>>> obj3 = find_first_in_list(list_of_dict, name='Pam', address='nowhere')
>>>
>>> print(obj1, obj2, obj3)
{"name": "Pam", "age": 7}, None, {"name": "Pam", "age": 7}
The gist.
Here is a comparison of iterating through the list, using filter+lambda, and refactoring (if needed or valid for your case) your code to a dict of dicts rather than a list of dicts:
import time

# Build list of dicts
list_of_dicts = list()
for i in range(100000):
    list_of_dicts.append({'id': i, 'name': 'Tom'})

# Build dict of dicts
dict_of_dicts = dict()
for i in range(100000):
    dict_of_dicts[i] = {'name': 'Tom'}

# Find the one with ID of 99999

# 1. iterate through the list
lod_ts = time.time()
for elem in list_of_dicts:
    if elem['id'] == 99999:
        break
lod_tf = time.time()
lod_td = lod_tf - lod_ts

# 2. Use filter (wrapped in list() so it is actually evaluated on Python 3)
f_ts = time.time()
x = list(filter(lambda k: k['id'] == 99999, list_of_dicts))
f_tf = time.time()
f_td = f_tf - f_ts

# 3. find it in dict of dicts
dod_ts = time.time()
x = dict_of_dicts[99999]
dod_tf = time.time()
dod_td = dod_tf - dod_ts

print('List of Dictionaries took: %s' % lod_td)
print('Using filter took: %s' % f_td)
print('Dict of Dicts took: %s' % dod_td)
And the output is this:
List of Dictionaries took: 0.0099310874939
Using filter took: 0.0121960639954
Dict of Dicts took: 4.05311584473e-06
Conclusion:
Clearly, having a dictionary of dicts is the most efficient way to search in those cases where you know you will be searching by ID only.
Interestingly, using filter is the slowest solution.
I would create a dict of dicts like so:
names = ["Tom", "Mark", "Pam"]
ages = [10, 5, 7]
my_d = {}
for i, j in zip(names, ages):
my_d[i] = {"name": i, "age": j}
or, using exactly the same info as in the posted question:
info_list = [{"name": "Tom", "age": 10}, {"name": "Mark", "age": 5}, {"name": "Pam", "age": 7}]

my_d = {}
for d in info_list:
    my_d[d["name"]] = d
Then you could do my_d["Pam"] and get {"name": "Pam", "age": 7}
Ducks will be a lot faster than a list comprehension or filter. It builds an index on your objects so lookups don't need to scan every item.
pip install ducks
from ducks import Dex

dicts = [
    {"name": "Tom", "age": 10},
    {"name": "Mark", "age": 5},
    {"name": "Pam", "age": 7}
]

# Build the index
dex = Dex(dicts, {'name': str, 'age': int})

# Find matching objects
dex[{'name': 'Pam', 'age': 7}]
Result: [{'name': 'Pam', 'age': 7}]
You have to go through all elements of the list. There is no shortcut!
Unless somewhere else you keep a dictionary of the names pointing to the items of the list, but then you have to take care of the consequences of popping an element from your list.
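For illustration, a minimal sketch of that kind of side index (hypothetical names; remember the index has to be rebuilt or updated whenever the list changes):

people = [
    {"name": "Tom", "age": 10},
    {"name": "Mark", "age": 5},
    {"name": "Pam", "age": 7}
]

by_name = {p["name"]: p for p in people}  # must be kept in sync with the list
print(by_name["Pam"])                     # {'name': 'Pam', 'age': 7}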
I found this thread when I was searching for an answer to the same
question. While I realize that it's a late answer, I thought I'd
contribute it in case it's useful to anyone else:
def find_dict_in_list(dicts, default=None, **kwargs):
    """Find first matching :obj:`dict` in :obj:`list`.

    :param list dicts: List of dictionaries.
    :param dict default: Optional. Default dictionary to return.
        Defaults to `None`.
    :param **kwargs: `key=value` pairs to match in :obj:`dict`.
    :returns: First matching :obj:`dict` from `dicts`.
    :rtype: dict
    """
    rval = default
    for d in dicts:
        is_found = False
        # Search for keys in dict.
        for k, v in kwargs.items():
            if d.get(k, None) == v:
                is_found = True
            else:
                is_found = False
                break
        if is_found:
            rval = d
            break
    return rval


if __name__ == '__main__':
    # Tests
    dicts = []
    keys = 'spam eggs shrubbery knight'.split()
    start = 0
    for _ in range(4):
        dct = {k: v for k, v in zip(keys, range(start, start + 4))}
        dicts.append(dct)
        start += 4

    # Find each dict based on 'spam' key only.
    for x in range(len(dicts)):
        spam = x * 4
        assert find_dict_in_list(dicts, spam=spam) == dicts[x]

    # Find each dict based on 'spam' and 'shrubbery' keys.
    for x in range(len(dicts)):
        spam = x * 4
        assert find_dict_in_list(dicts, spam=spam, shrubbery=spam + 2) == dicts[x]

    # Search for one correct key, one incorrect key:
    for x in range(len(dicts)):
        spam = x * 4
        assert find_dict_in_list(dicts, spam=spam, shrubbery=spam + 1) is None

    # Search for non-existent dict.
    for x in range(len(dicts)):
        spam = x + 100
        assert find_dict_in_list(dicts, spam=spam) is None

Subtract dict A from dict B (deep del)?

If I have a deeply nested dict, is there a built-in way to subtract/remove a list of "paths" (e.g. keyA.keyB.key1, keyA.keyC.key2, etc.) or the keys of a second dict from the original dict? Or maybe there is a common module which has functionality like this?
Here's a suggestion:
D = { "keyA": {
"keyB" : {
"keyC" : 42,
"keyD": 13
},
"keyE" : 55
}
}
def remove_path(dictionary, path):
for node in path[:-1]:
dictionary = dictionary[node]
del dictionary[path[-1]]
remove_path(D, ["keyA", "keyB", "keyD"])
print D # prints {'keyA': {'keyB': {'keyC': 42}, 'keyE': 55}}
You'll probably want to introduce some error checking, too.
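For instance, a lightly hardened variant (just a sketch, not necessarily how you would want to report errors) that tolerates missing path segments:

def remove_path(dictionary, path):
    for node in path[:-1]:
        if not isinstance(dictionary, dict) or node not in dictionary:
            return  # path does not exist; nothing to remove
        dictionary = dictionary[node]
    dictionary.pop(path[-1], None)  # ignore a missing leaf as well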
Just in case the other answers aren't what you're looking for, here's one that subtracts one dictionary from another.
def subtract(a, b):
    """ Remove the keys in b from a. """
    for k in b:
        if k in a:
            if isinstance(b[k], dict):
                subtract(a[k], b[k])
            else:
                del a[k]
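A quick usage example of subtract, using hypothetical data shaped like the dict from the first answer (the second argument's leaf values are ignored; only its keys matter):

a = {"keyA": {"keyB": {"keyC": 42, "keyD": 13}, "keyE": 55}}
b = {"keyA": {"keyB": {"keyD": None}}}

subtract(a, b)
print(a)  # {'keyA': {'keyB': {'keyC': 42}, 'keyE': 55}}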
Another solution:
d = {
    'A': {
        'C': {
            'D': {
                'E': 4,
            },
            'F': 5,
        },
    },
    'B': 2,
}

def DeepDictDel(path, dict):
    for key in path.split('.'):
        owner = dict
        dict = dict[key]
    del owner[key]

print(d)  # prints {'A': {'C': {'D': {'E': 4}, 'F': 5}}, 'B': 2}
DeepDictDel('A.C.D', d)
print(d)  # prints {'A': {'C': {'F': 5}}, 'B': 2}
