How to check if a string contains a dictionary - python

I want to recursively parse all values in a dict that are strings with ast.literal_eval(value), but skip the eval if the string doesn't contain a dict. I want this because I have a string in a dict that is itself a dict, and I would like the value to be a dict. Best to give an example:
my_dict = {'a': 42, 'b': "my_string", 'c': "{'d': 33, 'e': 'another string'}"}
Now I don't want to do ast.literal_eval(my_dict['c']); I want a generic solution where I can do convert_to_dict(my_dict).
I wanted to write my own method, but I don't know how to check whether a string contains a dict; if it doesn't, ast.literal_eval will fail, hence the question.

You can check if you have a dict after using literal_eval and reassign:
from ast import literal_eval
def reassign(d):
    for k, v in d.items():
        try:
            evald = literal_eval(v)
            if isinstance(evald, dict):
                d[k] = evald
        except ValueError:
            pass
Just pass in the dict:
In [2]: my_dict = {'a': 42, 'b': "my_string", 'c': "{'d': 33, 'e': 'another string'}"}

In [3]: reassign(my_dict)

In [4]: my_dict
Out[4]: {'a': 42, 'b': 'my_string', 'c': {'d': 33, 'e': 'another string'}}

In [5]: my_dict = {'a': '42', 'b': "my_string", '5': "{'d': 33, 'e': 'another string', 'other_dict':{'foo':'bar'}}"}

In [6]: reassign(my_dict)

In [7]: my_dict
Out[7]:
{'5': {'d': 33, 'e': 'another string', 'other_dict': {'foo': 'bar'}},
 'a': '42',
 'b': 'my_string'}
You should also be aware that if you had certain other objects in the dict, like datetime objects etc., then literal_eval would fail, so it really depends on what your dict can contain as to whether this will work or not.
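For illustration (my own sketch, not part of the original answer): the repr of a datetime is valid Python source but not a literal, so literal_eval rejects it with a ValueError, which the except clause above then silently swallows:

from ast import literal_eval
from datetime import datetime

s = repr(datetime(2020, 1, 1))  # "datetime.datetime(2020, 1, 1, 0, 0)"
try:
    literal_eval(s)  # function calls are not literals
except ValueError as exc:
    print(exc)  # malformed node or string: ...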
If you need a recursive approach, all you need is to call reassign on the new dict.
def reassign(d):
    for k, v in d.items():
        try:
            evald = literal_eval(v)
            if isinstance(evald, dict):
                d[k] = evald
                reassign(evald)
        except ValueError:
            pass
And again just pass the dict:
In [10]: my_dict = {'a': 42, 'b': "my_string", 'c': "{'d': 33, 'e': \"{'f' : 64}\"}"}

In [11]: reassign(my_dict)

In [12]: my_dict
Out[12]: {'a': 42, 'b': 'my_string', 'c': {'d': 33, 'e': {'f': 64}}}
And if you want a new dict:
from ast import literal_eval
from copy import deepcopy
def reassign(d):
    for k, v in d.items():
        try:
            evald = literal_eval(v)
            if isinstance(evald, dict):
                yield k, dict(reassign(evald))
            else:
                # keep values whose strings eval to non-dict literals,
                # otherwise those keys would be silently dropped
                yield k, deepcopy(v)
        except ValueError:
            yield k, deepcopy(v)
Which will give you a new dict:
In [17]: my_dict = {'a': [1, 2, [3]], 'b': "my_string", 'c': "{'d': 33, 'e': \"{'f' : 64}\"}"}

In [18]: new = dict(reassign(my_dict))

In [19]: my_dict["a"][-1].append(4)

In [20]: new
Out[20]: {'a': [1, 2, [3]], 'b': 'my_string', 'c': {'d': 33, 'e': {'f': 64}}}

In [21]: my_dict
Out[21]:
{'a': [1, 2, [3, 4]],
 'b': 'my_string',
 'c': '{\'d\': 33, \'e\': "{\'f\' : 64}"}'}
You need to make sure to deepcopy objects, or you won't get a truly independent copy of the dict when you have nested objects like the list of lists above.

Here is a proposition that handles recursion. As suggested in the comments, it tries to eval everything and then checks whether the result is a dict; if it is, we recurse, otherwise we skip the value. I slightly altered the initial dict to show that it handles recursion fine:
import ast
my_dict = {'a': 42, 'b': "my_string", 'c': "{'d': 33, 'e': \"{'f' : 64}\"}"}
def recursive_dict_eval(old_dict):
    new_dict = old_dict.copy()
    for key, value in old_dict.items():
        try:
            evaled_value = ast.literal_eval(value)
            assert isinstance(evaled_value, dict)
            new_dict[key] = recursive_dict_eval(evaled_value)
        except (SyntaxError, ValueError, AssertionError):
            # SyntaxError and ValueError are the literal_eval exceptions
            pass
    return new_dict
print(my_dict)
print(recursive_dict_eval(my_dict))
Output:
{'a': 42, 'b': 'my_string', 'c': '{\'d\': 33, \'e\': "{\'f\' : 64}"}'}
{'a': 42, 'b': 'my_string', 'c': {'e': {'f': 64}, 'd': 33}}

The general idea referenced in my comment above is to run through the dictionary and try to evaluate each value. Store the result in a local variable, then check whether the evaluated expression is a dictionary. If so, reassign it in the passed input; if not, leave it alone.
import ast

my_dict = {'a': 42, 'b': "my_string", 'c': "{'d': 33, 'e': 'another string'}"}

def convert_to_dict(d):
    for key, val in d.items():
        try:
            check = ast.literal_eval(val)
        except (ValueError, SyntaxError):
            continue
        if isinstance(check, dict):
            d[key] = check
    return d
convert_to_dict(my_dict)

The other answers were really good and led me to the right solution, but the previously accepted answer had a bug. Here is my working solution:
import ast

def recursive_dict_eval(myDict):
    for key, value in myDict.items():
        try:
            if isinstance(value, dict):
                recursive_dict_eval(value)
            evaled_value = ast.literal_eval(value)
            assert isinstance(evaled_value, dict)
            myDict[key] = recursive_dict_eval(evaled_value)
        except (SyntaxError, ValueError, AssertionError):
            # SyntaxError and ValueError are the literal_eval exceptions
            pass
    return myDict
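A quick check (my own sketch, assuming the recursive_dict_eval above): values that are already dicts are recursed into, so string-dicts nested inside real dicts get converted too:

my_dict = {'a': 42, 'c': {'d': "{'e': 1}"}}  # a real dict value holding a string-dict
print(recursive_dict_eval(my_dict))
# {'a': 42, 'c': {'d': {'e': 1}}}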

If you need to handle nested strings defining dicts, json.loads with an object_hook might work for you:
import json
def convert_subdicts(d):
    for k, v in d.items():
        try:
            # Try to decode a dict
            newv = json.loads(v, object_hook=convert_subdicts)
        except Exception:
            continue
        else:
            if isinstance(newv, dict):
                d[k] = newv  # Replace with decoded dict
    return d
origdict = {'a': 42, 'b': "my_string", 'c': "{'d': 33, 'e': 'another string'}"}
newdict = convert_subdicts(origdict.copy()) # Omit .copy() if mutating origdict okay
That should recursively handle the case where the contained dicts might contain string values that define subdicts. Note that json.loads expects JSON syntax (double-quoted strings), so single-quoted strings like the example above will not decode unless you swap json.loads for ast.literal_eval. If you don't need to handle the nested case, you can omit the object_hook, or replace json.loads entirely with ast.literal_eval.
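For example (my own sketch, assuming the convert_subdicts above): with JSON-style double-quoted strings, nested levels decode as expected:

import json

origdict = {'a': 42, 'c': '{"d": 33, "e": "{\\"f\\": 64}"}'}  # JSON needs double quotes
print(convert_subdicts(origdict.copy()))
# {'a': 42, 'c': {'d': 33, 'e': {'f': 64}}}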

Related

Python3: Remove duplicates from the dictionary list [duplicate]

I have a list of dicts, and I'd like to remove the dicts with identical key and value pairs.
For this list: [{'a': 123}, {'b': 123}, {'a': 123}]
I'd like to return this: [{'a': 123}, {'b': 123}]
Another example:
For this list: [{'a': 123, 'b': 1234}, {'a': 3222, 'b': 1234}, {'a': 123, 'b': 1234}]
I'd like to return this: [{'a': 123, 'b': 1234}, {'a': 3222, 'b': 1234}]
Try this:
[dict(t) for t in {tuple(d.items()) for d in l}]
The strategy is to convert the list of dictionaries to a list of tuples where the tuples contain the items of the dictionary. Since the tuples can be hashed, you can remove duplicates using set (using a set comprehension here, older python alternative would be set(tuple(d.items()) for d in l)) and, after that, re-create the dictionaries from tuples with dict.
where:
l is the original list
d is one of the dictionaries in the list
t is one of the tuples created from a dictionary
Edit: If you want to preserve ordering, the one-liner above won't work since set won't do that. However, with a few lines of code, you can also do that:
l = [{'a': 123, 'b': 1234},
     {'a': 3222, 'b': 1234},
     {'a': 123, 'b': 1234}]

seen = set()
new_l = []
for d in l:
    t = tuple(d.items())
    if t not in seen:
        seen.add(t)
        new_l.append(d)

print(new_l)
Example output:
[{'a': 123, 'b': 1234}, {'a': 3222, 'b': 1234}]
Note: As pointed out by @alexis, two dictionaries with the same keys and values might not result in the same tuple. That could happen if they went through a different history of adding/removing keys. If that's the case for your problem, then consider sorting d.items() as he suggests.
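For example, a sorted-items variant of the one-liner (my own sketch, assuming all keys are mutually sortable) deduplicates regardless of insertion history:

l = [{'a': 123, 'b': 1234}, {'b': 1234, 'a': 123}]  # same pairs, different insertion order
print([dict(t) for t in {tuple(sorted(d.items())) for d in l}])
# [{'a': 123, 'b': 1234}]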
Another one-liner based on list comprehensions:
>>> d = [{'a': 123}, {'b': 123}, {'a': 123}]
>>> [i for n, i in enumerate(d) if i not in d[n + 1:]]
[{'b': 123}, {'a': 123}]
Here since we can use dict comparison, we only keep the elements that are not in the rest of the initial list (this notion is only accessible through the index n, hence the use of enumerate).
If using a third-party package would be okay then you could use iteration_utilities.unique_everseen:
>>> from iteration_utilities import unique_everseen
>>> l = [{'a': 123}, {'b': 123}, {'a': 123}]
>>> list(unique_everseen(l))
[{'a': 123}, {'b': 123}]
It preserves the order of the original list, and it can also handle unhashable items like dictionaries by falling back on a slower algorithm (O(n*m), where n is the number of elements in the original list and m the number of unique elements, instead of O(n)). In case both keys and values are hashable, you can use the key argument of that function to create hashable items for the "uniqueness test" (so that it works in O(n)).
In the case of a dictionary (which compares independent of order) you need to map it to another data-structure that compares like that, for example frozenset:
>>> list(unique_everseen(l, key=lambda item: frozenset(item.items())))
[{'a': 123}, {'b': 123}]
Note that you shouldn't use a simple tuple approach (without sorting) because equal dictionaries don't necessarily have the same order (even in Python 3.7 where insertion order - not absolute order - is guaranteed):
>>> d1 = {1: 1, 9: 9}
>>> d2 = {9: 9, 1: 1}
>>> d1 == d2
True
>>> tuple(d1.items()) == tuple(d2.items())
False
And even sorting the tuple might not work if the keys aren't sortable:
>>> d3 = {1: 1, 'a': 'a'}
>>> tuple(sorted(d3.items()))
TypeError: '<' not supported between instances of 'str' and 'int'
Benchmark
I thought it might be useful to see how the performance of these approaches compares, so I did a small benchmark. The benchmark graphs are time vs. list-size based on a list containing no duplicates (that was chosen arbitrarily, the runtime doesn't change significantly if I add some or lots of duplicates). It's a log-log plot so the complete range is covered.
The benchmark produced two plots: the absolute times, and the timings relative to the fastest approach (plots not reproduced here).
The second approach from thefourtheye is fastest here. The unique_everseen approach with the key function is in second place; however, it's the fastest approach that preserves order. The other approaches from jcollado and thefourtheye are almost as fast. The approach using unique_everseen without key and the solutions from Emmanuel and Scorpil are very slow for longer lists and scale much worse, O(n*n) instead of O(n). stpk's approach with json isn't O(n*n), but it's much slower than the similar O(n) approaches.
The code to reproduce the benchmarks:
from simple_benchmark import benchmark
import json
from collections import OrderedDict
from iteration_utilities import unique_everseen
def jcollado_1(l):
    return [dict(t) for t in {tuple(d.items()) for d in l}]

def jcollado_2(l):
    seen = set()
    new_l = []
    for d in l:
        t = tuple(d.items())
        if t not in seen:
            seen.add(t)
            new_l.append(d)
    return new_l

def Emmanuel(d):
    return [i for n, i in enumerate(d) if i not in d[n + 1:]]

def Scorpil(a):
    b = []
    for i in range(0, len(a)):
        if a[i] not in a[i+1:]:
            b.append(a[i])
    return b

def stpk(X):
    set_of_jsons = {json.dumps(d, sort_keys=True) for d in X}
    return [json.loads(t) for t in set_of_jsons]

def thefourtheye_1(data):
    return OrderedDict((frozenset(item.items()), item) for item in data).values()

def thefourtheye_2(data):
    return {frozenset(item.items()): item for item in data}.values()

def iu_1(l):
    return list(unique_everseen(l))

def iu_2(l):
    return list(unique_everseen(l, key=lambda inner_dict: frozenset(inner_dict.items())))
funcs = (jcollado_1, Emmanuel, stpk, Scorpil, thefourtheye_1, thefourtheye_2, iu_1, jcollado_2, iu_2)
arguments = {2**i: [{'a': j} for j in range(2**i)] for i in range(2, 12)}
b = benchmark(funcs, arguments, 'list size')
%matplotlib widget
import matplotlib as mpl
import matplotlib.pyplot as plt
plt.style.use('ggplot')
mpl.rcParams['figure.figsize'] = '8, 6'
b.plot(relative_to=thefourtheye_2)
For completeness here is the timing for a list containing only duplicates:
# this is the only change for the benchmark
arguments = {2**i: [{'a': 1} for j in range(2**i)] for i in range(2, 12)}
The timings don't change significantly, except for unique_everseen without a key function, which in this case is the fastest solution. However, that's just the best case (so not representative) for that function with unhashable values, because its runtime depends on the number of unique values in the list: O(n*m), where m in this case is just 1, so it runs in O(n).
Disclaimer: I'm the author of iteration_utilities.
Other answers would not work if you're operating on nested dictionaries such as deserialized JSON objects. For this case you could use:
import json
set_of_jsons = {json.dumps(d, sort_keys=True) for d in X}
X = [json.loads(t) for t in set_of_jsons]
If you are using Pandas in your workflow, one option is to feed a list of dictionaries directly to the pd.DataFrame constructor. Then use drop_duplicates and to_dict methods for the required result.
import pandas as pd
d = [{'a': 123, 'b': 1234}, {'a': 3222, 'b': 1234}, {'a': 123, 'b': 1234}]
d_unique = pd.DataFrame(d).drop_duplicates().to_dict('records')
print(d_unique)
[{'a': 123, 'b': 1234}, {'a': 3222, 'b': 1234}]
Sometimes old-style loops are still useful. This code is a little longer than jcollado's, but very easy to read:
a = [{'a': 123}, {'b': 123}, {'a': 123}]
b = []
for i in range(len(a)):
    if a[i] not in a[i+1:]:
        b.append(a[i])
If you want to preserve the Order, then you can do
from collections import OrderedDict
print(list(OrderedDict((frozenset(item.items()), item) for item in data).values()))
# [{'a': 123, 'b': 1234}, {'a': 3222, 'b': 1234}]
If the order doesn't matter, then you can do
print(list({frozenset(item.items()): item for item in data}.values()))
# [{'a': 3222, 'b': 1234}, {'a': 123, 'b': 1234}]
Not a universal answer, but if your list happens to be sorted by some key, like this:
l=[{'a': {'b': 31}, 't': 1},
{'a': {'b': 31}, 't': 1},
{'a': {'b': 145}, 't': 2},
{'a': {'b': 25231}, 't': 2},
{'a': {'b': 25231}, 't': 2},
{'a': {'b': 25231}, 't': 2},
{'a': {'b': 112}, 't': 3}]
then the solution is as simple as:
import itertools
result = [a[0] for a in itertools.groupby(l)]
Result:
[{'a': {'b': 31}, 't': 1},
{'a': {'b': 145}, 't': 2},
{'a': {'b': 25231}, 't': 2},
{'a': {'b': 112}, 't': 3}]
Works with nested dictionaries and (obviously) preserves order.
You can use a set, but you need to turn the dicts into a hashable type.
seq = [{'a': 123, 'b': 1234}, {'a': 3222, 'b': 1234}, {'a': 123, 'b': 1234}]
unique = set()
for d in seq:
    t = tuple(d.items())  # d.iteritems() on Python 2
    unique.add(t)
The set unique now equals:
set([(('a', 3222), ('b', 1234)), (('a', 123), ('b', 1234))])
To get dicts back:
[dict(x) for x in unique]
The easiest way: convert each item in the list to a string, since dictionaries are not hashable. Then you can use set to remove the duplicates.
list_org = [{'a': 123}, {'b': 123}, {'a': 123}]
list_org_updated = [str(item) for item in list_org]
print(list_org_updated)
["{'a': 123}", "{'b': 123}", "{'a': 123}"]
unique_set = set(list_org_updated)
print(unique_set)
{"{'b': 123}", "{'a': 123}"}
You can use the set as-is, but if you want a list of dicts back, add the following:
import ast
unique_list = [ast.literal_eval(item) for item in unique_set]
print(unique_list)
[{'b': 123}, {'a': 123}]
input_list = [{'a': 123, 'b': 1234}, {'a': 3222, 'b': 1234}, {'a': 123, 'b': 1234}]
# required output => [{'a': 123, 'b': 1234}, {'a': 3222, 'b': 1234}]

# code
empty_list = []
for item in input_list:
    if item not in empty_list:
        empty_list.append(item)
print("previous list =", input_list)
print("Updated list =", empty_list)

# output
# previous list = [{'a': 123, 'b': 1234}, {'a': 3222, 'b': 1234}, {'a': 123, 'b': 1234}]
# Updated list = [{'a': 123, 'b': 1234}, {'a': 3222, 'b': 1234}]
Here's a quick one-line solution with a doubly-nested list comprehension (based on @Emmanuel's solution).
This uses a single key (for example, a) in each dict as the primary key, rather than checking whether the entire dict matches:
[i for n, i in enumerate(list_of_dicts) if i.get(primary_key) not in [y.get(primary_key) for y in list_of_dicts[n + 1:]]]
It's not what OP asked for, but it's what brought me to this thread, so I figured I'd post the solution I ended up with
Not so short but easy to read:
list_of_data = [{'a': 123}, {'b': 123}, {'a': 123}]
list_of_data_uniq = []
for data in list_of_data:
    if data not in list_of_data_uniq:
        list_of_data_uniq.append(data)
Now the list list_of_data_uniq will have unique dicts.
Remove duplications by custom key:
def remove_duplications(arr, key):
    return list({key(x): x for x in arr}.values())
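A usage sketch (my own example): passing a key function that builds a frozenset of the items deduplicates whole dicts; note the dict comprehension keeps the last occurrence of each duplicate, at the position of the first:

l = [{'a': 123}, {'b': 123}, {'a': 123}]
print(remove_duplications(l, lambda d: frozenset(d.items())))
# [{'a': 123}, {'b': 123}]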
A lot of good examples above search for duplicate values and keys; below is the way we filter out whole-dictionary duplicate data in lists. Use dupKeys = [] if your source data consists of EXACT formatted dictionaries and you are looking for duplicates. Otherwise, set dupKeys to the key names of the data you don't want duplicated; it can be 1 to n keys. It isn't elegant, but it works and is very flexible.
import binascii

collected_sensor_data = [{"sensor_id": "nw-180", "data": "XXXXXXX"},
                         {"sensor_id": "nw-163", "data": "ZYZYZYY"},
                         {"sensor_id": "nw-180", "data": "XXXXXXX"},
                         {"sensor_id": "nw-97", "data": "QQQQQZZ"}]

dupKeys = ["sensor_id", "data"]

def RemoveDuplicateDictData(collected_sensor_data, dupKeys):
    checkCRCs = []
    final_sensor_data = []
    if dupKeys == []:
        for sensor_read in collected_sensor_data:
            ck1 = binascii.crc32(str(sensor_read).encode('utf8'))
            if ck1 not in checkCRCs:
                final_sensor_data.append(sensor_read)
                checkCRCs.append(ck1)
    else:
        for sensor_read in collected_sensor_data:
            tmp = ""
            for k in dupKeys:
                tmp += str(sensor_read[k])
            ck1 = binascii.crc32(tmp.encode('utf8'))
            if ck1 not in checkCRCs:
                final_sensor_data.append(sensor_read)
                checkCRCs.append(ck1)
    return final_sensor_data

final_sensor_data = [{"sensor_id": "nw-180", "data": "XXXXXXX"},
                     {"sensor_id": "nw-163", "data": "ZYZYZYY"},
                     {"sensor_id": "nw-97", "data": "QQQQQZZ"}]
If you don't care about scale or extreme performance, here's a simple function:
# Filters dicts with the same value in unique_key
# in:  [{'k1': 1}, {'k1': 33}, {'k1': 1}]
# out: [{'k1': 1}, {'k1': 33}]
def remove_dup_dicts(list_of_dicts: list, unique_key) -> list:
    unique_values = list()
    unique_dicts = list()
    for obj in list_of_dicts:
        val = obj.get(unique_key)
        if val not in unique_values:
            unique_values.append(val)
            unique_dicts.append(obj)
    return unique_dicts

Extract varying levels of nested key value pairs from dictionary [duplicate]

Suppose you have a dictionary like:
{'a': 1,
 'c': {'a': 2,
       'b': {'x': 5,
             'y': 10}},
 'd': [1, 2, 3]}
How would you go about flattening that into something like:
{'a': 1,
 'c_a': 2,
 'c_b_x': 5,
 'c_b_y': 10,
 'd': [1, 2, 3]}
Basically the same way you would flatten a nested list: you just have to do the extra work of iterating the dict by key/value, creating new keys for your new dictionary, and creating the dictionary at the final step.
import collections
def flatten(d, parent_key='', sep='_'):
    items = []
    for k, v in d.items():
        new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, collections.MutableMapping):
            items.extend(flatten(v, new_key, sep=sep).items())
        else:
            items.append((new_key, v))
    return dict(items)
>>> flatten({'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y' : 10}}, 'd': [1, 2, 3]})
{'a': 1, 'c_a': 2, 'c_b_x': 5, 'd': [1, 2, 3], 'c_b_y': 10}
For Python >= 3.3, change the import to from collections.abc import MutableMapping to avoid a deprecation warning and change collections.MutableMapping to just MutableMapping.
Or, if you are already using pandas, you can do it with json_normalize() like so:
import pandas as pd
d = {'a': 1,
     'c': {'a': 2, 'b': {'x': 5, 'y': 10}},
     'd': [1, 2, 3]}
df = pd.json_normalize(d, sep='_')
print(df.to_dict(orient='records')[0])
Output:
{'a': 1, 'c_a': 2, 'c_b_x': 5, 'c_b_y': 10, 'd': [1, 2, 3]}
There are two big considerations that the original poster needs to consider:
Are there keyspace clobbering issues? For example, {'a_b':{'c':1}, 'a':{'b_c':2}} would result in {'a_b_c':???}. The below solution evades the problem by returning an iterable of pairs.
If performance is an issue, does the key-reducer function (which I hereby refer to as 'join') require access to the entire key-path, or can it just do O(1) work at every node in the tree? If you want to be able to say joinedKey = '_'.join(*keys), that will cost you O(N^2) running time. However if you're willing to say nextKey = previousKey+'_'+thisKey, that gets you O(N) time. The solution below lets you do both (since you could merely concatenate all the keys, then postprocess them).
(Performance is not likely an issue, but I'll elaborate on the second point in case anyone else cares: In implementing this, there are numerous dangerous choices. If you do this recursively and yield and re-yield, or anything equivalent which touches nodes more than once (which is quite easy to accidentally do), you are doing potentially O(N^2) work rather than O(N). This is because maybe you are calculating a key a then a_1 then a_1_i..., and then calculating a then a_1 then a_1_ii..., but really you shouldn't have to calculate a_1 again. Even if you aren't recalculating it, re-yielding it (a 'level-by-level' approach) is just as bad. A good example is to think about the performance on {1:{1:{1:{1:...(N times)...{1:SOME_LARGE_DICTIONARY_OF_SIZE_N}...}}}})
Below is a function I wrote flattenDict(d, join=..., lift=...) which can be adapted to many purposes and can do what you want. Sadly it is fairly hard to make a lazy version of this function without incurring the above performance penalties (many python builtins like chain.from_iterable aren't actually efficient, which I only realized after extensive testing of three different versions of this code before settling on this one).
from collections.abc import Mapping  # plain "from collections import Mapping" on Python < 3.3
from operator import add

_FLAG_FIRST = object()

def flattenDict(d, join=add, lift=lambda x: (x,)):
    results = []
    def visit(subdict, results, partialKey):
        for k, v in subdict.items():
            newKey = lift(k) if partialKey == _FLAG_FIRST else join(partialKey, lift(k))
            if isinstance(v, Mapping):
                visit(v, results, newKey)
            else:
                results.append((newKey, v))
    visit(d, results, _FLAG_FIRST)
    return results
To better understand what's going on, below is a diagram for those unfamiliar with reduce(left), otherwise known as "fold left". Sometimes it is drawn with an initial value in place of k0 (not part of the list, passed into the function). Here, J is our join function. We preprocess each kn with lift(k).
[k0,k1,...,kN].foldleft(J)
               /       \
             ...        kN
             /
J(J(J(k0,k1),k2),k3)
       /     \
      /       \
J(J(k0,k1),k2) k3
    /    \
   /      \
J(k0,k1)   k2
  /  \
 /    \
k0     k1
This is in fact the same as functools.reduce, but where our function does this to all key-paths of the tree.
>>> reduce(lambda a,b:(a,b), range(5))
((((0, 1), 2), 3), 4)
Demonstration (which I'd otherwise put in docstring):
>>> testData = {
        'a': 1,
        'b': 2,
        'c': {
            'aa': 11,
            'bb': 22,
            'cc': {
                'aaa': 111
            }
        }
    }
from pprint import pprint as pp
>>> pp(dict( flattenDict(testData) ))
{('a',): 1,
('b',): 2,
('c', 'aa'): 11,
('c', 'bb'): 22,
('c', 'cc', 'aaa'): 111}
>>> pp(dict( flattenDict(testData, join=lambda a,b:a+'_'+b, lift=lambda x:x) ))
{'a': 1, 'b': 2, 'c_aa': 11, 'c_bb': 22, 'c_cc_aaa': 111}
>>> pp(dict( (v,k) for k,v in flattenDict(testData, lift=hash, join=lambda a,b:hash((a,b))) ))
{1: 12416037344,
2: 12544037731,
11: 5470935132935744593,
22: 4885734186131977315,
111: 3461911260025554326}
Performance:
from functools import reduce

def makeEvilDict(n):
    # list(range(n)) here; the snippet was written for Python 2, where range returned a list
    return reduce(lambda acc, x: {x: acc}, [{i: 0 for i in range(n)}] + list(range(n)))

import timeit

def time(runnable):
    t0 = timeit.default_timer()
    _ = runnable()
    t1 = timeit.default_timer()
    print('took {:.2f} seconds'.format(t1 - t0))
>>> pp(makeEvilDict(8))
{7: {6: {5: {4: {3: {2: {1: {0: {0: 0,
1: 0,
2: 0,
3: 0,
4: 0,
5: 0,
6: 0,
7: 0}}}}}}}}}
import sys
sys.setrecursionlimit(1000000)
forget = lambda a,b:''
>>> time(lambda: dict(flattenDict(makeEvilDict(10000), join=forget)) )
took 0.10 seconds
>>> time(lambda: dict(flattenDict(makeEvilDict(100000), join=forget)) )
[1] 12569 segmentation fault python
... sigh, don't think that one is my fault...
[unimportant historical note due to moderation issues]
Regarding the alleged duplicate of Flatten a dictionary of dictionaries (2 levels deep) of lists
That question's solution can be implemented in terms of this one by doing sorted( sum(flatten(...),[]) ). The reverse is not possible: while it is true that the values of flatten(...) can be recovered from the alleged duplicate by mapping a higher-order accumulator, one cannot recover the keys. (edit: Also it turns out that the alleged duplicate owner's question is completely different, in that it only deals with dictionaries exactly 2-level deep, though one of the answers on that page gives a general solution.)
If you're using pandas, there is a function hidden in pandas.io.json._normalize [1] called nested_to_record which does this exactly.
from pandas.io.json._normalize import nested_to_record
flat = nested_to_record(my_dict, sep='_')
[1] In pandas versions 0.24.x and older, use pandas.io.json.normalize (without the _)
Here is a kind of a "functional", "one-liner" implementation. It is recursive, and based on a conditional expression and a dict comprehension.
def flatten_dict(dd, separator='_', prefix=''):
    return {prefix + separator + k if prefix else k: v
            for kk, vv in dd.items()
            for k, v in flatten_dict(vv, separator, kk).items()
            } if isinstance(dd, dict) else {prefix: dd}
Test:
In [2]: flatten_dict({'abc':123, 'hgf':{'gh':432, 'yu':433}, 'gfd':902, 'xzxzxz':{"432":{'0b0b0b':231}, "43234":1321}}, '.')
Out[2]:
{'abc': 123,
'gfd': 902,
'hgf.gh': 432,
'hgf.yu': 433,
'xzxzxz.432.0b0b0b': 231,
'xzxzxz.43234': 1321}
Not exactly what the OP asked, but lots of folks are coming here looking for ways to flatten real-world nested JSON data, which can have nested key-value JSON objects, arrays, JSON objects inside the arrays, and so on. JSON doesn't include tuples, so we don't have to fret over those.
I found an implementation of the list-inclusion comment by @roneo on the answer posted by @Imran:
https://github.com/ScriptSmith/socialreaper/blob/master/socialreaper/tools.py#L8
import collections

def flatten(dictionary, parent_key=False, separator='.'):
    """
    Turn a nested dictionary into a flattened dictionary
    :param dictionary: The dictionary to flatten
    :param parent_key: The string to prepend to dictionary's keys
    :param separator: The string used to separate flattened keys
    :return: A flattened dictionary
    """
    items = []
    for key, value in dictionary.items():
        new_key = str(parent_key) + separator + key if parent_key else key
        if isinstance(value, collections.abc.MutableMapping):  # collections.MutableMapping pre Python 3.3
            items.extend(flatten(value, new_key, separator).items())
        elif isinstance(value, list):
            for k, v in enumerate(value):
                items.extend(flatten({str(k): v}, new_key, separator).items())
        else:
            items.append((new_key, value))
    return dict(items)
Test it:
flatten({'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y' : 10}}, 'd': [1, 2, 3] })
>> {'a': 1, 'c.a': 2, 'c.b.x': 5, 'c.b.y': 10, 'd.0': 1, 'd.1': 2, 'd.2': 3}
And that does the job I need: I throw any complicated JSON at it and it flattens it out for me.
All credits to https://github.com/ScriptSmith.
Code:
test = {'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y': 10}}, 'd': [1, 2, 3]}

def parse_dict(init, lkey=''):
    ret = {}
    for rkey, val in init.items():
        key = lkey + rkey
        if isinstance(val, dict):
            ret.update(parse_dict(val, key + '_'))
        else:
            ret[key] = val
    return ret

print(parse_dict(test, ''))
Results:
$ python test.py
{'a': 1, 'c_a': 2, 'c_b_x': 5, 'd': [1, 2, 3], 'c_b_y': 10}
I am using Python 3.2; update for your version of Python.
This is not restricted to dictionaries: it works with every mapping type that implements .items(). Further, it is faster, as it avoids an if condition. Nevertheless, credits go to Imran:
def flatten(d, parent_key=''):
    items = []
    for k, v in d.items():
        try:
            items.extend(flatten(v, '%s%s_' % (parent_key, k)).items())
        except AttributeError:
            items.append(('%s%s' % (parent_key, k), v))
    return dict(items)
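A quick usage sketch (my own example): anything without an .items() method, lists included, triggers the AttributeError branch and is kept as a leaf value:

d = {'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y': 10}}, 'd': [1, 2, 3]}
print(flatten(d))
# {'a': 1, 'c_a': 2, 'c_b_x': 5, 'c_b_y': 10, 'd': [1, 2, 3]}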
How about a functional and performant solution in Python 3.5?
from functools import reduce

def _reducer(items, key, val, pref):
    if isinstance(val, dict):
        return {**items, **flatten(val, pref + key)}
    else:
        return {**items, pref + key: val}

def flatten(d, pref=''):
    return reduce(
        lambda new_d, kv: _reducer(new_d, *kv, pref),
        d.items(),
        {}
    )
This is even more performant:
def flatten(d, pref=''):
    return reduce(
        lambda new_d, kv:
            isinstance(kv[1], dict) and
            {**new_d, **flatten(kv[1], pref + kv[0])} or
            {**new_d, pref + kv[0]: kv[1]},
        d.items(),
        {}
    )
In use:
my_obj = {'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y': 10}}, 'd': [1, 2, 3]}
print(flatten(my_obj))
# {'d': [1, 2, 3], 'cby': 10, 'cbx': 5, 'ca': 2, 'a': 1}
If you are a fan of pythonic oneliners:
import pandas as pd

my_dict = {'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y': 10}}, 'd': [1, 2, 3]}
list(pd.json_normalize(my_dict).T.to_dict().values())[0]
returns:
{'a': 1, 'c.a': 2, 'c.b.x': 5, 'c.b.y': 10, 'd': [1, 2, 3]}
You can leave off the [0] at the end if you have a list of dictionaries rather than a single dictionary.
My Python 3.3 Solution using generators:
def flattenit(pyobj, keystring=''):
    if type(pyobj) is dict:
        keystring = keystring + "_" if keystring else keystring
        for k in pyobj:
            yield from flattenit(pyobj[k], keystring + k)
    else:
        # non-dict values (lists included) are yielded as leaf values
        yield keystring, pyobj
my_obj = {'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y': 10}}, 'd': [1, 2, 3]}
#your flattened dictionary object
flattened = {k: v for k, v in flattenit(my_obj)}
print(flattened)
# result: {'c_b_y': 10, 'd': [1, 2, 3], 'c_a': 2, 'a': 1, 'c_b_x': 5}
Utilizing recursion, keeping it simple and human readable:
def flatten_dict(dictionary, accumulator=None, parent_key=None, separator="."):
    if accumulator is None:
        accumulator = {}
    for k, v in dictionary.items():
        k = f"{parent_key}{separator}{k}" if parent_key else k
        if isinstance(v, dict):
            # pass separator through so a custom separator applies at every level
            flatten_dict(dictionary=v, accumulator=accumulator, parent_key=k, separator=separator)
            continue
        accumulator[k] = v
    return accumulator
Call is simple:
new_dict = flatten_dict(dictionary)
or
new_dict = flatten_dict(dictionary, separator="_")
if we want to change the default separator.
A little breakdown:
When the function is first called, it is called only passing the dictionary we want to flatten. The accumulator parameter is here to support recursion, which we see later. So, we instantiate accumulator to an empty dictionary where we will put all of the nested values from the original dictionary.
if accumulator is None:
    accumulator = {}
As we iterate over the dictionary's values, we construct a key for every value. The parent_key argument will be None for the first call, while for every nested dictionary, it will contain the key pointing to it, so we prepend that key.
k = f"{parent_key}{separator}{k}" if parent_key else k
In case the value v the key k is pointing to is a dictionary, the function calls itself, passing the nested dictionary, the accumulator (which is passed by reference, so all changes done to it are done on the same instance) and the key k so that we can construct the concatenated key. Notice the continue statement. We want to skip the next line, outside of the if block, so that the nested dictionary doesn't end up in the accumulator under key k.
if isinstance(v, dict):
    flatten_dict(dictionary=v, accumulator=accumulator, parent_key=k, separator=separator)
    continue
So, what do we do in case the value v is not a dictionary? Just put it unchanged inside the accumulator.
accumulator[k] = v
Once we're done we just return the accumulator, leaving the original dictionary argument untouched.
NOTE
This will work only with dictionaries that have strings as keys. It will work with hashable objects implementing the __repr__ method, but will yield unwanted results.
Simple function to flatten nested dictionaries. For Python 3, replace .iteritems() with .items()
def flatten_dict(init_dict):
    res_dict = {}
    if type(init_dict) is not dict:
        return res_dict
    for k, v in init_dict.iteritems():
        if type(v) == dict:
            res_dict.update(flatten_dict(v))
        else:
            res_dict[k] = v
    return res_dict
The idea/requirement was:
Get flat dictionaries without keeping parent keys.
Example of usage:
dd = {'a': 3,
'b': {'c': 4, 'd': 5},
'e': {'f':
{'g': 1, 'h': 2}
},
'i': 9,
}
flatten_dict(dd)
>> {'a': 3, 'c': 4, 'd': 5, 'g': 1, 'h': 2, 'i': 9}
Keeping parent keys is simple as well.
I was thinking of a subclass of UserDict to automagically flatten the keys.
from collections import UserDict

class FlatDict(UserDict):
    def __init__(self, *args, separator='.', **kwargs):
        self.separator = separator
        super().__init__(*args, **kwargs)

    def __setitem__(self, key, value):
        if isinstance(value, dict):
            for k1, v1 in FlatDict(value, separator=self.separator).items():
                super().__setitem__(f"{key}{self.separator}{k1}", v1)
        else:
            super().__setitem__(key, value)
The advantage is that keys can be added on the fly, or set via standard dict instantiation, without surprises:
>>> fd = FlatDict(
... {
... 'person': {
... 'sexe': 'male',
... 'name': {
... 'first': 'jacques',
... 'last': 'dupond'
... }
... }
... }
... )
>>> fd
{'person.sexe': 'male', 'person.name.first': 'jacques', 'person.name.last': 'dupond'}
>>> fd['person'] = {'name': {'nickname': 'Bob'}}
>>> fd
{'person.sexe': 'male', 'person.name.first': 'jacques', 'person.name.last': 'dupond', 'person.name.nickname': 'Bob'}
>>> fd['person.name'] = {'civility': 'Dr'}
>>> fd
{'person.sexe': 'male', 'person.name.first': 'jacques', 'person.name.last': 'dupond', 'person.name.nickname': 'Bob', 'person.name.civility': 'Dr'}
This is similar to both imran's and ralu's answer. It does not use a generator, but instead employs recursion with a closure:
def flatten_dict(d, separator='_'):
    final = {}

    def _flatten_dict(obj, parent_keys=[]):
        for k, v in obj.items():  # obj.iteritems() on Python 2
            if isinstance(v, dict):
                _flatten_dict(v, parent_keys + [k])
            else:
                key = separator.join(parent_keys + [k])
                final[key] = v

    _flatten_dict(d)
    return final

>>> print(flatten_dict({'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y': 10}}, 'd': [1, 2, 3]}))
{'a': 1, 'c_a': 2, 'c_b_x': 5, 'd': [1, 2, 3], 'c_b_y': 10}
The answers above work really well. Just thought I'd add the unflatten function that I wrote:
def unflatten(d):
    ud = {}
    for k, v in d.items():
        context = ud
        for sub_key in k.split('_')[:-1]:
            if sub_key not in context:
                context[sub_key] = {}
            context = context[sub_key]
        context[k.split('_')[-1]] = v
    return ud
Note: This doesn't account for '_' already present in keys, much like the flatten counterparts.
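A round-trip sketch (my own example, assuming one of the '_'-separated flatten functions above):

nested = {'a': 1, 'c': {'a': 2, 'b': {'x': 5}}}
flat = flatten(nested)            # {'a': 1, 'c_a': 2, 'c_b_x': 5}
print(unflatten(flat) == nested)  # True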
Davoud's solution is very nice but doesn't give satisfactory results when the nested dict also contains lists of dicts; his code can be adapted for that case, though:
def flatten_dict(d):
    items = []
    for k, v in d.items():
        try:
            if isinstance(v, list):
                for l in v:
                    items.extend(flatten_dict(l).items())
            else:
                items.extend(flatten_dict(v).items())
        except AttributeError:
            items.append((k, v))
    return dict(items)
def flatten(unflattened_dict, separator='_'):
    flattened_dict = {}
    for k, v in unflattened_dict.items():
        if isinstance(v, dict):
            sub_flattened_dict = flatten(v, separator)
            for k2, v2 in sub_flattened_dict.items():
                flattened_dict[k + separator + k2] = v2
        else:
            flattened_dict[k] = v
    return flattened_dict
I actually wrote a package called cherrypicker recently to deal with this exact sort of thing since I had to do it so often!
I think the following code would give you exactly what you're after:
from cherrypicker import CherryPicker

dct = {
    'a': 1,
    'c': {
        'a': 2,
        'b': {
            'x': 5,
            'y': 10
        }
    },
    'd': [1, 2, 3]
}

picker = CherryPicker(dct)
picker.flatten().get()
You can install the package with:
pip install cherrypicker
...and there's more docs and guidance at https://cherrypicker.readthedocs.io.
Other methods may be faster, but the priority of this package is to make such tasks easy. If you do have a large list of objects to flatten though, you can also tell CherryPicker to use parallel processing to speed things up.
Here's a solution using a stack. No recursion.
def flatten_nested_dict(nested):
    stack = list(nested.items())
    ans = {}
    while stack:
        key, val = stack.pop()
        if isinstance(val, dict):
            for sub_key, sub_val in val.items():
                stack.append((f"{key}_{sub_key}", sub_val))
        else:
            ans[key] = val
    return ans
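A usage sketch (my own example): because items are pushed and popped LIFO, the flattened keys come out in reverse order relative to the recursive versions:

d = {'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y': 10}}}
print(flatten_nested_dict(d))
# {'c_b_y': 10, 'c_b_x': 5, 'c_a': 2, 'a': 1}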
Using generators:
def flat_dic_helper(prepend, d):
    if len(prepend) > 0:
        prepend = prepend + "_"
    for k in d:
        i = d[k]
        if isinstance(i, dict):
            r = flat_dic_helper(prepend + k, i)
            for j in r:
                yield j
        else:
            yield (prepend + k, i)

def flat_dic(d):
    return dict(flat_dic_helper("", d))

d = {'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y': 10}}, 'd': [1, 2, 3]}
print(flat_dic(d))
>> {'a': 1, 'c_a': 2, 'c_b_x': 5, 'd': [1, 2, 3], 'c_b_y': 10}
Here's an algorithm for elegant, in-place replacement. Tested with Python 2.7 and Python 3.5. Using the dot character as a separator.
def flatten_json(json):
    if type(json) == dict:
        for k, v in list(json.items()):
            if type(v) == dict:
                flatten_json(v)
                json.pop(k)
                for k2, v2 in v.items():
                    json[k + "." + k2] = v2
Example:
d = {'a': {'b': 'c'}}
flatten_json(d)
print(d)
unflatten_json(d)
print(d)
Output:
{'a.b': 'c'}
{'a': {'b': 'c'}}
I published this code here along with the matching unflatten_json function.
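The published unflatten_json isn't reproduced here; as a hypothetical sketch of my own (the author's version may differ), an in-place inverse could split the dotted keys back out:

def unflatten_json(json):
    # Hypothetical sketch of the matching inverse; mutates in place like
    # flatten_json above. Does not handle conflicting keys.
    if type(json) == dict:
        for k in list(json.keys()):
            if "." in k:
                v = json.pop(k)
                node = json
                *parents, leaf = k.split(".")
                for part in parents:
                    node = node.setdefault(part, {})
                node[leaf] = v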
If you want to flatten a nested dictionary and get a list of all unique keys, here is a solution:
def flat_dict_return_unique_key(data, unique_keys=None):
    # None default instead of a shared mutable set, so repeated calls start fresh
    if unique_keys is None:
        unique_keys = set()
    if isinstance(data, dict):
        unique_keys.update(data.keys())
        for each_v in data.values():
            if isinstance(each_v, dict):
                flat_dict_return_unique_key(each_v, unique_keys)
    return list(unique_keys)
I always prefer to access dict objects via .items(), so for flattening dicts I use the following recursive generator flat_items(d). If you'd like to have a dict again, simply wrap it like this: flat = dict(flat_items(d))
def flat_items(d, key_separator='.'):
    """
    Flattens the dictionary containing other dictionaries like here:
    https://stackoverflow.com/questions/6027558/flatten-nested-python-dictionaries-compressing-keys

    >>> example = {'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y': 10}}, 'd': [1, 2, 3]}
    >>> flat = dict(flat_items(example, key_separator='_'))
    >>> assert flat['c_b_y'] == 10
    """
    for k, v in d.items():
        if type(v) is dict:
            for k1, v1 in flat_items(v, key_separator=key_separator):
                yield key_separator.join((k, k1)), v1
        else:
            yield k, v
def flatten_nested_dict(_dict, _str=''):
    '''
    recursive function to flatten a nested dictionary json
    '''
    ret_dict = {}
    for k, v in _dict.items():
        if isinstance(v, dict):
            ret_dict.update(flatten_nested_dict(v, _str='_'.join([_str, k]).strip('_')))
        elif isinstance(v, list):
            for index, item in enumerate(v):
                if isinstance(item, dict):
                    ret_dict.update(flatten_nested_dict(item, _str='_'.join([_str, k, str(index)]).strip('_')))
                else:
                    ret_dict['_'.join([_str, k, str(index)]).strip('_')] = item
        else:
            ret_dict['_'.join([_str, k]).strip('_')] = v
    return ret_dict
Using dict.popitem() in straightforward nested-list-like recursion:
def flatten(d):
    if d == {}:
        return d
    else:
        k, v = d.popitem()
        if type(v) is not dict:
            return {k: v, **flatten(d)}
        else:
            flat_kv = flatten(v)
            for k1 in list(flat_kv.keys()):
                flat_kv[k + '_' + k1] = flat_kv[k1]
                del flat_kv[k1]
            return {**flat_kv, **flatten(d)}
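A usage sketch (my own example): note that popitem consumes the input, so the original dict is empty afterwards:

d = {'a': 1, 'c': {'a': 2, 'b': {'x': 5}}}
print(flatten(d))  # {'c_b_x': 5, 'c_a': 2, 'a': 1}
print(d)           # {} (the input was consumed by popitem)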
If you do not mind recursive functions, here is a solution. I have also taken the liberty of including an exclusion parameter in case there are one or more values you wish to maintain.
Code:
def flatten_dict(dictionary, exclude=[], delimiter='_'):
    flat_dict = dict()
    for key, value in dictionary.items():
        if isinstance(value, dict) and key not in exclude:
            flatten_value_dict = flatten_dict(value, exclude, delimiter)
            for k, v in flatten_value_dict.items():
                flat_dict[f"{key}{delimiter}{k}"] = v
        else:
            flat_dict[key] = value
    return flat_dict
Usage:
d = {'a':1, 'b':[1, 2], 'c':3, 'd':{'a':4, 'b':{'a':7, 'b':8}, 'c':6}, 'e':{'a':1,'b':2}}
flat_d = flatten_dict(dictionary=d, exclude=['e'], delimiter='.')
print(flat_d)
Output:
{'a': 1, 'b': [1, 2], 'c': 3, 'd.a': 4, 'd.b.a': 7, 'd.b.b': 8, 'd.c': 6, 'e': {'a': 1, 'b': 2}}
A variation of this "Flatten nested dictionaries, compressing keys" approach, with max_level and a custom reducer:
from functools import reduce

def flatten(d, max_level=None, reducer='tuple'):
    if reducer == 'tuple':
        reducer_seed = tuple()
        reducer_func = lambda x, y: (*x, y)
    else:
        raise ValueError(f'Unknown reducer: {reducer}')

    def impl(d, pref, level):
        return reduce(
            lambda new_d, kv:
                (max_level is None or level < max_level)
                and isinstance(kv[1], dict)
                and {**new_d, **impl(kv[1], reducer_func(pref, kv[0]), level + 1)}
                or {**new_d, reducer_func(pref, kv[0]): kv[1]},
            d.items(),
            {}
        )

    return impl(d, reducer_seed, 0)
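A usage sketch (my own example): with the default tuple reducer, keys come back as key-path tuples, and max_level stops the descent:

d = {'a': 1, 'c': {'a': 2, 'b': {'x': 5}}}
print(flatten(d))
# {('a',): 1, ('c', 'a'): 2, ('c', 'b', 'x'): 5}
print(flatten(d, max_level=1))
# {('a',): 1, ('c', 'a'): 2, ('c', 'b'): {'x': 5}}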
I tried some of the solutions on this page (though not all), but those I tried failed to handle nested lists of dicts.
Consider a dict like this:
d = {
    'owner': {
        'name': {'first_name': 'Steven', 'last_name': 'Smith'},
        'lottery_nums': [1, 2, 3, 'four', '11', None],
        'address': {},
        'tuple': (1, 2, 'three'),
        'tuple_with_dict': (1, 2, 'three', {'is_valid': False}),
        'set': {1, 2, 3, 4, 'five'},
        'children': [
            {'name': {'first_name': 'Jessica',
                      'last_name': 'Smith'},
             'children': []},
            {'name': {'first_name': 'George',
                      'last_name': 'Smith'},
             'children': []}
        ]
    }
}
Here's my makeshift solution:
def flatten_dict(input_node: dict, key_: str = '', output_dict=None):
    # use None instead of a mutable default so each top-level call gets a fresh dict
    if output_dict is None:
        output_dict = {}
    if isinstance(input_node, dict):
        for key, val in input_node.items():
            new_key = f"{key_}.{key}" if key_ else f"{key}"
            flatten_dict(val, new_key, output_dict)
    elif isinstance(input_node, list):
        for idx, item in enumerate(input_node):
            flatten_dict(item, f"{key_}.{idx}", output_dict)
    else:
        output_dict[key_] = input_node
    return output_dict
which produces:
{
    'owner.name.first_name': 'Steven',
    'owner.name.last_name': 'Smith',
    'owner.lottery_nums.0': 1,
    'owner.lottery_nums.1': 2,
    'owner.lottery_nums.2': 3,
    'owner.lottery_nums.3': 'four',
    'owner.lottery_nums.4': '11',
    'owner.lottery_nums.5': None,
    'owner.tuple': (1, 2, 'three'),
    'owner.tuple_with_dict': (1, 2, 'three', {'is_valid': False}),
    'owner.set': {1, 2, 3, 4, 'five'},
    'owner.children.0.name.first_name': 'Jessica',
    'owner.children.0.name.last_name': 'Smith',
    'owner.children.1.name.first_name': 'George',
    'owner.children.1.name.last_name': 'Smith',
}
A makeshift solution, and it's not perfect.
NOTE:
it doesn't keep empty dicts, such as the address: {} k/v pair.
it won't flatten dicts in nested tuples, though it would be easy to add using the fact that Python tuples act similarly to lists.
You can use recursion in order to flatten your dictionary.
import collections

def flatten(
    nested_dict,
    separator='.',
    name=None,
):
    flatten_dict = {}
    if not nested_dict:
        return flatten_dict
    if isinstance(
        nested_dict,
        collections.abc.MutableMapping,
    ):
        for key, value in nested_dict.items():
            if name is not None:
                flatten_dict.update(
                    flatten(
                        nested_dict=value,
                        separator=separator,
                        name=f'{name}{separator}{key}',
                    ),
                )
            else:
                flatten_dict.update(
                    flatten(
                        nested_dict=value,
                        separator=separator,
                        name=key,
                    ),
                )
    else:
        flatten_dict[name] = nested_dict
    return flatten_dict
if __name__ == '__main__':
    nested_dict = {
        1: 'a',
        2: {
            3: 'c',
            4: {
                5: 'e',
            },
            6: [1, 2, 3, 4, 5],
        },
    }
    print(
        flatten(
            nested_dict=nested_dict,
        ),
    )
Output:
{1: 'a', '2.3': 'c', '2.4.5': 'e', '2.6': [1, 2, 3, 4, 5]}

Need help finishing this code to flatten a dictionary [duplicate]

Suppose you have a dictionary like:
{'a': 1,
'c': {'a': 2,
'b': {'x': 5,
'y' : 10}},
'd': [1, 2, 3]}
How would you go about flattening that into something like:
{'a': 1,
'c_a': 2,
'c_b_x': 5,
'c_b_y': 10,
'd': [1, 2, 3]}
Basically the same way you would flatten a nested list, you just have to do the extra work for iterating the dict by key/value, creating new keys for your new dictionary and creating the dictionary at final step.
import collections
def flatten(d, parent_key='', sep='_'):
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, collections.MutableMapping):
items.extend(flatten(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
>>> flatten({'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y' : 10}}, 'd': [1, 2, 3]})
{'a': 1, 'c_a': 2, 'c_b_x': 5, 'd': [1, 2, 3], 'c_b_y': 10}
For Python >= 3.3, change the import to from collections.abc import MutableMapping to avoid a deprecation warning and change collections.MutableMapping to just MutableMapping.
Or if you are already using pandas, You can do it with json_normalize() like so:
import pandas as pd
d = {'a': 1,
'c': {'a': 2, 'b': {'x': 5, 'y' : 10}},
'd': [1, 2, 3]}
df = pd.json_normalize(d, sep='_')
print(df.to_dict(orient='records')[0])
Output:
{'a': 1, 'c_a': 2, 'c_b_x': 5, 'c_b_y': 10, 'd': [1, 2, 3]}
There are two big considerations that the original poster needs to consider:
Are there keyspace clobbering issues? For example, {'a_b':{'c':1}, 'a':{'b_c':2}} would result in {'a_b_c':???}. The below solution evades the problem by returning an iterable of pairs.
If performance is an issue, does the key-reducer function (which I hereby refer to as 'join') require access to the entire key-path, or can it just do O(1) work at every node in the tree? If you want to be able to say joinedKey = '_'.join(*keys), that will cost you O(N^2) running time. However if you're willing to say nextKey = previousKey+'_'+thisKey, that gets you O(N) time. The solution below lets you do both (since you could merely concatenate all the keys, then postprocess them).
(Performance is not likely an issue, but I'll elaborate on the second point in case anyone else cares: In implementing this, there are numerous dangerous choices. If you do this recursively and yield and re-yield, or anything equivalent which touches nodes more than once (which is quite easy to accidentally do), you are doing potentially O(N^2) work rather than O(N). This is because maybe you are calculating a key a then a_1 then a_1_i..., and then calculating a then a_1 then a_1_ii..., but really you shouldn't have to calculate a_1 again. Even if you aren't recalculating it, re-yielding it (a 'level-by-level' approach) is just as bad. A good example is to think about the performance on {1:{1:{1:{1:...(N times)...{1:SOME_LARGE_DICTIONARY_OF_SIZE_N}...}}}})
Below is a function I wrote flattenDict(d, join=..., lift=...) which can be adapted to many purposes and can do what you want. Sadly it is fairly hard to make a lazy version of this function without incurring the above performance penalties (many python builtins like chain.from_iterable aren't actually efficient, which I only realized after extensive testing of three different versions of this code before settling on this one).
from collections import Mapping
from itertools import chain
from operator import add
_FLAG_FIRST = object()
def flattenDict(d, join=add, lift=lambda x:(x,)):
results = []
def visit(subdict, results, partialKey):
for k,v in subdict.items():
newKey = lift(k) if partialKey==_FLAG_FIRST else join(partialKey,lift(k))
if isinstance(v,Mapping):
visit(v, results, newKey)
else:
results.append((newKey,v))
visit(d, results, _FLAG_FIRST)
return results
To better understand what's going on, below is a diagram for those unfamiliar with reduce(left), otherwise known as "fold left". Sometimes it is drawn with an initial value in place of k0 (not part of the list, passed into the function). Here, J is our join function. We preprocess each kn with lift(k).
[k0,k1,...,kN].foldleft(J)
/ \
... kN
/
J(k0,J(k1,J(k2,k3)))
/ \
/ \
J(J(k0,k1),k2) k3
/ \
/ \
J(k0,k1) k2
/ \
/ \
k0 k1
This is in fact the same as functools.reduce, but where our function does this to all key-paths of the tree.
>>> reduce(lambda a,b:(a,b), range(5))
((((0, 1), 2), 3), 4)
Demonstration (which I'd otherwise put in docstring):
>>> testData = {
'a':1,
'b':2,
'c':{
'aa':11,
'bb':22,
'cc':{
'aaa':111
}
}
}
from pprint import pprint as pp
>>> pp(dict( flattenDict(testData) ))
{('a',): 1,
('b',): 2,
('c', 'aa'): 11,
('c', 'bb'): 22,
('c', 'cc', 'aaa'): 111}
>>> pp(dict( flattenDict(testData, join=lambda a,b:a+'_'+b, lift=lambda x:x) ))
{'a': 1, 'b': 2, 'c_aa': 11, 'c_bb': 22, 'c_cc_aaa': 111}
>>> pp(dict( (v,k) for k,v in flattenDict(testData, lift=hash, join=lambda a,b:hash((a,b))) ))
{1: 12416037344,
2: 12544037731,
11: 5470935132935744593,
22: 4885734186131977315,
111: 3461911260025554326}
Performance:
from functools import reduce
def makeEvilDict(n):
return reduce(lambda acc,x:{x:acc}, [{i:0 for i in range(n)}]+range(n))
import timeit
def time(runnable):
t0 = timeit.default_timer()
_ = runnable()
t1 = timeit.default_timer()
print('took {:.2f} seconds'.format(t1-t0))
>>> pp(makeEvilDict(8))
{7: {6: {5: {4: {3: {2: {1: {0: {0: 0,
1: 0,
2: 0,
3: 0,
4: 0,
5: 0,
6: 0,
7: 0}}}}}}}}}
import sys
sys.setrecursionlimit(1000000)
forget = lambda a,b:''
>>> time(lambda: dict(flattenDict(makeEvilDict(10000), join=forget)) )
took 0.10 seconds
>>> time(lambda: dict(flattenDict(makeEvilDict(100000), join=forget)) )
[1] 12569 segmentation fault python
... sigh, don't think that one is my fault...
[unimportant historical note due to moderation issues]
Regarding the alleged duplicate of Flatten a dictionary of dictionaries (2 levels deep) of lists
That question's solution can be implemented in terms of this one by doing sorted( sum(flatten(...),[]) ). The reverse is not possible: while it is true that the values of flatten(...) can be recovered from the alleged duplicate by mapping a higher-order accumulator, one cannot recover the keys. (edit: Also it turns out that the alleged duplicate owner's question is completely different, in that it only deals with dictionaries exactly 2-level deep, though one of the answers on that page gives a general solution.)
If you're using pandas there is a function hidden in pandas.io.json._normalize1 called nested_to_record which does this exactly.
from pandas.io.json._normalize import nested_to_record
flat = nested_to_record(my_dict, sep='_')
1 In pandas versions 0.24.x and older use pandas.io.json.normalize (without the _)
Here is a kind of a "functional", "one-liner" implementation. It is recursive, and based on a conditional expression and a dict comprehension.
def flatten_dict(dd, separator='_', prefix=''):
return { prefix + separator + k if prefix else k : v
for kk, vv in dd.items()
for k, v in flatten_dict(vv, separator, kk).items()
} if isinstance(dd, dict) else { prefix : dd }
Test:
In [2]: flatten_dict({'abc':123, 'hgf':{'gh':432, 'yu':433}, 'gfd':902, 'xzxzxz':{"432":{'0b0b0b':231}, "43234":1321}}, '.')
Out[2]:
{'abc': 123,
'gfd': 902,
'hgf.gh': 432,
'hgf.yu': 433,
'xzxzxz.432.0b0b0b': 231,
'xzxzxz.43234': 1321}
Not exactly what the OP asked, but lots of folks are coming here looking for ways to flatten real-world nested JSON data which can have nested key-value json objects and arrays and json objects inside the arrays and so on. JSON doesn't include tuples, so we don't have to fret over those.
I found an implementation of the list-inclusion comment by #roneo to the answer posted by #Imran :
https://github.com/ScriptSmith/socialreaper/blob/master/socialreaper/tools.py#L8
import collections
def flatten(dictionary, parent_key=False, separator='.'):
"""
Turn a nested dictionary into a flattened dictionary
:param dictionary: The dictionary to flatten
:param parent_key: The string to prepend to dictionary's keys
:param separator: The string used to separate flattened keys
:return: A flattened dictionary
"""
items = []
for key, value in dictionary.items():
new_key = str(parent_key) + separator + key if parent_key else key
if isinstance(value, collections.MutableMapping):
items.extend(flatten(value, new_key, separator).items())
elif isinstance(value, list):
for k, v in enumerate(value):
items.extend(flatten({str(k): v}, new_key).items())
else:
items.append((new_key, value))
return dict(items)
Test it:
flatten({'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y' : 10}}, 'd': [1, 2, 3] })
>> {'a': 1, 'c.a': 2, 'c.b.x': 5, 'c.b.y': 10, 'd.0': 1, 'd.1': 2, 'd.2': 3}
Annd that does the job I need done: I throw any complicated json at this and it flattens it out for me.
All credits to https://github.com/ScriptSmith .
Code:
test = {'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y' : 10}}, 'd': [1, 2, 3]}
def parse_dict(init, lkey=''):
ret = {}
for rkey,val in init.items():
key = lkey+rkey
if isinstance(val, dict):
ret.update(parse_dict(val, key+'_'))
else:
ret[key] = val
return ret
print(parse_dict(test,''))
Results:
$ python test.py
{'a': 1, 'c_a': 2, 'c_b_x': 5, 'd': [1, 2, 3], 'c_b_y': 10}
I am using python3.2, update for your version of python.
This is not restricted to dictionaries, but every mapping type that implements .items(). Further ist faster as it avoides an if condition. Nevertheless credits go to Imran:
def flatten(d, parent_key=''):
items = []
for k, v in d.items():
try:
items.extend(flatten(v, '%s%s_' % (parent_key, k)).items())
except AttributeError:
items.append(('%s%s' % (parent_key, k), v))
return dict(items)
How about a functional and performant solution in Python3.5?
from functools import reduce
def _reducer(items, key, val, pref):
if isinstance(val, dict):
return {**items, **flatten(val, pref + key)}
else:
return {**items, pref + key: val}
def flatten(d, pref=''):
return(reduce(
lambda new_d, kv: _reducer(new_d, *kv, pref),
d.items(),
{}
))
This is even more performant:
def flatten(d, pref=''):
return(reduce(
lambda new_d, kv: \
isinstance(kv[1], dict) and \
{**new_d, **flatten(kv[1], pref + kv[0])} or \
{**new_d, pref + kv[0]: kv[1]},
d.items(),
{}
))
In use:
my_obj = {'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y': 10}}, 'd': [1, 2, 3]}
print(flatten(my_obj))
# {'d': [1, 2, 3], 'cby': 10, 'cbx': 5, 'ca': 2, 'a': 1}
If you are a fan of pythonic oneliners:
my_dict={'a': 1,'c': {'a': 2,'b': {'x': 5,'y' : 10}},'d': [1, 2, 3]}
list(pd.json_normalize(my_dict).T.to_dict().values())[0]
returns:
{'a': 1, 'c.a': 2, 'c.b.x': 5, 'c.b.y': 10, 'd': [1, 2, 3]}
You can leave the [0] from the end, if you have a list of dictionaries and not just a single dictionary.
My Python 3.3 Solution using generators:
def flattenit(pyobj, keystring=''):
if type(pyobj) is dict:
if (type(pyobj) is dict):
keystring = keystring + "_" if keystring else keystring
for k in pyobj:
yield from flattenit(pyobj[k], keystring + k)
elif (type(pyobj) is list):
for lelm in pyobj:
yield from flatten(lelm, keystring)
else:
yield keystring, pyobj
my_obj = {'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y': 10}}, 'd': [1, 2, 3]}
#your flattened dictionary object
flattened={k:v for k,v in flattenit(my_obj)}
print(flattened)
# result: {'c_b_y': 10, 'd': [1, 2, 3], 'c_a': 2, 'a': 1, 'c_b_x': 5}
Utilizing recursion, keeping it simple and human readable:
def flatten_dict(dictionary, accumulator=None, parent_key=None, separator="."):
if accumulator is None:
accumulator = {}
for k, v in dictionary.items():
k = f"{parent_key}{separator}{k}" if parent_key else k
if isinstance(v, dict):
flatten_dict(dictionary=v, accumulator=accumulator, parent_key=k)
continue
accumulator[k] = v
return accumulator
Call is simple:
new_dict = flatten_dict(dictionary)
or
new_dict = flatten_dict(dictionary, separator="_")
if we want to change the default separator.
A little breakdown:
When the function is first called, it is called only passing the dictionary we want to flatten. The accumulator parameter is here to support recursion, which we see later. So, we instantiate accumulator to an empty dictionary where we will put all of the nested values from the original dictionary.
if accumulator is None:
accumulator = {}
As we iterate over the dictionary's values, we construct a key for every value. The parent_key argument will be None for the first call, while for every nested dictionary, it will contain the key pointing to it, so we prepend that key.
k = f"{parent_key}{separator}{k}" if parent_key else k
In case the value v the key k is pointing to is a dictionary, the function calls itself, passing the nested dictionary, the accumulator (which is passed by reference, so all changes done to it are done on the same instance) and the key k so that we can construct the concatenated key. Notice the continue statement. We want to skip the next line, outside of the if block, so that the nested dictionary doesn't end up in the accumulator under key k.
if isinstance(v, dict):
flatten_dict(dict=v, accumulator=accumulator, parent_key=k)
continue
So, what do we do in case the value v is not a dictionary? Just put it unchanged inside the accumulator.
accumulator[k] = v
Once we're done we just return the accumulator, leaving the original dictionary argument untouched.
NOTE
This will work only with dictionaries that have strings as keys. It will work with hashable objects implementing the __repr__ method, but will yield unwanted results.
Simple function to flatten nested dictionaries. For Python 3, replace .iteritems() with .items()
def flatten_dict(init_dict):
res_dict = {}
if type(init_dict) is not dict:
return res_dict
for k, v in init_dict.iteritems():
if type(v) == dict:
res_dict.update(flatten_dict(v))
else:
res_dict[k] = v
return res_dict
The idea/requirement was:
Get flat dictionaries with no keeping parent keys.
Example of usage:
dd = {'a': 3,
'b': {'c': 4, 'd': 5},
'e': {'f':
{'g': 1, 'h': 2}
},
'i': 9,
}
flatten_dict(dd)
>> {'a': 3, 'c': 4, 'd': 5, 'g': 1, 'h': 2, 'i': 9}
Keeping parent keys is simple as well.
I was thinking of a subclass of UserDict to automagically flat the keys.
from collections import UserDict

class FlatDict(UserDict):
    def __init__(self, *args, separator='.', **kwargs):
        # set the separator first: UserDict.__init__ calls __setitem__, which uses it
        self.separator = separator
        super().__init__(*args, **kwargs)

    def __setitem__(self, key, value):
        if isinstance(value, dict):
            for k1, v1 in FlatDict(value, separator=self.separator).items():
                super().__setitem__(f"{key}{self.separator}{k1}", v1)
        else:
            super().__setitem__(key, value)
The advantage is that keys can be added on the fly, or via standard dict instantiation, without surprises:
>>> fd = FlatDict(
... {
... 'person': {
... 'sexe': 'male',
... 'name': {
... 'first': 'jacques',
... 'last': 'dupond'
... }
... }
... }
... )
>>> fd
{'person.sexe': 'male', 'person.name.first': 'jacques', 'person.name.last': 'dupond'}
>>> fd['person'] = {'name': {'nickname': 'Bob'}}
>>> fd
{'person.sexe': 'male', 'person.name.first': 'jacques', 'person.name.last': 'dupond', 'person.name.nickname': 'Bob'}
>>> fd['person.name'] = {'civility': 'Dr'}
>>> fd
{'person.sexe': 'male', 'person.name.first': 'jacques', 'person.name.last': 'dupond', 'person.name.nickname': 'Bob', 'person.name.civility': 'Dr'}
This is similar to both imran's and ralu's answers. It does not use a generator, but instead employs recursion with a closure:
def flatten_dict(d, separator='_'):
    final = {}
    def _flatten_dict(obj, parent_keys=[]):
        for k, v in obj.items():
            if isinstance(v, dict):
                _flatten_dict(v, parent_keys + [k])
            else:
                key = separator.join(parent_keys + [k])
                final[key] = v
    _flatten_dict(d)
    return final

>>> print(flatten_dict({'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y' : 10}}, 'd': [1, 2, 3]}))
{'a': 1, 'c_a': 2, 'c_b_x': 5, 'c_b_y': 10, 'd': [1, 2, 3]}
The answers above work really well. Just thought I'd add the unflatten function that I wrote:
def unflatten(d):
    ud = {}
    for k, v in d.items():
        context = ud
        for sub_key in k.split('_')[:-1]:
            if sub_key not in context:
                context[sub_key] = {}
            context = context[sub_key]
        context[k.split('_')[-1]] = v
    return ud
Note: This doesn't account for '_' already present in keys, much like the flatten counterparts.
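A quick round-trip check (my example):
flat = {'a': 1, 'c_a': 2, 'c_b_x': 5, 'c_b_y': 10}
print(unflatten(flat))
# {'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y': 10}}}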
Davoud's solution is very nice but doesn't give satisfactory results when the nested dict also contains lists of dicts; his code can, however, be adapted for that case:
def flatten_dict(d):
    items = []
    for k, v in d.items():
        try:
            if isinstance(v, list):
                for l in v:
                    items.extend(flatten_dict(l).items())
            else:
                items.extend(flatten_dict(v).items())
        except AttributeError:
            # v was not a dict: keep the key-value pair as-is
            items.append((k, v))
    return dict(items)
def flatten(unflattened_dict, separator='_'):
    flattened_dict = {}
    for k, v in unflattened_dict.items():
        if isinstance(v, dict):
            sub_flattened_dict = flatten(v, separator)
            for k2, v2 in sub_flattened_dict.items():
                flattened_dict[k + separator + k2] = v2
        else:
            flattened_dict[k] = v
    return flattened_dict
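For example (my usage note; key order as on Python 3.7+, where dicts preserve insertion order):
>>> flatten({'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y': 10}}, 'd': [1, 2, 3]})
{'a': 1, 'c_a': 2, 'c_b_x': 5, 'c_b_y': 10, 'd': [1, 2, 3]}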
I actually wrote a package called cherrypicker recently to deal with this exact sort of thing since I had to do it so often!
I think the following code would give you exactly what you're after:
from cherrypicker import CherryPicker
dct = {
'a': 1,
'c': {
'a': 2,
'b': {
'x': 5,
'y' : 10
}
},
'd': [1, 2, 3]
}
picker = CherryPicker(dct)
picker.flatten().get()
You can install the package with:
pip install cherrypicker
...and there's more docs and guidance at https://cherrypicker.readthedocs.io.
Other methods may be faster, but the priority of this package is to make such tasks easy. If you do have a large list of objects to flatten though, you can also tell CherryPicker to use parallel processing to speed things up.
Here's a solution using a stack, with no recursion.
def flatten_nested_dict(nested):
    stack = list(nested.items())
    ans = {}
    while stack:
        key, val = stack.pop()
        if isinstance(val, dict):
            for sub_key, sub_val in val.items():
                stack.append((f"{key}_{sub_key}", sub_val))
        else:
            ans[key] = val
    return ans
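Usage (my example; note the result order follows the stack pops, not insertion order):
d = {'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y': 10}}, 'd': [1, 2, 3]}
print(flatten_nested_dict(d))
# {'d': [1, 2, 3], 'c_b_y': 10, 'c_b_x': 5, 'c_a': 2, 'a': 1}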
Using generators:
def flat_dic_helper(prepend, d):
    if len(prepend) > 0:
        prepend = prepend + "_"
    for k in d:
        i = d[k]
        if isinstance(i, dict):
            yield from flat_dic_helper(prepend + k, i)
        else:
            yield (prepend + k, i)

def flat_dic(d):
    return dict(flat_dic_helper("", d))

d = {'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y' : 10}}, 'd': [1, 2, 3]}
print(flat_dic(d))
>> {'a': 1, 'c_a': 2, 'c_b_x': 5, 'd': [1, 2, 3], 'c_b_y': 10}
Here's an algorithm for elegant, in-place replacement. Tested with Python 2.7 and Python 3.5. Using the dot character as a separator.
def flatten_json(json):
    if type(json) == dict:
        for k, v in list(json.items()):
            if type(v) == dict:
                flatten_json(v)
                json.pop(k)
                for k2, v2 in v.items():
                    json[k + "." + k2] = v2
Example:
d = {'a': {'b': 'c'}}
flatten_json(d)
print(d)
unflatten_json(d)
print(d)
Output:
{'a.b': 'c'}
{'a': {'b': 'c'}}
I published this code here along with the matching unflatten_json function.
If you want to flatten a nested dictionary and collect the list of all unique keys, here is a solution:
def flat_dict_return_unique_key(data, unique_keys=None):
    # avoid a mutable default argument, which would leak state across calls
    if unique_keys is None:
        unique_keys = set()
    if isinstance(data, dict):
        unique_keys.update(data.keys())
        for each_v in data.values():
            if isinstance(each_v, dict):
                flat_dict_return_unique_key(each_v, unique_keys)
    return list(unique_keys)
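For example (my usage sketch; the result is sorted only for display, since sets are unordered):
d = {'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y': 10}}}
print(sorted(flat_dict_return_unique_key(d)))
# ['a', 'b', 'c', 'x', 'y']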
I always prefer to access dict objects via .items(), so for flattening dicts I use the following recursive generator flat_items(d). If you would like to have a dict again, simply wrap it like this: flat = dict(flat_items(d))
def flat_items(d, key_separator='.'):
    """
    Flattens the dictionary containing other dictionaries like here: https://stackoverflow.com/questions/6027558/flatten-nested-python-dictionaries-compressing-keys

    >>> example = {'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y' : 10}}, 'd': [1, 2, 3]}
    >>> flat = dict(flat_items(example, key_separator='_'))
    >>> assert flat['c_b_y'] == 10
    """
    for k, v in d.items():
        if type(v) is dict:
            for k1, v1 in flat_items(v, key_separator=key_separator):
                yield key_separator.join((k, k1)), v1
        else:
            yield k, v
def flatten_nested_dict(_dict, _str=''):
    '''
    Recursive function to flatten a nested, JSON-like dictionary.
    '''
    ret_dict = {}
    for k, v in _dict.items():
        if isinstance(v, dict):
            ret_dict.update(flatten_nested_dict(v, _str='_'.join([_str, k]).strip('_')))
        elif isinstance(v, list):
            for index, item in enumerate(v):
                if isinstance(item, dict):
                    ret_dict.update(flatten_nested_dict(item, _str='_'.join([_str, k, str(index)]).strip('_')))
                else:
                    ret_dict['_'.join([_str, k, str(index)]).strip('_')] = item
        else:
            ret_dict['_'.join([_str, k]).strip('_')] = v
    return ret_dict
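Example (my addition, showing how list indices end up in the keys):
d = {'a': 1, 'c': {'b': [{'x': 5}, 7]}}
print(flatten_nested_dict(d))
# {'a': 1, 'c_b_0_x': 5, 'c_b_1': 7}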
Using dict.popitem() in straightforward nested-list-like recursion (note that this consumes the dictionary you pass in):
def flatten(d):
    if d == {}:
        return d
    else:
        k, v = d.popitem()
        if type(v) is not dict:
            return {k: v, **flatten(d)}
        else:
            flat_kv = flatten(v)
            for k1 in list(flat_kv.keys()):
                flat_kv[k + '_' + k1] = flat_kv[k1]
                del flat_kv[k1]
            return {**flat_kv, **flatten(d)}
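A quick demonstration (my example; note that the input dict is emptied in the process):
d = {'a': 1, 'c': {'a': 2, 'b': {'x': 5}}}
print(flatten(d))
# {'c_b_x': 5, 'c_a': 2, 'a': 1}
print(d)
# {}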
If you do not mind recursive functions, here is a solution. I have also taken the liberty of including an exclusion parameter, in case there are one or more values you wish to keep unflattened.
Code:
def flatten_dict(dictionary, exclude=[], delimiter='_'):
    flat_dict = dict()
    for key, value in dictionary.items():
        if isinstance(value, dict) and key not in exclude:
            flatten_value_dict = flatten_dict(value, exclude, delimiter)
            for k, v in flatten_value_dict.items():
                flat_dict[f"{key}{delimiter}{k}"] = v
        else:
            flat_dict[key] = value
    return flat_dict
Usage:
d = {'a':1, 'b':[1, 2], 'c':3, 'd':{'a':4, 'b':{'a':7, 'b':8}, 'c':6}, 'e':{'a':1,'b':2}}
flat_d = flatten_dict(dictionary=d, exclude=['e'], delimiter='.')
print(flat_d)
Output:
{'a': 1, 'b': [1, 2], 'c': 3, 'd.a': 4, 'd.b.a': 7, 'd.b.b': 8, 'd.c': 6, 'e': {'a': 1, 'b': 2}}
A variation of the flatten above (Flatten nested dictionaries, compressing keys), with max_level and a custom reducer.
from functools import reduce  # reduce lives in functools on Python 3

def flatten(d, max_level=None, reducer='tuple'):
    if reducer == 'tuple':
        reducer_seed = tuple()
        reducer_func = lambda x, y: (*x, y)
    else:
        raise ValueError(f'Unknown reducer: {reducer}')

    def impl(d, pref, level):
        return reduce(
            lambda new_d, kv:
                (max_level is None or level < max_level)
                and isinstance(kv[1], dict)
                and {**new_d, **impl(kv[1], reducer_func(pref, kv[0]), level + 1)}
                or {**new_d, reducer_func(pref, kv[0]): kv[1]},
            d.items(),
            {}
        )

    return impl(d, reducer_seed, 0)
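The result uses tuple keys (my example, not from the original answer):
d = {'a': 1, 'c': {'a': 2, 'b': {'x': 5}}}
print(flatten(d))
# {('a',): 1, ('c', 'a'): 2, ('c', 'b', 'x'): 5}
print(flatten(d, max_level=1))
# {('a',): 1, ('c', 'a'): 2, ('c', 'b'): {'x': 5}}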
I tried some of the solutions on this page (though not all), and those I tried failed to handle nested lists of dicts.
Consider a dict like this:
d = {
'owner': {
'name': {'first_name': 'Steven', 'last_name': 'Smith'},
'lottery_nums': [1, 2, 3, 'four', '11', None],
'address': {},
'tuple': (1, 2, 'three'),
'tuple_with_dict': (1, 2, 'three', {'is_valid': False}),
'set': {1, 2, 3, 4, 'five'},
'children': [
{'name': {'first_name': 'Jessica',
'last_name': 'Smith', },
'children': []
},
{'name': {'first_name': 'George',
'last_name': 'Smith'},
'children': []
}
]
}
}
Here's my makeshift solution:
def flatten_dict(input_node: dict, key_: str = '', output_dict=None):
    # use None instead of a mutable default ({}), which would persist across calls
    if output_dict is None:
        output_dict = {}
    if isinstance(input_node, dict):
        for key, val in input_node.items():
            new_key = f"{key_}.{key}" if key_ else f"{key}"
            flatten_dict(val, new_key, output_dict)
    elif isinstance(input_node, list):
        for idx, item in enumerate(input_node):
            flatten_dict(item, f"{key_}.{idx}", output_dict)
    else:
        output_dict[key_] = input_node
    return output_dict
which produces:
{
    'owner.name.first_name': 'Steven',
    'owner.name.last_name': 'Smith',
    'owner.lottery_nums.0': 1,
    'owner.lottery_nums.1': 2,
    'owner.lottery_nums.2': 3,
    'owner.lottery_nums.3': 'four',
    'owner.lottery_nums.4': '11',
    'owner.lottery_nums.5': None,
    'owner.tuple': (1, 2, 'three'),
    'owner.tuple_with_dict': (1, 2, 'three', {'is_valid': False}),
    'owner.set': {1, 2, 3, 4, 'five'},
    'owner.children.0.name.first_name': 'Jessica',
    'owner.children.0.name.last_name': 'Smith',
    'owner.children.1.name.first_name': 'George',
    'owner.children.1.name.last_name': 'Smith',
}
A makeshift solution, and it's not perfect.
NOTE:
it doesn't keep empty dicts, such as the address: {} k/v pair.
it won't flatten dicts in nested tuples, though that would be easy to add, since Python tuples behave much like lists.
You can use recursion in order to flatten your dictionary.
import collections.abc

def flatten(
    nested_dict,
    separator='.',
    name=None,
):
    flat_dict = {}
    if isinstance(nested_dict, collections.abc.MutableMapping):
        # an empty mapping produces no items, so empty dicts are dropped
        for key, value in nested_dict.items():
            flat_dict.update(
                flatten(
                    nested_dict=value,
                    separator=separator,
                    name=f'{name}{separator}{key}' if name is not None else key,
                ),
            )
    else:
        # leaf values (including falsy ones such as 0 or '') are kept
        flat_dict[name] = nested_dict
    return flat_dict
if __name__ == '__main__':
    nested_dict = {
        1: 'a',
        2: {
            3: 'c',
            4: {
                5: 'e',
            },
            6: [1, 2, 3, 4, 5],
        },
    }
    print(
        flatten(
            nested_dict=nested_dict,
        ),
    )
Output:
{1: 'a', '2.3': 'c', '2.4.5': 'e', '2.6': [1, 2, 3, 4, 5]}
(The un-nested key 1 stays an int, while compound keys become strings built with the separator.)
Python dictionary iterating over nested dictionaries for a particular operation/function
I have a function which has to be executed on the nested dictionaries inside a dictionary. How should I execute it?
For example:
# I have the below dictionary
d = {'a':1, 'b':2, 'c':3, 'd': {'e':4, 'f':5}}
# I wanted a result as below
{'a': 1, 'b': 2, 'c': 3, 'e': 4, 'f': 5}
# I have executed it by
for i, j in list(d.items()):  # snapshot the items, since d is mutated inside the loop
    if type(j) == dict:
        for key, value in d[i].items():
            d[key] = value
        d.pop(i, None)
print(d)
# output
{'a': 1, 'b': 2, 'c': 3, 'e': 4, 'f': 5}
But what if there are many nested dictionaries? I am kind of confused about this. Any suggestions?
Thanks in advance.
I would suggest that this be a form of flattening:
def flatten(d):
    es = [d.pop(k) for k in sorted(d) if isinstance(d[k], dict)]
    # Because of sorted(), dictionaries whose keys come lexicographically later
    # overwrite the key-value pairs from those that come before.
    # One would expect `d.update(*es)` to work,
    # but it doesn't, as update only takes one argument.
    for e in es:
        d.update(e)

def flatten_many(d):
    while any(isinstance(d[k], dict) for k in d):
        flatten(d)
The first function pops every dictionary value from d and then updates d with them. The second function applies the first while there is any value that is a dictionary.
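Applied to the question's kind of input (my example):
d = {'a': 1, 'b': 2, 'c': 3, 'd': {'e': 4, 'f': {'g': 5}}}
flatten_many(d)
print(d)
# {'a': 1, 'b': 2, 'c': 3, 'e': 4, 'g': 5}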
dd = {}  # module-level accumulator; note it persists across calls
def myprint(d):
    for k, v in d.items():
        if isinstance(v, dict):
            myprint(v)
        else:
            dd.update({k: v})
    return dd

d = {'a': 1, 'b': 2, 'c': 3, 'd': {'e': 4, 'f': 5, 'g': {'h': 6}}}
print(myprint(d))
Output:
{'a': 1, 'b': 2, 'c': 3, 'e': 4, 'f': 5, 'h': 6}
How can I get a list of nested dictionary keys as dot separated strings?
Say I have a dictionary that looks like:
d = {'a': 1, 'b': {'sa': 11, 'sb': 22, 'sc': {'ssa': 111, 'ssb': 222}}, 'c': 3}
I want a list of all the keys whose values aren't other dicts, but represented by their dot notation (assuming you 'dot' at each level of the dict). To put it another way, I want the compound, dot-notation key for all values who have no children.
For example, for the above dict, I would like to get (not necessarily in any order):
['a',
'b.sa',
'b.sb',
'b.sc.ssa',
'b.sc.ssb',
'c']
I'm sure there is a more elegant way to solve this problem but this should get you started.
d = {'a': 1, 'b': {'sa': 11, 'sb': 22, 'sc': {'ssa': 111, 'ssb': 222}}, 'c': 3}
def dotter(d, key, dots):
    if isinstance(d, dict):
        for k in d:
            dotter(d[k], key + '.' + k if key else k, dots)
    else:
        dots.append(key)
    return dots

print(dotter(d, '', []))
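With the sample dict above, this prints the compound keys in insertion order (on Python 3.7+, where dicts preserve insertion order):
['a', 'b.sa', 'b.sb', 'b.sc.ssa', 'b.sc.ssb', 'c']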
d = {'a': 1, 'b': {'sa': 11, 'sb': 22, 'sc': {'ssa': 111, 'ssb': 222}}, 'c': 3}
def fun(k, d, pre):
    path = '%s.%s' % (pre, k) if pre else k
    return path if type(d[k]) is not dict else ",".join([fun(i, d[k], path) for i in d[k]])

print(",".join([fun(k, d, '') for k in d]).split(','))
Output:
['a', 'b.sa', 'b.sb', 'b.sc.ssa', 'b.sc.ssb', 'c']
In case you want the dict with its values:
def dotter(mixed, key='', dots=None):
    # pass the accumulator explicitly rather than relying on a mutable default,
    # which would leak entries between separate calls
    if dots is None:
        dots = {}
    if isinstance(mixed, dict):
        for k, v in mixed.items():
            dotter(v, '%s.%s' % (key, k) if key else k, dots)
    else:
        dots[key] = mixed
    return dots
>>> d = {'a': 1, 'b': {'sa': 11, 'sb': 22, 'sc': {'ssa': 111, 'ssb': 222}}, 'c': 3}
>>> dotted_dict = dotter(d)
>>> print(dotted_dict)
{'a': 1, 'b.sa': 11, 'b.sb': 22, 'b.sc.ssa': 111, 'b.sc.ssb': 222, 'c': 3}