Recursively create dataclasses based on nested dictionary - python

I have a dataclass called Config that is created from the properties and values of a dictionary. Since this dictionary can have nested dictionaries, I would like to turn those nested dictionaries into Config objects as well. Here is an example:
## Dummy example of a config dict
data = {
    'a': 1,
    'b': [2, 2, 2],
    'c': {
        'c_1': 3.1
    }
}

final_config = create_config(data)

# Expected result
Config(a=1, b=[2, 2, 2], c=Config(c_1=3.1))
Here is what I've come up with, using dataclasses.make_dataclass:
def _Config(params_dict):
    config = make_dataclass('Config', params_dict.keys())
    return config(**params_dict)

def get_inner_dict(d):
    for _, v in d.items():
        if isinstance(v, dict):
            return get_inner_dict(v)
        else:
            return _Config(**d)
Unfortunately, this doesn't work because the recursion will try to create a dataclass object when it finds a single value. I feel like I'm on the right track, but I couldn't figure out what needs to change.

It looks like you (technically) don't need to use dataclasses or make_dataclass in this scenario.
You can implement a custom class with a __dict__ update approach as mentioned by @Stef. Check out the following example:
from __future__ import annotations

## Dummy example of a config dict
data = {
    'a': 1,
    'b': [2, 2, 2],
    'c': {
        'c_1': 3.1
    },
    'd': [
        1,
        '2',
        {'k1': 'v1'}
    ]
}
_CONTAINER_TYPES = (dict, list)
class Config:
    def __init__(self, **kwargs):
        self.__dict__ = kwargs

    @classmethod
    def create(cls, data: dict | list) -> Config | list:
        if isinstance(data, list):
            return [cls.create(e) if isinstance(e, _CONTAINER_TYPES) else e
                    for e in data]
        new_data = {
            k: cls.create(v) if isinstance(v, _CONTAINER_TYPES) else v
            for k, v in data.items()
        }
        return cls(**new_data)

    def __repr__(self):
        return f"Config({', '.join([f'{name}={val!r}' for name, val in self.__dict__.items()])})"
final_config = Config.create(data)
print(final_config)
# Prints:
# Config(a=1, b=[2, 2, 2], c=Config(c_1=3.1), d=[1, '2', Config(k1='v1')])
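If you do want actual dataclass instances (closer to the original make_dataclass attempt), a small recursive wrapper can work as well. A minimal sketch, assuming all keys are valid Python identifiers and leaving dicts nested inside lists untouched:

from dataclasses import make_dataclass

def create_config(params: dict):
    # Recurse first so nested dicts become Config instances,
    # then build a dataclass whose field names are the dict keys.
    fields = {k: create_config(v) if isinstance(v, dict) else v
              for k, v in params.items()}
    cls = make_dataclass('Config', fields.keys())
    return cls(**fields)

print(create_config({'a': 1, 'b': [2, 2, 2], 'c': {'c_1': 3.1}}))
# Config(a=1, b=[2, 2, 2], c=Config(c_1=3.1))

Note that each call creates a fresh Config class, so two configs with the same fields are still instances of different classes.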


Avoid using if statements for checking keys in dict

I am trying to map some values from data to a template. I want to fill in the values (with some manipulations) in the template only if they are already present in it. My template has hundreds of keys and my goal is to avoid the if statement before each manipulation and assignment.
The point of the if statements is to defer evaluation of the manipulations I am performing as they may be expensive to perform. Any solutions should take this into account.
data = {
    'a': 1,
    'b': 2,
    'c': 3,
    'd': 4,
    'e': 5
}

template1 = {
    'p': 'Nan',
    'q': 'Nan',
    'r': 'Nan'
}

template2 = {
    'p': 'Nan',
    's': 'Nan',
    't': 'Nan'
}

def func(template, data):
    if 'p' in template.keys():
        template['p'] = data['a']
    if 'q' in template.keys():
        template['q'] = data['b'][:2] + 'some manipulation'
    if 'r' in template.keys():
        template['r'] = data['c']
    if 's' in template.keys():
        template['s'] = data['d'] + 'some mainpulation'
    if 't' in template.keys():
        template['t'] = data['e']
I know I am missing something basic, my actual code and requirements are pretty complex and I tried to simplify them and bring them down to this simple structure.
Thanks for your help in advance!
You could also store manipulations directly in your data dict using lambda functions, then check whether any value retrieved from the data dict is callable() when using it to update the template. If you can't modify the keys in the data dict, this approach could still work combined with the template_dict mapping approach suggested by Jlove.
data = {
    'p': 1,
    'q': 2,
    'r': 3,
    's': 4,
    't': 5,
    'u': lambda x: x * 2
}

template1 = {
    'p': 'Nan',
    'q': 'Nan',
    'r': 'Nan',
    'u': 2
}

def func(template, data):
    for key in template:
        if callable(data[key]):
            template[key] = data[key](template[key])
        else:
            template[key] = data[key]

# driver
func(template1, data)
for k in template1.items():
    print(k)
--- Expanded solution based on comments ---
Basically the same as the above, but it shows how to use a mapping dict to direct how the data dict and an actions dict are combined to modify the template dict. It also shows how to map keys to functions using a dict.
from collections import defaultdict

def qManipulation(x):
    return x * 10

def sManipulation(x):
    return x * 3

data = {
    'a': 1,
    'b': 2,
    'c': 3,
    'd': 4,
    'e': 5
}

actions = {
    'q': qManipulation,
    's': sManipulation,
    'u': lambda x: x * 7
}

tempToDataMap = defaultdict(lambda: None, {
    'p': 'a',
    'q': 'b',
    'r': 'c',
    's': 'd',
    't': 'e'
})

template1 = {
    'p': 'Nan',
    'q': 'Nan',
    'r': 'Nan',
    'u': 2
}

def func(template, data):
    for key, val in template.items():
        dataKey = tempToDataMap[key]
        # check if the template key corresponds to a data dict key
        if dataKey is not None:
            # if the key mapping from template to data is actually in the data dict, use the data value in the template
            if dataKey in data:
                template[key] = data[dataKey]
                # if the template key is registered to an action in the actions dict, run the action
                if key in actions:
                    template[key] = actions[key](data[dataKey])
        # use this if you have a manipulation on a template field that is not populated by data.
        # this isn't present in the example, but could be handy if the template ever has default values other than Nan
        elif key in actions:
            template[key] = actions[key](template[key])

func(template1, data)
for k in template1.items():
    print(k)
If your manipulations can be expressed as a simple lambda, you could encapsulate the condition/assignment in a function to reduce the code clutter:
def func(template, data):
    def apply(k, action):
        if k in template:
            template[k] = action()

    apply('p', lambda: data['a'])
    apply('q', lambda: data['b'][:2] + 'some manipulation')
    apply('r', lambda: data['c'])
    apply('s', lambda: data['d'] + 'some mainpulation')
    apply('t', lambda: data['e'])
This is probably not a great idea but you could subclass dict and override __setitem__.
class GuardDict(dict):
    def __setitem__(self, key, callable_value):
        if key in self:
            super().__setitem__(key, callable_value())

    # we need a method to transform back to a dict
    def to_dict(self):
        return dict(self)

data = {
    'a': 1,
    'b': '2',
    'c': 3,
    'd': '4',
    'e': 5
}

template1 = {
    'p': 'Nan',
    'q': 'Nan',
    'r': 'Nan'
}

template2 = {
    'p': 'Nan',
    's': 'Nan',
    't': 'Nan'
}

def func(template, data):
    # create a GuardDict from the dict
    # this will utilize __setitem__ and only actually set keys
    # that already exist in the original dict
    template = GuardDict(template)
    template['p'] = lambda: data['a']
    template['q'] = lambda: data['b'] + 'some manipulation'
    template['r'] = lambda: data['c']
    template['s'] = lambda: data['d'] + 'some mainpulation'
    template['t'] = lambda: data['e']
    # set back to a dict
    return template.to_dict()

template1 = func(template1, data)
template2 = func(template2, data)
print(template1)
print(template2)
I should probably note that if there are other users of your code, they will probably hate you for this.
A dynamically functional approach might relieve you from all the ifs and elses, but it might complicate the overall program structure.
data = {
    'a': 1,
    'b': 2,
    'c': 3,
    'd': 4,
    'e': 5
}

template1 = {
    'p': 'Nan',
    'q': 'Nan',
    'r': 'Nan'
}

template2 = {
    'p': 'Nan',
    's': 'Nan',
    't': 'Nan'
}

# first, define your complex logic in functions, accounting for every possible template key
def p_logic(data, x):
    return data[x]

def q_logic(data, x):
    return data[x][:2] + 'some manipulation'

# Then build a dict of every possible template key, the associated value and a reference to one of the
# functions defined above
logic = {
    'p': {
        'value': 'a',
        'logic': p_logic
    },
    'q': {
        'value': 'b',
        'logic': q_logic
    },
}

def func(template, data):
    # for every key in a template, look up that key in our logic dict,
    # grab the value from the data,
    # and apply the complex logic that has been defined for this template value
    for item in template:  # template.keys() is not necessary!
        template[item] = logic[item]['logic'](data, logic[item]['value'])
The only thing I could think to do here would be to have some sort of dict and run your template through a for loop instead. Such as:
template_dict = {'p': 'a', 'q': 'b', 'r': 'c', 's': 'd', 't': 'e'}

def func(template, data):
    for key, value in template_dict.items():
        if key in template.keys():
            template[key] = data[value]
Otherwise, I'm not sure how you might be able to avoid all those conditionals.
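If deferred evaluation matters (the question states the manipulations may be expensive), the same mapping idea can store small callables instead of plain key names, so a manipulation only runs when its template key is actually present. A rough sketch, assuming the sample data above and coercing values to str where the original code sliced strings:

actions = {
    'p': lambda d: d['a'],
    'q': lambda d: str(d['b'])[:2] + 'some manipulation',
    'r': lambda d: d['c'],
    's': lambda d: str(d['d']) + 'some manipulation',
    't': lambda d: d['e'],
}

def func(template, data):
    for key, action in actions.items():
        if key in template:              # only evaluate when the key actually exists
            template[key] = action(data)

func(template1, data)
print(template1)  # {'p': 1, 'q': '2some manipulation', 'r': 3}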

How to stack the keys from a nested dict and flatten it

I had a task to flatten a nested dict, which was easy. This is my code for that:
class Simple:
    def __init__(self):
        self.store_data = {}

    def extract_data(self, config):
        for key in config:
            if isinstance(config[key], dict):
                self.extract_data(config[key])
            else:
                self.store_data[key] = config[key]
        return self.store_data
This was my input:
input = {
    'k1_lv1': {
        'k1_lv2': 'v1_lv2', 'k2_lv2': 'v2_lv2'},
    'k2_lv1': 'v1_lv1',
    'k3_lv1': {
        'k1_lv2': 'v1_lv2', 'k2_lv2': 'v2_vl2'},
    'k4_lv1': 'v1_lv1',
}
and this was my output (imagine that the keys are unique):
output = {
    'k1_lv2': 'v1_lv2', 'k2_lv2': 'v2_lv2',
    'k2_lv1': 'v1_lv1',
    'k1_lv2': 'v1_lv2', 'k2_lv2': 'v2_vl2',
    'k4_lv1': 'v1_lv1'
}
but now my task has been changed and my output has to become like this:
output = {
    'k1_lv1_k1_lv2': 'v1_lv2',
    'k1_lv1_k2_lv2': 'v2_lv2',
    'k2_lv1': 'v1_lv1',
    'k3_lv1_k1_lv2': 'v1_lv2',
    'k3_lv1_k2_lv2': 'v2_vl2',
    'k4_lv1': 'v1_lv1'
}
So I have to not only flatten the nested dict, but also keep the keys of the nested dicts.
I tried to achieve that output but I am failing.
You can use recursion for the task:
dct = {
    "k1_lv1": {"k1_lv2": "v1_lv2", "k2_lv2": "v2_lv2"},
    "k2_lv1": "v1_lv1",
    "k3_lv1": {"k1_lv2": "v1_lv2", "k2_lv2": "v2_vl2"},
    "k4_lv1": "v1_lv1",
}

def flatten(d, path=""):
    if isinstance(d, dict):
        for k, v in d.items():
            yield from flatten(v, (path + "_" + k).strip("_"))
    else:
        yield (path, d)

out = dict(flatten(dct))
print(out)
Prints:
{
    "k1_lv1_k1_lv2": "v1_lv2",
    "k1_lv1_k2_lv2": "v2_lv2",
    "k2_lv1": "v1_lv1",
    "k3_lv1_k1_lv2": "v1_lv2",
    "k3_lv1_k2_lv2": "v2_vl2",
    "k4_lv1": "v1_lv1",
}
Why don't you loop through the keys using input.keys() and then stack the keys using
output['{}_{}'.format(key_level1, key_level2)] = input[key_level1][key_level2]
You might need to nest for loops and add a condition to test the depth of the keys in your dictionary, for example:
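A rough illustration of that suggestion, using the input dict from the question (which shadows the built-in name); since the loops are written out explicitly, it only handles one level of nesting, unlike the recursive answer above:

output = {}
for key_level1, value in input.items():
    if isinstance(value, dict):
        # stack the outer and inner key names
        for key_level2, inner_value in value.items():
            output['{}_{}'.format(key_level1, key_level2)] = inner_value
    else:
        output[key_level1] = value

print(output)
# {'k1_lv1_k1_lv2': 'v1_lv2', 'k1_lv1_k2_lv2': 'v2_lv2', 'k2_lv1': 'v1_lv1',
#  'k3_lv1_k1_lv2': 'v1_lv2', 'k3_lv1_k2_lv2': 'v2_vl2', 'k4_lv1': 'v1_lv1'}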

How can I convert/transform a JSON tree structure to a merkle tree

I'm running a web server, where I receive data in JSON format and planning to store it in a NoSQL database. Here is an example:
data_example = {
    "key1": "val1",
    "key2": [1, 2, 3],
    "key3": {
        "subkey1": "subval1",
        .
        .
    }
}
I had thoughts about using a Merkle tree to represent my data since JSON is also a tree-like structure.
Essentially, what I want to do is to store my data in (or as) a more secure decentralized tree-like structure. Many entities will have access to create, read, update or delete (CRUD) a record from it. These CRUD operations will ideally need to be verified from other entities in the network, which will also hold a copy of the database. Just like in blockchain.
I'm having a design/concept problem and I'm trying to understand how can I turn my JSON into a Merkle tree structure. This is my Node class:
class Node:
    """ class that represents a node in a merkle tree"""
    def __init__(self, data):
        self.data = data
        self.hash = self.calculate_some_hash()  # based on the data or based on its child nodes
I'm interested in the conception/design of this as I couldn't figure out how this can work. Any idea how to save/store my data_example object in a Merkle tree? (is it possible?)
You can create a Merkle Tree by first converting your dictionary to a class object form, and then recursively traverse the tree, hashing the sum of the child node hashes. Since a Merkle Tree requires a single root node, any input dictionaries that have more than one key at the topmost level should become the child dictionary of an empty root node (with a default key of None):
data_example = {
    "key1": "val1",
    "key2": [1, 2, 3],
    "key3": {
        "subkey1": "subval1",
        "subkey2": "subval2",
        "subkey3": "subval3",
    }
}
class MTree:
    def __init__(self, key, value):
        self.key, self.hash = key, None
        self.children = value if not isinstance(value, (dict, list)) else self.__class__.build(value, False)

    def compute_hashes(self):
        # build hashes up from the bottom
        if not isinstance(self.children, list):
            self.hash = hash(self.children)
        else:
            self.hash = hash(sum([i.compute_hashes() for i in self.children]))
        return self.hash

    def update_kv(self, k, v):
        # recursively update a value in the tree with an associated key
        if self.key == k:
            self.children = v
        elif isinstance(self.children, list):
            _ = [i.update_kv(k, v) for i in self.children]

    def update_tree(self, payload):
        # update key-value pairs in the tree from payload
        for a, b in payload.items():
            self.update_kv(a, b)
        self.compute_hashes()  # after the update is complete, recompute the hashes

    @classmethod
    def build(cls, dval, root=True):
        # normalize entries into (key, value) pairs; bare list elements get a None key
        vals = [i if isinstance(i, (list, tuple)) else (None, i) for i in getattr(dval, 'items', lambda: dval)()]
        if root:
            if len(vals) > 1:
                return cls(None, dval)
            return cls(vals[0][0], vals[0][-1])
        return [cls(a, b) for a, b in vals]

    def __repr__(self):
        return f'{self.__class__.__name__}({self.hash}, {repr(self.children)})'

tree = MTree.build(data_example)  # create the basic tree with the input dictionary
_ = tree.compute_hashes()  # get the hashes for each node (predicated on its children)
print(tree)
Output:
MTree(-1231139208667999673, [MTree(-8069796171680625903, 'val1'), MTree(6, [MTree(1, 1), MTree(2, 2), MTree(3, 3)]), MTree(-78872064628455629, [MTree(-8491910191379857244, 'subval1'), MTree(1818926376495655970, 'subval2'), MTree(1982425731828357743, 'subval3')])])
Updating the tree with the contents from a payload:
tree.update_tree({"key1": "newVal1"})
Output:
MTree(1039734050960246293, [MTree(5730292134016089818, 'newVal1'), MTree(6, [MTree(1, 1), MTree(2, 2), MTree(3, 3)]), MTree(-78872064628455629, [MTree(-8491910191379857244, 'subval1'), MTree(1818926376495655970, 'subval2'), MTree(1982425731828357743, 'subval3')])])
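To illustrate the verification idea from the question (other entities holding a copy of the data can check whether their tree agrees), comparing the root hashes of two independently built trees is enough to detect a divergence. A small usage sketch with the MTree class above; note that the built-in hash() used here is randomized per process for strings, so this comparison only makes sense within one process (a real Merkle tree would use a cryptographic hash such as SHA-256):

# two parties build a tree from what they believe is the same data
tree_a = MTree.build(data_example)
tree_a.compute_hashes()

tree_b = MTree.build(data_example)
tree_b.compute_hashes()

print(tree_a.hash == tree_b.hash)   # True: identical data, identical root hash

# after one copy is modified, the root hashes no longer match
tree_b.update_tree({"key1": "tampered"})
print(tree_a.hash == tree_b.hash)   # False: the change surfaces at the root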

Copy keys and list contents from JSON in python

I am trying to skim through a dictionary that contains asymmetrical data and make a list of unique headings. Aside from the normal key:value items, the data within the dictionary also includes other dictionaries, lists, lists of dictionaries, NoneTypes, and so on at various levels throughout. I would like to be able to keep the hierarchy of keys/indexes if possible. This will be used to assess the scope of the data and its availability. The data comes from a JSON file and its contents are subject to change.
My latest attempt is to do this through a series of type checks within a function, skim(), as seen below.
def skim(obj, header='', level=0):
    if obj is None:
        return

    def skim_iterable(iterable):
        lvl = level + 1
        if isinstance(iterable, (list, tuple)):
            for value in iterable:
                h = ':'.join([header, iterable.index(value)])
                return skim(value, header=h, level=lvl)
        elif isinstance(iterable, dict):
            for key, value in iterable.items():
                h = ':'.join([header, key])
                return skim(value, header=h, level=lvl)

    if isinstance(obj, (int, float, str, bool)):
        return ':'.join([header, obj, level])
    elif isinstance(obj, (list, dict, tuple)):
        return skim_iterable(obj)
The intent is to make a recursive call to skim() until the key or list index position at the deepest level is reached and then returned. skim has an inner function that handles iterable objects, carrying the level along with the key or list index position forward through each nested iterable object.
An example is below:
test = {
    "level_0Item_1": {
        "level_1Item_1": {
            "level_2Item_1": "value",
            "level_2Item_2": "value"
        },
        "level_1Item_2": {
            "level_2Item_1": "value",
            "level_2Item_2": {}
        }
    },
    "level_0Item_2": [
        {
            "level_1Item_1": "value",
            "level_1Item_2": 569028742
        }
    ],
    "level_0Item_3": []
}

collection = [skim(test)]
Right now I'm getting a return of [None] on the above code and would like some help troubleshooting or guidance on how best to approach this. What I was expecting is something like this:
['level_0Item_1:level_1Item_1:level_2Item_1',
 'level_0Item_1:level_1Item_1:level_2Item_2',
 'level_0Item_1:level_1Item_2:level_2Item_1',
 'level_0Item_1:level_1Item_2:level_2Item_2',
 'level_0Item_2:level_1Item_1',
 'level_0Item_2:level_1Item_2',
 'level_0Item_3']
Among other resources, I recently came across this question (python JSON complex objects (accounting for subclassing)) and read it and its included references. Full disclosure here, I've only recently begun coding.
Thank you for your help.
You can try something like:
def skim(obj, connector=':', level=0, builded_str=''):
    if isinstance(obj, dict):
        for k, v in obj.items():
            if isinstance(v, dict) and v:
                yield from skim(v, connector, level + 1, builded_str + k + connector)
            elif isinstance(v, list) and v:
                yield from skim(v[0], connector, level + 1, builded_str + k + connector)
            else:
                yield builded_str + k
    else:
        yield builded_str
Test:
test = {
    "level_0Item_1": {
        "level_1Item_1": {
            "level_2Item_1": "value",
            "level_2Item_2": "value"
        },
        "level_1Item_2": {
            "level_2Item_1": "value",
            "level_2Item_2": {}
        }
    },
    "level_0Item_2": [
        {
            "level_1Item_1": "value",
            "level_1Item_2": 569028742
        }
    ],
    "level_0Item_3": []
}

lst = list(skim(test))
print(lst)
['level_0Item_1:level_1Item_2:level_2Item_1',
 'level_0Item_1:level_1Item_2:level_2Item_2',
 'level_0Item_1:level_1Item_1:level_2Item_1',
 'level_0Item_1:level_1Item_1:level_2Item_2',
 'level_0Item_2:level_1Item_2',
 'level_0Item_2:level_1Item_1',
 'level_0Item_3']

JSON serialize a dictionary with tuples as key

Is there a way in Python to serialize a dictionary that is using a tuple as key?
e.g.
a = {(1, 2): 'a'}
simply using json.dumps(a) raises this error:
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "/usr/lib/python2.6/json/__init__.py", line 230, in dumps
    return _default_encoder.encode(obj)
  File "/usr/lib/python2.6/json/encoder.py", line 367, in encode
    chunks = list(self.iterencode(o))
  File "/usr/lib/python2.6/json/encoder.py", line 309, in _iterencode
    for chunk in self._iterencode_dict(o, markers):
  File "/usr/lib/python2.6/json/encoder.py", line 268, in _iterencode_dict
    raise TypeError("key {0!r} is not a string".format(key))
TypeError: key (1, 2) is not a string
You can't serialize that as JSON; JSON has a much less flexible idea than Python about what counts as a dict key.
You could transform the mapping into a sequence of key, value pairs, something like this:
import json

def remap_keys(mapping):
    return [{'key': k, 'value': v} for k, v in mapping.items()]

...

json.dumps(remap_keys({(1, 2): 'foo'}))
>>> '[{"value": "foo", "key": [1, 2]}]'
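Loading back then just reverses that mapping, turning each stored key list back into a tuple. A minimal sketch (the remap_keys_back name is not part of the original answer):

def remap_keys_back(pairs):
    # each entry looks like {'key': [...], 'value': ...};
    # JSON turned the original tuple key into a list, so convert it back
    return {tuple(pair['key']): pair['value'] for pair in pairs}

restored = remap_keys_back(json.loads('[{"value": "foo", "key": [1, 2]}]'))
print(restored)  # {(1, 2): 'foo'}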
from json import loads, dumps
from ast import literal_eval
x = {(0, 1): 'la-la la', (0, 2): 'extricate'}
# save: convert each tuple key to a string before saving as json object
s = dumps({str(k): v for k, v in x.items()})
# load in two stages:
# (i) load json object
obj = loads(s)
# (ii) convert loaded keys from string back to tuple
d = {literal_eval(k): v for k, v in obj.items()}
See https://stackoverflow.com/a/12337657/2455413.
JSON only supports strings as keys. You'll need to choose a way to represent those tuples as strings.
You could just use str((1, 2)) as the key, because JSON only accepts strings as keys; but if you do this, you'll have to use a[str((1, 2))] to get the value back.
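A minimal sketch of that idea:

import json

a = {(1, 2): 'a'}
s = json.dumps({str(k): v for k, v in a.items()})
print(s)               # {"(1, 2)": "a"}

b = json.loads(s)
print(b[str((1, 2))])  # 'a' -- lookups now go through str()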
json can only accept strings as dict keys; what you can do is replace the tuple keys with strings, like so:
with open("file", "w") as f:
k = dic.keys()
v = dic.values()
k1 = [str(i) for i in k]
json.dump(json.dumps(dict(zip(*[k1,v]))),f)
And then, when you want to read it, you can change the keys back to tuples using:
with open("file", r) as f:
data = json.load(f)
dic = json.loads(data)
k = dic.keys()
v = dic.values()
k1 = [eval(i) for i in k]
return dict(zip(*[k1,v]))
This solution:

- Avoids the security risk of eval().
- Is short.
- Is copy-pastable as save and load functions.
- Keeps the structure of the tuple as the key, in case you are editing the JSON by hand.
- Adds ugly \" to the tuple representation, which is worse than the other str()/eval() methods here.
- Can only handle tuples as keys at the first level for nested dicts (as of this writing, no other solution here can do better).
import json

def json_dumps_tuple_keys(mapping):
    string_keys = {json.dumps(k): v for k, v in mapping.items()}
    return json.dumps(string_keys)

def json_loads_tuple_keys(string):
    mapping = json.loads(string)
    return {tuple(json.loads(k)): v for k, v in mapping.items()}

m = {(0, "a"): "first", (1, "b"): [9, 8, 7]}
print(m)   # {(0, 'a'): 'first', (1, 'b'): [9, 8, 7]}
s = json_dumps_tuple_keys(m)
print(s)   # {"[0, \"a\"]": "first", "[1, \"b\"]": [9, 8, 7]}
m2 = json_loads_tuple_keys(s)
print(m2)  # {(0, 'a'): 'first', (1, 'b'): [9, 8, 7]}
print(m == m2)  # True
Here is one way to do it. It will require the key to be json decoded after the main dictionary is decoded and the whole dictionary re-sequenced, but it is doable:
import json

def jsonEncodeTupleKeyDict(data):
    ndict = dict()
    # creates a new dictionary with each original tuple key converted to a json string
    for key, value in data.items():
        nkey = json.dumps(key)
        ndict[nkey] = value
    # now encode the new dictionary and return that
    return json.dumps(ndict)

def main():
    tdict = dict()
    for i in range(10):
        key = (i, "data", 5 * i)
        tdict[key] = i * i

    try:
        print(json.dumps(tdict))
    except TypeError as e:
        print("JSON Encode Failed!", e)

    print(jsonEncodeTupleKeyDict(tdict))

if __name__ == '__main__':
    main()
I make no claim to any efficiency of this method. I needed this for saving some joystick mapping data to a file. I wanted to use something that would create a semi-human readable format so it could be edited if needed.
You actually can't serialize tuples as keys to JSON, but you can convert the tuples to strings and recover them after you have deserialized the file.
with_tuple = {(0.1, 0.1): 3.14}  ## this will work in python but is not serializable in json
{(0.1, 0.1): 3.14}
But you cannot serialize it with json. However, you can use
with_string = {str((0.1, 0.1))[1:-1]: 3.14}  ## the slice [1:-1] removes the parentheses surrounding the tuple
{'0.1, 0.1': 3.14}  # This is serializable
With a bit of cheating, you will recover the original tuple (after having deserialized the whole file) by treating each key (a str) separately:
tuple(json.loads("[" + '0.1, 0.1' + "]"))  ## will recover the tuple from the string
(0.1, 0.1)
It is a bit of overhead to convert a string to a tuple using json.loads, but it will work. Encapsulate it and you are done.
Peace out and happy coding!
Nicolas
Here are two functions you could use to convert a dict_having_tuple_as_key into a json_array_having_key_and_value_as_keys and then convert it back:
import json

def json_dumps_dict_having_tuple_as_key(dict_having_tuple_as_key):
    if not isinstance(dict_having_tuple_as_key, dict):
        raise Exception('Error using json_dumps_dict_having_tuple_as_key: The input variable is not a dictionary.')
    list_of_dicts_having_key_and_value_as_keys = [{'key': k, 'value': v} for k, v in dict_having_tuple_as_key.items()]
    json_array_having_key_and_value_as_keys = json.dumps(list_of_dicts_having_key_and_value_as_keys)
    return json_array_having_key_and_value_as_keys

def json_loads_dictionary_split_into_key_and_value_as_keys_and_underwent_json_dumps(json_array_having_key_and_value_as_keys):
    list_of_dicts_having_key_and_value_as_keys = json.loads(json_array_having_key_and_value_as_keys)
    if not all(['key' in diz and 'value' in diz for diz in list_of_dicts_having_key_and_value_as_keys]):
        raise Exception('Error using json_loads_dictionary_split_into_key_and_value_as_keys_and_underwent_json_dumps: at least one dictionary in list_of_dicts_having_key_and_value_as_keys is missing key "key" or key "value".')
    dict_having_tuple_as_key = {}
    for dict_having_key_and_value_as_keys in list_of_dicts_having_key_and_value_as_keys:
        dict_having_tuple_as_key[tuple(dict_having_key_and_value_as_keys['key'])] = dict_having_key_and_value_as_keys['value']
    return dict_having_tuple_as_key
usage example:
my_dict = {
    ('1', '1001', '2021-12-21', '1', '484'): {"name": "Carl", "surname": "Black", "score": 0},
    ('1', '1001', '2021-12-22', '1', '485'): {"name": "Joe", "id_number": 134, "percentage": 11}
}

my_json = json_dumps_dict_having_tuple_as_key(my_dict)
print(my_json)
[{"key": ["1", "1001", "2021-12-21", "1", "484"], "value": {"name": "Carl", "surname": "Black", "score": 0}},
 {"key": ["1", "1001", "2021-12-22", "1", "485"], "value": {"name": "Joe", "id_number": 134, "percentage": 11}}]

my_dict_reconverted = json_loads_dictionary_split_into_key_and_value_as_keys_and_underwent_json_dumps(my_json)
print(my_dict_reconverted)
{('1', '1001', '2021-12-21', '1', '484'): {'name': 'Carl', 'surname': 'Black', 'score': 0},
 ('1', '1001', '2021-12-22', '1', '485'): {'name': 'Joe', 'id_number': 134, 'percentage': 11}}

# proof of working 1
my_dict == my_dict_reconverted
True

# proof of working 2
my_dict == json_loads_dictionary_split_into_key_and_value_as_keys_and_underwent_json_dumps(
    json_dumps_dict_having_tuple_as_key(my_dict)
)
True
(Using concepts expressed by @SingleNegationElimination to answer @Kvothe's comment)
Here's a complete example to encode/decode nested dictionaries with tuple keys and values into/from JSON. A tuple key will become a string in JSON; values of type tuple or set will be converted to lists.
from ast import literal_eval

def JSdecoded(item: dict, dict_key=False):
    if isinstance(item, list):
        return [JSdecoded(e) for e in item]
    elif isinstance(item, dict):
        return {literal_eval(key): value for key, value in item.items()}
    return item

def JSencoded(item, dict_key=False):
    if isinstance(item, tuple):
        if dict_key:
            return str(item)
        else:
            return list(item)
    elif isinstance(item, list):
        return [JSencoded(e) for e in item]
    elif isinstance(item, dict):
        return {JSencoded(key, True): JSencoded(value) for key, value in item.items()}
    elif isinstance(item, set):
        return list(item)
    return item
usage
import json

pydata = [
    {('Apple', 'Green'): "Tree",
     ('Orange', 'Yellow'): "Orchard",
     ('John Doe', 1945): "New York"}
]

jsstr = json.dumps(JSencoded(pydata), indent='\t')
print(jsstr)
#[
#	{
#		"('Apple', 'Green')": "Tree",
#		"('Orange', 'Yellow')": "Orchard",
#		"('John Doe', 1945)": "New York"
#	}
#]

data = json.loads(jsstr)   # string keys
newdata = JSdecoded(data)  # tuple keys
print(newdata)
#[{('Apple', 'Green'): 'Tree', ('Orange', 'Yellow'): 'Orchard', ('John Doe', 1945): 'New York'}]
def stringify_keys(d):
    if isinstance(d, dict):
        return {str(k): stringify_keys(v) for k, v in d.items()}
    if isinstance(d, (list, tuple)):
        return type(d)(stringify_keys(v) for v in d)
    return d

json.dumps(stringify_keys(mydict))
