Avoid using if statements for checking keys in a dict - Python

I am trying to map some values from data to a template. I want to fill in the values (with some manipulations) in the template only if the keys are already present in it. My template has hundreds of keys, and my goal is to avoid an if statement before each manipulation and assignment.
The point of the if statements is to defer evaluation of the manipulations, as they may be expensive to perform. Any solution should take this into account.
data = {
    'a': 1,
    'b': '2',  # 'b' and 'd' are strings here so the string manipulations below work
    'c': 3,
    'd': '4',
    'e': 5
}
template1 = {
    'p': 'Nan',
    'q': 'Nan',
    'r': 'Nan'
}
template2 = {
    'p': 'Nan',
    's': 'Nan',
    't': 'Nan'
}
def func(template, data):
    if 'p' in template.keys():
        template['p'] = data['a']
    if 'q' in template.keys():
        template['q'] = data['b'][:2] + 'some manipulation'
    if 'r' in template.keys():
        template['r'] = data['c']
    if 's' in template.keys():
        template['s'] = data['d'] + 'some manipulation'
    if 't' in template.keys():
        template['t'] = data['e']
I know I am missing something basic; my actual code and requirements are pretty complex, so I tried to simplify them down to this structure.
Thanks for your help in advance!

You could also store manipulations directly in your data dict using lambda functions, then check whether any value retrieved from the data dict is callable() when using this dict to update the template. Assuming you can't modify the keys in the data dict, this approach could still be combined with the template_dict mapping approach suggested by Jlove.
data = {
    'p': 1,
    'q': 2,
    'r': 3,
    's': 4,
    't': 5,
    'u': lambda x: x * 2
}
template1 = {
    'p': 'Nan',
    'q': 'Nan',
    'r': 'Nan',
    'u': 2
}
def func(template, data):
    for key in template:
        if callable(data[key]):
            template[key] = data[key](template[key])
        else:
            template[key] = data[key]
# driver
func(template1, data)
for k in template1.items():
    print(k)
--- expanded solution based on comments ---
Basically the same as above, but shows how a mapping dict can direct how the data dict and an actions dict are combined to modify the template dict. It also shows how to map keys to functions using a dict.
from collections import defaultdict

def qManipulation(x):
    return x * 10

def sManipulation(x):
    return x * 3

data = {
    'a': 1,
    'b': 2,
    'c': 3,
    'd': 4,
    'e': 5
}
actions = {
    'q': qManipulation,
    's': sManipulation,
    'u': lambda x: x * 7
}
tempToDataMap = defaultdict(lambda: None, {
    'p': 'a',
    'q': 'b',
    'r': 'c',
    's': 'd',
    't': 'e'
})
template1 = {
    'p': 'Nan',
    'q': 'Nan',
    'r': 'Nan',
    'u': 2
}
def func(template, data):
    for key in template:
        dataKey = tempToDataMap[key]
        # check if the template key corresponds to a data dict key
        if dataKey is not None:
            # if the key mapped from template to data is actually in the data dict, use the data value in the template
            if dataKey in data:
                template[key] = data[dataKey]
                # if the template key is registered to an action in the actions dict, run the action
                if key in actions:
                    template[key] = actions[key](data[dataKey])
        # use this if you have a manipulation on a template field that is not populated by data.
        # this isn't present in the example, but could be handy if the template ever has default values other than Nan
        elif key in actions:
            template[key] = actions[key](template[key])
func(template1, data)
for k in template1.items():
    print(k)

If your manipulations can be expressed as a simple lambda, you could encapsulate the condition/assignment in a function to reduce the code clutter:
def func(template, data):
    def apply(k, action):
        if k in template:
            template[k] = action()
    apply('p', lambda: data['a'])
    apply('q', lambda: data['b'][:2] + 'some manipulation')
    apply('r', lambda: data['c'])
    apply('s', lambda: data['d'] + 'some manipulation')
    apply('t', lambda: data['e'])
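For example, with the data and template1 dicts from the question (a quick sketch; note the 's' and 't' lambdas never run because those keys are absent from template1, which satisfies the deferred-evaluation requirement):
func(template1, data)
print(template1)  # {'p': 1, 'q': '2some manipulation', 'r': 3}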

This is probably not a great idea but you could subclass dict and override __setitem__.
class GuardDict(dict):
    def __setitem__(self, key, callable_value):
        if key in self:
            super().__setitem__(key, callable_value())
    # we need a method to transform back to a dict
    def to_dict(self):
        return dict(self)
data = {
    'a': 1,
    'b': '2',
    'c': 3,
    'd': '4',
    'e': 5
}
template1 = {
    'p': 'Nan',
    'q': 'Nan',
    'r': 'Nan'
}
template2 = {
    'p': 'Nan',
    's': 'Nan',
    't': 'Nan'
}
def func(template, data):
    # create a GuardDict from the dict
    # this will utilize __setitem__ and only actually set keys
    # that already exist in the original dict
    template = GuardDict(template)
    template['p'] = lambda: data['a']
    template['q'] = lambda: data['b'] + 'some manipulation'
    template['r'] = lambda: data['c']
    template['s'] = lambda: data['d'] + 'some manipulation'
    template['t'] = lambda: data['e']
    # set back to a dict
    return template.to_dict()

template1 = func(template1, data)
template2 = func(template2, data)
print(template1)
print(template2)
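Given the data above, the expected printout is (a sketch):
{'p': 1, 'q': '2some manipulation', 'r': 3}
{'p': 1, 's': '4some manipulation', 't': 5}
Note that the lambdas for keys absent from each template are never called, so the expensive manipulations stay deferred.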
I should probably note that if there are other users of your code, they will probably hate you for this.

A dynamically functional approach might relieve you of all the ifs and elses, but it can complicate the overall program structure.
data = {
    'a': 1,
    'b': '2',  # a string, so the slicing in q_logic below works
    'c': 3,
    'd': 4,
    'e': 5
}
template1 = {
    'p': 'Nan',
    'q': 'Nan',
    'r': 'Nan'
}
template2 = {
    'p': 'Nan',
    's': 'Nan',
    't': 'Nan'
}
# first, define your complex logic in functions, accounting for every possible template key
def p_logic(data, x):
    return data[x]

def q_logic(data, x):
    return data[x][:2] + 'some manipulation'

# then build a dict of every possible template key, the associated data key, and a reference
# to one of the functions defined above (only 'p' and 'q' are shown here)
logic = {
    'p': {
        'value': 'a',
        'logic': p_logic
    },
    'q': {
        'value': 'b',
        'logic': q_logic
    },
}
def func(template, data):
    # for every key in a template, look up that key in our logic dict,
    # grab the value from the data,
    # and apply the complex logic that has been defined for this template value
    for item in template:  # template.keys() is not necessary!
        if item in logic:  # guard against template keys with no registered logic
            template[item] = logic[item]['logic'](data, logic[item]['value'])
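A quick driver (my sketch, not part of the original answer; 'r' keeps its default because only 'p' and 'q' have registered logic):
func(template1, data)
print(template1)  # {'p': 1, 'q': '2some manipulation', 'r': 'Nan'}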

The only thing I could think to do here would be to have some sort of mapping dict and run your template through a for loop instead, such as:
template_dict = {'p': 'a', 'q': 'b', 'r': 'c', 's': 'd', 't': 'e'}

def func(template, data):
    for key, value in template_dict.items():
        if key in template:
            template[key] = data[value]
Otherwise, I'm not sure how you might be able to avoid all those conditionals.
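One hedged extension of the same idea (my sketch, not from the original answer): map each template key to a function of the data instead of a plain data key, so an expensive manipulation only runs when the key is actually present in the template.
recipes = {
    'p': lambda d: d['a'],
    'q': lambda d: d['b'][:2] + 'some manipulation',
    'r': lambda d: d['c'],
    's': lambda d: d['d'] + 'some manipulation',
    't': lambda d: d['e'],
}

def func(template, data):
    # dict .keys() views support set intersection, so only the recipes
    # for keys actually present in the template are ever evaluated
    for key in template.keys() & recipes.keys():
        template[key] = recipes[key](data)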

Related

Recursively create dataclasses based on a nested dictionary

I have a dataclass called Config that is created from the properties and values of a dictionary. Since this dictionary can have nested dictionaries, I would like nested dictionaries to become Config objects as well. Here is an example:
## Dummy example of a config dict
data = {
    'a': 1,
    'b': [2, 2, 2],
    'c': {
        'c_1': 3.1
    }
}
final_config = create_config(data)
# Expected result
Config(a=1, b=[2, 2, 2], c=Config(c_1=3.1))
Here is what I've come up with, using dataclasses.make_dataclass:
def _Config(params_dict):
    config = make_dataclass('Config', params_dict.keys())
    return config(**params_dict)

def get_inner_dict(d):
    for _, v in d.items():
        if isinstance(v, dict):
            return get_inner_dict(v)
        else:
            return _Config(**d)
Unfortunately, this doesn't work because the recursion will try to create a dataclass object when it finds a single value. I feel like I'm on the right track, but I couldn't figure out what needs to change.
It looks like you (technically) don't need to use dataclasses or make_dataclass in this scenario.
You can implement a custom class with a __dict__ update approach as mentioned by @Stef. Check out the following example:
from __future__ import annotations

## Dummy example of a config dict
data = {
    'a': 1,
    'b': [2, 2, 2],
    'c': {
        'c_1': 3.1
    },
    'd': [
        1,
        '2',
        {'k1': 'v1'}
    ]
}

_CONTAINER_TYPES = (dict, list)

class Config:
    def __init__(self, **kwargs):
        self.__dict__ = kwargs

    @classmethod
    def create(cls, data: dict | list) -> Config | list:
        if isinstance(data, list):
            return [cls.create(e) if isinstance(e, _CONTAINER_TYPES) else e
                    for e in data]
        new_data = {
            k: cls.create(v) if isinstance(v, _CONTAINER_TYPES) else v
            for k, v in data.items()
        }
        return cls(**new_data)

    def __repr__(self):
        return f"Config({', '.join([f'{name}={val!r}' for name, val in self.__dict__.items()])})"

final_config = Config.create(data)
print(final_config)
# Prints:
# Config(a=1, b=[2, 2, 2], c=Config(c_1=3.1), d=[1, '2', Config(k1='v1')])
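If you'd rather keep make_dataclass, a minimal recursive sketch (my own illustration, assuming only dict values need converting) could look like this: convert nested dicts first, then build the dataclass for the current level.
from dataclasses import make_dataclass

def create_config(d):
    # recurse into nested dicts before building this level's dataclass
    fields = {k: create_config(v) if isinstance(v, dict) else v
              for k, v in d.items()}
    Config = make_dataclass('Config', fields.keys())
    return Config(**fields)

print(create_config({'a': 1, 'b': [2, 2, 2], 'c': {'c_1': 3.1}}))
# Config(a=1, b=[2, 2, 2], c=Config(c_1=3.1))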

Generate pydantic model from a dict

Is there a straight-forward approach to generate a Pydantic model from a dictionary?
Here is a sample of the data I have.
{
    'id': '424c015f-7170-4ac5-8f59-096b83fe5f5806082020',
    'contacts': [{
        'displayName': 'Norma Fisher',
        'id': '544aa395-0e63-4f9a-8cd4-767b3040146d'
    }],
    'startTime': '2020-06-08T09:38:00+00:00'
}
Expecting a model similar to ...
class NewModel(BaseModel):
    id: str
    contacts: list
    startTime: str
You can use MyModel.parse_obj(my_dict) to generate a model from a dictionary. According to the documentation, this is very similar to the __init__ method of the model, except it takes a dict rather than keyword arguments.
You can also use its __init__ method:
your_model = YourModel(**your_dict)
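For instance, with the NewModel sketch above (pydantic v1 syntax; the values come from the sample data in the question):
from pydantic import BaseModel

class NewModel(BaseModel):
    id: str
    contacts: list
    startTime: str

my_dict = {
    'id': '424c015f-7170-4ac5-8f59-096b83fe5f5806082020',
    'contacts': [{'displayName': 'Norma Fisher',
                  'id': '544aa395-0e63-4f9a-8cd4-767b3040146d'}],
    'startTime': '2020-06-08T09:38:00+00:00',
}
model = NewModel.parse_obj(my_dict)  # or: NewModel(**my_dict)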
I use this method to generate models at run time using a dictionary definition. This approach allows you to define nested models too. The field type syntax borrows from the create_model method.
from pydantic import create_model

m = {
    "a": (int, ...),
    "b": {
        "c": (str, "hi"),
        "d": {
            "e": (bool, True),
            "f": (float, 0.5)
        }
    }
}

def dict_model(name: str, dict_def: dict):
    fields = {}
    for field_name, value in dict_def.items():
        if isinstance(value, tuple):
            fields[field_name] = value
        elif isinstance(value, dict):
            fields[field_name] = (dict_model(f'{name}_{field_name}', value), ...)
        else:
            raise ValueError(f"Field {field_name}:{value} has invalid syntax")
    return create_model(name, **fields)

model = dict_model("some_name", m)
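A quick instantiation check (my sketch; pydantic v1 coerces the nested dicts into the generated sub-models):
instance = model(a=1, b={'c': 'hello', 'd': {'e': False, 'f': 1.5}})
print(instance)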
There's no method for exactly that, but you can use create_model() to create a model if you know the field types.
Or there's datamodel-code-generator (separate package) which allows you to generate models from schema definitions.
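A typical invocation looks something like this (flag names taken from the project's docs; double-check against your installed version):
datamodel-codegen --input schema.json --input-file-type jsonschema --output model.py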
If you have a sample JSON and want to generate a pydantic model for validation and use, you can try the website https://jsontopydantic.com/, which can generate a pydantic model from a sample JSON.
Whilst I like @data_wiz's dictionary definition, here is an alternative suggestion based on my need to take simple JSON responses on the fly (which typically have CamelCase key elements) and process them into a pythonic-styled class.
With the standard functions, JSON converts to a Dict easily. However:
I wanted to work on this in a pythonic style
I also wanted to be able to have some type overrides, converting strings to pythonic types
I also wanted to indicate elements that are optional. This is where I start loving Pydantic.
The following code snippet can generate a model from an actual data Dict from a JSON API response. As the keys are CamelCase, it converts them to pythonic snake style but retains the CamelCase as an Alias.
This pydantic aliasing enables easy consumption of a JSON converted to a Dict without key conversion, and also direct export of JSON-formatted output. NB: observe the config of the dynamic model, DynamicModel.__config__.allow_population_by_field_name = True; this allows the creation of a DynamicModel from either Alias or pythonic field names.
This code is not fully featured: it currently cannot handle Lists, but it is working well for me for simple cases.
An example of use is in the docstring of pydanticModelGenerator.
from inflection import underscore
from typing import Any, Dict, Optional
from pydantic import BaseModel, Field, create_model

class ModelDef(BaseModel):
    """Assistance class for Pydantic dynamic model generation"""
    field: str
    field_alias: str
    field_type: Any

class pydanticModelGenerator:
    """
    Takes source_data:Dict (a single instance example of something like a JSON node) and self-generates a pythonic data model with an Alias to the original source field names. This makes it easy to populate or export to other systems yet handle the data in a pythonic way.
    Being a pydantic datamodel, all the richness of pydantic data validation is available, and these models can easily be used in FastAPI and/or an ORM.
    It does not process full JSON data structures but takes a simple JSON document with basic elements.
    Provide a model_name, an example of JSON data and a dict of type overrides.
    Example:
    source_data = {'Name': '48 Rainbow Rd',
                   'GroupAddressStyle': 'ThreeLevel',
                   'LastModified': '2020-12-21T07:02:51.2400232Z',
                   'ProjectStart': '2020-12-03T07:36:03.324856Z',
                   'Comment': '',
                   'CompletionStatus': 'Editing',
                   'LastUsedPuid': '955',
                   'Guid': '0c85957b-c2ae-4985-9752-b300ab385b36'}
    source_overrides = {'Guid': {'type': uuid.UUID},
                        'LastModified': {'type': datetime},
                        'ProjectStart': {'type': datetime},
                        }
    source_optionals = {"Comment": True}
    # create Model
    model_Project = pydanticModelGenerator(
        model_name="Project",
        source_data=source_data,
        overrides=source_overrides,
        optionals=source_optionals).generate_model()
    # create instance using DynamicModel
    project_instance = model_Project(**project_info)
    """
    def __init__(
        self,
        model_name: str = None,
        source_data: str = None,
        overrides: Dict = {},
        optionals: Dict = {},
    ):
        def field_type_generator(k, overrides, optionals):
            field_type = str if not overrides.get(k) else overrides[k]["type"]
            return field_type if not optionals.get(k) else Optional[field_type]

        self._model_name = model_name
        self._json_data = source_data
        self._model_def = [
            ModelDef(
                field=underscore(k),
                field_alias=k,
                field_type=field_type_generator(k, overrides, optionals),
            )
            for k in source_data.keys()
        ]

    def generate_model(self):
        """
        Creates a pydantic BaseModel
        from the json and overrides provided at initialization
        """
        fields = {
            d.field: (d.field_type, Field(alias=d.field_alias)) for d in self._model_def
        }
        DynamicModel = create_model(self._model_name, **fields)
        DynamicModel.__config__.allow_population_by_field_name = True
        return DynamicModel
Here is customized code for data model generation using Python dicts.
Code mostly borrowed from @data_wiz.
Helper Functions
from pydantic import create_model
# https://stackoverflow.com/questions/62267544/generate-pydantic-model-from-a-dict
from copy import deepcopy

def get_default_values(input_schema_copy):
    """Get the default values from the structured schema dictionary. Recursive traversal of the schema is performed here.

    Args:
        input_schema_copy (dict): The input structured dictionary schema. Prefer a deepcopy of the input schema to avoid in-place changes to it.
    Returns:
        default_values (dict): The default values of the input schema.
    """
    for k, v in input_schema_copy.items():
        if isinstance(v, dict):
            input_schema_copy[k] = get_default_values(v)
        else:
            input_schema_copy[k] = v[1]
    return input_schema_copy

def get_defaults(input_schema):
    """Wrapper around get_default_values that works on a deepcopy of the input schema to avoid arbitrary value changes.

    Args:
        input_schema (dict): The input structured dictionary schema.
    Returns:
        default_values (dict): The default values of the input schema.
    """
    input_schema_copy = deepcopy(input_schema)
    return get_default_values(input_schema_copy)

def are_any_defaults_empty(default_values):
    """Check if any of the default values are empty (Ellipsis - ...).

    Args:
        default_values (dict): The default values of the input schema.
    Returns:
        bool: True if any of the default values are empty (Ellipsis - ...), False otherwise.
    """
    for _, v in default_values.items():
        if isinstance(v, dict):
            # propagate a positive result up from nested structures
            if are_any_defaults_empty(v):
                return True
        else:
            if v is Ellipsis:  # ... symbol
                return True
    return False

def correct_schema_structure(input_schema_copy):
    for k, v in input_schema_copy.items():
        if isinstance(v, dict):
            input_schema_copy[k] = correct_schema_structure(v)
        elif type(v) == type:
            input_schema_copy[k] = (v, ...)
        elif not hasattr(v, '__iter__') or isinstance(v, str):
            input_schema_copy[k] = (type(v), v)
    return input_schema_copy

def dict_model(dict_def: dict, name: str = "Demo_Pydantic_Nested_Model"):
    """Helper function to create the Pydantic model from the dictionary.

    Args:
        name (str): The model name that you wish to give to the Pydantic model.
        dict_def (dict): The schema definition using a dictionary.
    Raises:
        ValueError: When the schema definition is not a tuple/dictionary.
    Returns:
        pydantic.Model: A Pydantic model.
    """
    fields = {}
    for field_name, value in dict_def.items():
        if isinstance(value, tuple):
            fields[field_name] = value
        elif isinstance(value, dict):
            # assign defaults to nested structures here (if present)
            default_value = get_defaults(value)
            default_value = Ellipsis if are_any_defaults_empty(default_value) else default_value
            fields[field_name] = (dict_model(value, f'{name}_{field_name}'), default_value)
        else:
            raise ValueError(f"Field {field_name}:{value} has invalid syntax")
    print(fields)  # helpful for debugging
    return create_model(name, **fields)
Schema Correction
input_schema = {
    "a": (int, ...),
    "b": {
        "c": (str, "hi"),
        "d": {
            "e": (bool, True),
            "f": (float, 0.5)
        },
    },
    "g": "hello",
    "h": 123,
    "i": str,
    "k": int
}
input_schema_corrected = correct_schema_structure(input_schema)
input_schema_corrected
Output:
{'a': (int, Ellipsis),
 'b': {'c': (str, 'hi'), 'd': {'e': (bool, True), 'f': (float, 0.5)}},
 'g': (str, 'hello'),
 'h': (int, 123),
 'i': (str, Ellipsis),
 'k': (int, Ellipsis)}
Actual Model Creation
model = dict_model(dict_def=input_schema, name="Demo_Pydantic_Nested_Model")
Checking the Model Schema
model.schema()
{'title': 'Demo_Pydantic_Nested_Model',
'type': 'object',
'properties': {'a': {'title': 'A', 'type': 'integer'},
'b': {'title': 'B',
'default': {'c': 'hi', 'd': {'e': True, 'f': 0.5}},
'allOf': [{'$ref': '#/definitions/Demo_Pydantic_Nested_Model_b'}]},
'g': {'title': 'G', 'default': 'hello', 'type': 'string'},
'h': {'title': 'H', 'default': 123, 'type': 'integer'},
'i': {'title': 'I', 'type': 'string'},
'k': {'title': 'K', 'type': 'integer'}},
'required': ['a', 'i', 'k'],
'definitions': {'Demo_Pydantic_Nested_Model_b_d': {'title': 'Demo_Pydantic_Nested_Model_b_d',
'type': 'object',
'properties': {'e': {'title': 'E', 'default': True, 'type': 'boolean'},
'f': {'title': 'F', 'default': 0.5, 'type': 'number'}}},
'Demo_Pydantic_Nested_Model_b': {'title': 'Demo_Pydantic_Nested_Model_b',
'type': 'object',
'properties': {'c': {'title': 'C', 'default': 'hi', 'type': 'string'},
'd': {'title': 'D',
'default': {'e': True, 'f': 0.5},
'allOf': [{'$ref': '#/definitions/Demo_Pydantic_Nested_Model_b_d'}]}}}}}
Validation on Test Data
test_dict = {"a": 0, "i": "hello", "k": 123}
model(**test_dict).dict()
Advantages over the original answer:
Extended Default Values (for nested structures)
Easier Type Declarations

Flatten a nested dict structure into a dataset

For some post-processing, I need to flatten a structure like this
a = {'foo': {
        'cat': {'name': 'Hodor', 'age': 7},
        'dog': {'name': 'Mordor', 'age': 5}},
     'bar': {'rat': {'name': 'Izidor', 'age': 3}}
}
into this dataset:
[{'foobar': 'foo', 'animal': 'dog', 'name': 'Mordor', 'age': 5},
 {'foobar': 'foo', 'animal': 'cat', 'name': 'Hodor', 'age': 7},
 {'foobar': 'bar', 'animal': 'rat', 'name': 'Izidor', 'age': 3}]
So I wrote this function:
import copy

def flatten(data, primary_keys):
    out = []
    keys = copy.copy(primary_keys)
    keys.reverse()
    def visit(node, primary_values, prim):
        if len(prim):
            p = prim.pop()
            for key, child in node.items():
                primary_values[p] = key
                visit(child, primary_values, copy.copy(prim))
        else:
            new = copy.copy(node)
            new.update(primary_values)
            out.append(new)
    visit(data, {}, keys)
    return out

out = flatten(a, ['foobar', 'animal'])
I was not really satisfied because I have to use copy.copy to protect my inputs. Obviously, when using flatten, one does not want the inputs to be altered.
Then I thought about one alternative that uses more global variables (at least global to flatten) and uses an index instead of directly passing primary_keys to visit. However, this does not really help me to get rid of the ugly initial copy:
keys = copy.copy(primary_keys)
keys.reverse()
So here is my final version:
def flatten(data, keys):
    data = copy.copy(data)
    keys = copy.copy(keys)
    keys.reverse()
    out = []
    values = {}
    def visit(node, id):
        if id:
            id -= 1
            for key, child in node.items():
                values[keys[id]] = key
                visit(child, id)
        else:
            node.update(values)
            out.append(node)
    visit(data, len(keys))
    return out
Is there a better implementation (that can avoid the use of copy.copy)?
Edit: modified to account for variable dictionary depth.
By using the merge function from my previous answer (below), you can avoid calling update, which modifies the caller. There is then no need to copy the dictionary first.
def flatten(data, keys):
    out = []
    values = {}
    def visit(node, id):
        if id:
            id -= 1
            for key, child in node.items():
                values[keys[id]] = key
                visit(child, id)
        else:
            out.append(merge(node, values))  # use merge instead of update
    visit(data, len(keys))
    return out
One thing I don't understand is why you need to protect the keys input. I don't see them being modified anywhere.
Previous answer
How about list comprehension?
def merge(d1, d2):
    return dict(list(d1.items()) + list(d2.items()))

[[merge({'foobar': key, 'animal': sub_key}, sub_sub_dict)
  for sub_key, sub_sub_dict in sub_dict.items()]
 for key, sub_dict in a.items()]
The tricky part was merging the dictionaries without using update (which returns None).
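On Python 3.5+ the merge helper reduces to dict unpacking (or the | operator on 3.9+):
def merge(d1, d2):
    return {**d1, **d2}  # or: d1 | d2 on Python 3.9+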

Using recursion to reverse a dictionary around a value in python

I have a data set which follows the structure of the following example:
exampleset = {
    'body': {
        'abdomen': [{
            'arms': {
                'value': 2,
            }
        }, {
            'legs': {
                'value': 2,
            }
        }],
        'hands': {
            'fingers': {
                'value': 5,
            }
        },
    }
}
I am trying to reverse this so I get something like:
{'value': {'value1': {5: {'fingers': {'hands': {'body': {}}}}},
           'value2': {2: {'legs': {'abdomen': {'body': {}}}}},
           'value3': {2: {'arms': {'abdomen': {'body': {}}}}}}}
(I hope I got the bracket matching right, but you get the idea.)
I am using a couple of recursion functions to do this, like so:
def recurse_find(data, values, count):
    global conf
    for key in data:
        for v in conf['value_names']:
            if key == v:
                values[v + str(count)] = {}
                values[v + str(count)][data[key]] = {}
                count += 1
                # originally just using this line:
                # values[data[key]] = {}
        if type(data[key]) is list:
            for i in data[key]:
                if type(i) is dict:
                    values = recurse_find(i, values, count)
                    values = add_new_level(values, key)
        elif type(data[key]) is dict:
            values = recurse_find(data[key], values, count)
            values = add_new_level(values, key)
    return values

def add_new_level(data, new_key):
    for key in data:
        if data[key] == {}:
            data[key][new_key] = {}
        else:
            data[key] = add_new_level(data[key], new_key)
    return data

conf = {"value_names": ["value"]}
values = {}
for value in conf['value_names']:
    values[value] = recurse_find(exampleset, {}, 1)
print(values)
At the moment I only get one value returned correctly; obviously I would like them all. Originally I didn't label the values (value1, value2, etc.), but when doing this example set I realised that if the values are the same, I'll only get one! If I remove the value-name keys it finds all the values (unless they are duplicates), but it still doesn't return the correct levels, as it includes some of the others while it loops round. I don't care about the order of the values, just that they are labelled differently so I don't miss any.
Current result:
{'value': {'value1': {5: {'fingers': {'hands': {'body': {}}}}}}}
I think that the solution is the inclusion of a pretty simple step, but I can't see it at the moment and I've already spent too long looking at this.
Any help appreciated.
EDIT:
I've gotten a little further by changing my recursive function to make count a global variable, with count=1 set outside the function, which has sorted out the problem of getting all the values.
I have narrowed down the addition of extra keys to the add_new_level function, but haven't yet figured out how to change it.
Output:
{'value': {'value1': {2: {'arms': {'abdomen': {'legs': {'abdomen': {'fingers': {'hands': {'body': {}}}}}}}}},
           'value2': {2: {'legs': {'abdomen': {'fingers': {'hands': {'body': {}}}}}}},
           'value3': {5: {'fingers': {'hands': {'body': {}}}}}}}
I have adjusted your output type slightly, making the dictionary containing 'value1', 'value2', etc. into an array. I believe this is better because the order will be lost anyway unless an OrderedDict (from the collections package) is used, and in any case an array translates quite easily from indexes 0, 1, 2, 3... to val1, val2, val3, etc.
res = {'value': []}

def revnest(inp, keys=[]):
    res2 = res['value']
    if type(inp) == list:
        inp = {i: j[i] for j in inp for i in j}
    for x in inp:
        if x == 'value':
            res2.append({inp[x]: {}})
            res2 = res2[-1][inp[x]]
            for y in keys[::-1]:
                res2[y] = {}
                res2 = res2[y]
        else:
            revnest(inp[x], keys + [x])

revnest(exampleset)
print(res)
which, given your exampleset, prints:
{'value': [{2: {'legs': {'abdomen': {'body': {}}}}}, {2: {'arms': {'abdomen': {'body': {}}}}}, {5: {'fingers': {'hands': {'body': {}}}}}]}

How do I serialize / deserialize this entity?

The code below defines a dictionary used to transform field values. Data is read, some of the values are transformed based on this dictionary, and the result is written to a table. It works as-is. The problem: I now want to move this configuration out of the .py file into a JSON configuration file.
lookups = {
    11: {
        "ST1": ["ABC"],
        "UNK01": ["125", "ACD"],
        "A": ["52"],
        "B": ["91"],
        "C": ["92"],
        "D": ["95"]
    },
    10: {
        "XYZ01": ["91"],
        "XYZ02": ["83"],
        "XYZ03": ["27"]
    }
}
According to jsonlint.com, in order for the value assigned to lookups to be valid JSON, I must quote the 11 and 10 keys. Doing so breaks my Python code with TypeError: list indices must be integers, not str.
How do I create valid JSON and minimize changes to my code?
If you want to dump it to a json file:
import json

with open("config.json", "w") as f:
    json.dump(lookups, f)  # dump dict to file

with open("config.json") as f:
    s = json.load(f)  # load dict from file

print(s)
{'11': {'ST1': ['ABC'], 'A': ['52'], 'D': ['95'], 'UNK01': ['125', 'ACD'], 'B': ['91'], 'C': ['92']}, '10': {'XYZ01': ['91'], 'XYZ03': ['27'], 'XYZ02': ['83']}}
If you need the keys as ints, you can loop and cast them to int, or use pickle:
import pickle

with open("in.pkl", "wb") as f:
    pickle.dump(lookups, f)

with open("in.pkl", "rb") as f:
    s = pickle.load(f)

print(s)
{10: {'XYZ03': ['27'], 'XYZ01': ['91'], 'XYZ02': ['83']}, 11: {'UNK01': ['125', 'ACD'], 'B': ['91'], 'D': ['95'], 'ST1': ['ABC'], 'C': ['92'], 'A': ['52']}}
If not, just use it as is.
If you know what type your keys are, a simple int() cast on the keys would suffice:
dictionary_from_json = json.loads(dumped)
newdict = {}
for key, val in dictionary_from_json.items():
    newdict[int(key)] = val
You can extend json.JSONDecoder and convert all keys to int where possible.
import json

class Json(json.JSONDecoder):
    def decode(self, json_string):
        default_obj = super(Json, self).decode(json_string)
        new_obj = self._rec_serial(default_obj)
        return new_obj

    def _rec_serial(self, default):
        new_dict = {}
        for key, value in default.items():
            is_dict = isinstance(value, dict)
            value = self._rec_serial(value) if is_dict else value
            try:
                new_dict[int(key)] = value
            except ValueError:
                new_dict[key] = value
        return new_dict

json2 = Json()
d = json2.decode(dumped)
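For example (my sketch, assuming dumped is the JSON string produced from the lookups dict in the question):
dumped = json.dumps(lookups)
d = Json().decode(dumped)
print(d[11]["ST1"])  # ['ABC'] - the integer keys are restored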
