Adding entries to Python dict in variable locations - python

I have a JSON file that looks something like this (I've left a lot out to keep it shorter, so ignore the missing brackets):
{
  "id": "79cb20b0-02be-42c7-9b45-96407c888dc2",
  "tenantId": "00000000-0000-0000-0000-000000000000",
  "name": "2-stufiges Stirnradgetriebe",
  "description": null,
  "visibility": "None",
  "method": "IDM_CALCULATE_GEAR_COUPLED",
  "created": "2018-10-16T10:25:20.874Z",
  "createdBy": "00000000-0000-0000-0000-000000000000",
  "lastModified": "2018-10-16T10:25:28.226Z",
  "lastModifiedBy": "00000000-0000-0000-0000-000000000000",
  "client": "STRING_BEARINX_ONLINE",
  "project": {
    "id": "10c37dcc-0e4e-4c4d-a6d6-12cf65cceaf9",
    "name": "proj 2",
    "isBookmarked": false
  },
  "rootObject": {
    "id": "6ff0010c-00fe-485b-b695-4ddd6aca4dcd",
    "type": "IDO_GEAR",
    "children": [
      {
        "id": "1dd94d1a-e52d-40b3-a82b-6db02a8fbbab",
        "type": "IDO_SYSTEM_LOADCASE",
        "children": [],
        "childList": "SYSTEMLOADCASE",
        "properties": [
          {
            "name": "IDCO_IDENTIFICATION",
            "value": "1dd94d1a-e52d-40b3-a82b-6db02a8fbbab"
          },
          {
            "name": "IDCO_DESIGNATION",
            "value": "Lastfall 1"
          },
          {
            "name": "IDSLC_TIME_PORTION",
            "value": 100
          },
          {
            "name": "IDSLC_DISTANCE_PORTION",
            "value": 100
          },
          {
            "name": "IDSLC_OPERATING_TIME_IN_HOURS",
            "value": 1
          },
          {
            "name": "IDSLC_OPERATING_TIME_IN_SECONDS",
            "value": 3600
          },
          {
            "name": "IDSLC_OPERATING_REVOLUTIONS",
            "value": 1
          },
          {
            "name": "IDSLC_OPERATING_DISTANCE",
            "value": 1
          },
          {
            "name": "IDSLC_ACCELERATION",
            "value": 9.81
          },
          {
            "name": "IDSLC_EPSILON_X",
            "value": 0
          },
          {
            "name": "IDSLC_EPSILON_Y",
            "value": 0
          },
          {
            "name": "IDSLC_EPSILON_Z",
            "value": 0
          },
          {
            "name": "IDSLC_CALCULATION_WITH_OWN_WEIGHT",
            "value": "CO_CALCULATION_WITHOUT_OWN_WEIGHT"
          },
          {
            "name": "IDSLC_CALCULATION_WITH_TEMPERATURE",
            "value": "CO_CALCULATION_WITH_TEMPERATURE"
          },
          {
            "name": "IDSLC_FLAG_FOR_LOADCASE_CALCULATION",
            "value": "LB_CALCULATE_LOADCASE"
          },
          {
            "name": "IDSLC_STATUS_OF_LOADCASE_CALCULATION",
            "value": false
          }
        ],
        "position": 1,
        "order": 1,
        "support_vector": {
          "x": 0,
          "y": 0,
          "z": 0
        },
        "u_axis_vector": {
          "x": 1,
          "y": 0,
          "z": 0
        },
        "w_axis_vector": {
          "x": 0,
          "y": 0,
          "z": 1
        },
        "role": "_none_"
      },
      {
        "id": "ab7fbf37-17bb-4e60-a543-634571a0fd73",
        "type": "IDO_SHAFT_SYSTEM",
        "children": [
          {
            "id": "7f034e5c-24df-4145-bab8-601f49b43b50",
            "type": "IDO_RADIAL_ROLLER_BEARING",
            "children": [
              {
                "id": "0b3e695b-6028-43af-874d-4826ab60dd3f",
                "type": "IDO_RADIAL_BEARING_INNER_RING",
                "children": [
                  {
                    "id": "330aa09d-60fb-40d7-a190-64264b3d44b7",
                    "type": "IDO_LOADCONTAINER",
                    "children": [
                      {
                        "id": "03036040-fc1a-4e52-8a69-d658e18a8d4a",
                        "type": "IDO_DISPLACEMENT",
                        "children": [],
                        "childList": "DISPLACEMENT",
                        "properties": [
                          {
                            "name": "IDCO_IDENTIFICATION",
                            "value": "03036040-fc1a-4e52-8a69-d658e18a8d4a"
                          },
                          {
                            "name": "IDCO_DESIGNATION",
                            "value": "Displacement 1"
                          }
                        ]
I want to add entries to it, but the problem is that the location where I want to add them isn't uniform. The id key will change: for example, sometimes I may want to add the entries under "id": "ab7fbf37-17bb-4e60-a543-634571a0fd73" and another time under "id": "0b3e695b-6028-43af-874d-4826ab60dd3f".
The code I have loops through another file, and every time it finds an id and a property name it stores the id, name, and value.
The code I'm currently using is:
import os
import json
import shutil
import re
import fileinput

# Finds and lists the folders that have been provided
d = '.'
folders = list(filter(lambda x: os.path.isdir(os.path.join(d, x)), os.listdir(d)))
print("Folders found: ")
print(folders)
print("\n")

def processModelFolder(inFolder):
    # Creating the file names
    fileName = os.path.join(d, inFolder, inFolder + ".mdl")
    fileNameTwo = os.path.join(d, inFolder, inFolder + ".vg2.json")
    fileNameThree = os.path.join(d, inFolder, inFolder + "APPENDED.vg2.json")
    # Copying the json file so the new copy can be appended
    shutil.copyfile(fileNameTwo, fileNameThree)
    # Assigning IDs and properties to search for in the mdl file
    IDs = ["7f034e5c-24df-4145-bab8-601f49b43b50"]
    Properties = ["IDSU_FX[0]", "IDSU_FY[0]", "IDSU_FZ[0]"]
    # Basic check to see if IDs and Properties are valid
    for i in IDs:
        if len(i) != 36:
            print("ID may not have been valid and might not return the results you expect, check to ensure the characters are correct: ")
            print(i)
            print("\n")
    if len(IDs) == 0:
        print("No IDs were given!")
    elif len(Properties) == 0:
        print("No Properties were given!")
    # Reads code until an ID is found
    else:
        with open(fileName, "r") as in_file:
            IDCO = None
            for n, line in enumerate(in_file, 1):
                if line.startswith('IDCO_IDENTIFICATION'):
                    # Checks if the second part of each line is an ID tag in IDs
                    if line.split('"')[1] in IDs:
                        # If ID found it is stored as IDCO
                        IDCO = line.split('"')[1]
                    else:
                        if IDCO:
                            pass
                        IDCO = None
                # Checks if the first part of each line is a Prop in Properties
                elif IDCO and line.split(' ')[0] in Properties:
                    print('Found! ID:{} Prop:{} Value: {}'.format(IDCO, line.split('=')[0][:-1], line.split('=')[1][:-1]))
                    print("\n")
                    # Stores the property name and value
                    name = str(line.split(' ')[0])
                    value = str(line.split(' ')[2])
                    print(name)
                    print(value)
                    # json file editing
                    with open(fileNameThree, "r+") as json_data:
                        python_obj = json.load(json_data)
                        new_element = [{"name": name, "value": value}]
                        python_obj['"id": "485f5bf4-fb97-415b-8b42-b46e9be080da"']
                        # for each in new_elements:
                        #     data['rootObject']['children'][0]['properties'].append(each)
        print('Processed {} lines in file {}'.format(n, fileName))

for modelFolder in folders:
    processModelFolder(modelFolder)
Is there any obvious way to dynamically change where it places the new entries? I'm having a hard time because the nesting level the target id sits at can change.
Updated code:
import os
import json
import shutil
import re
import fileinput

# Finds and lists the folders that have been provided
d = '.'
folders = list(filter(lambda x: os.path.isdir(os.path.join(d, x)), os.listdir(d)))
print("Folders found: ")
print(folders)
print("\n")

def processModelFolder(inFolder):
    # Creating the file names
    fileName = os.path.join(d, inFolder, inFolder + ".mdl")
    fileNameTwo = os.path.join(d, inFolder, inFolder + ".vg2.json")
    fileNameThree = os.path.join(d, inFolder, inFolder + "APPENDED.vg2.json")
    # Copying the json file so the new copy can be appended
    shutil.copyfile(fileNameTwo, fileNameThree)
    # Assigning IDs and properties to search for in the mdl file
    IDs = ["7f034e5c-24df-4145-bab8-601f49b43b50"]
    Properties = ["IDSU_FX[0]", "IDSU_FY[0]", "IDSU_FZ[0]"]
    # Basic check to see if IDs and Properties are valid
    for i in IDs:
        if len(i) != 36:
            print("ID may not have been valid and might not return the results you expect, check to ensure the characters are correct: ")
            print(i)
            print("\n")
    if len(IDs) == 0:
        print("No IDs were given!")
    elif len(Properties) == 0:
        print("No Properties were given!")
    # Reads code until an ID is found
    else:
        with open(fileName, "r") as in_file:
            IDCO = None
            for n, line in enumerate(in_file, 1):
                if line.startswith('IDCO_IDENTIFICATION'):
                    # Checks if the second part of each line is an ID tag in IDs
                    if line.split('"')[1] in IDs:
                        # If ID found it is stored as IDCO
                        IDCO = line.split('"')[1]
                    else:
                        if IDCO:
                            pass
                        IDCO = None
                # Checks if the first part of each line is a Prop in Properties
                elif IDCO and line.split(' ')[0] in Properties:
                    print('Found! ID:{} Prop:{} Value: {}'.format(IDCO, line.split('=')[0][:-1], line.split('=')[1][:-1]))
                    print("\n")
                    # Stores the property name and value
                    name = str(line.split(' ')[0])
                    value = str(line.split(' ')[2])
                    key = '"id": "' + IDCO + '"'
                    print(key)
                    print(name)
                    print(value)
                    # json file editing
                    with open(fileNameThree, "r+") as json_data:
                        python_obj = json.load(json_data)
                        new_element = [{"name": name, "value": value}]
                        print("NEW ELEMENT:")
                        print(new_element)
                        for each in new_element:
                            children = {x['id']: x for x in python_obj['rootObject']['children']}
                            children[IDCO]['properties'].append(each)  # index by the matched id, not the IDs list
                            python_obj['rootObject']['children'] = [x for _, x in children.items()]
                    with open(fileNameThree, "w") as json_data:
                        json.dump(python_obj, json_data, indent=3)
        print('Processed {} lines in file {}'.format(n, fileName))

for modelFolder in folders:
    processModelFolder(modelFolder)

You can convert the 'children' list into a dict keyed by id:
children = {x['id']: x for x in data['rootObject']['children']}
Or, if order matters to you (note that on Python 3.7+ a plain dict already preserves insertion order), do:
from collections import OrderedDict

od = OrderedDict()
for child in data['rootObject']['children']:
    od[child['id']] = child
Then do the insertion based on id as
children[<<id here>>]['properties'].append(each)
Then convert the dict back to a list:
data['rootObject']['children'] = [x for _, x in children.items()]
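Putting that together with the file handling from the question, a minimal runnable sketch might look like this (the file name and the inserted value are hypothetical placeholders; note it only reaches direct children of rootObject, so arbitrarily nested ids still need a recursive search, as in the last question below):
import json
from collections import OrderedDict

def append_property(path, target_id, name, value):
    # Load the JSON, index rootObject's children by id, append the new
    # name/value pair to the matching child's properties, and write it back.
    with open(path, "r") as f:
        data = json.load(f)
    children = OrderedDict((x['id'], x) for x in data['rootObject']['children'])
    children[target_id]['properties'].append({"name": name, "value": value})
    data['rootObject']['children'] = list(children.values())
    with open(path, "w") as f:
        json.dump(data, f, indent=3)

# hypothetical usage with one of the ids from the question
append_property("model.vg2.json", "ab7fbf37-17bb-4e60-a543-634571a0fd73", "IDSU_FX[0]", "1000")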

Related

How to convert nested json to csv with multiple different names?

I've been trying to convert a nested JSON file to CSV. Here is a small example of the JSON file.
json_data = {
    "labels": {
        "longfilename01:png": {
            "events": {
                "-N8V6uUR__vvB0qv1lPb": {
                    "t": "2022-08-02T19:54:23.608Z",
                    "user": "bmEhwNCZT9Wiftgvsopb7vBjO9o1"
                }
            },
            "questions": {
                "would-you": {
                    "-N8V6uUR__vvB0qv1lPb": {
                        "answer": "no",
                        "format": 1
                    }
                }
            }
        },
        "longfilename02:png": {
            "events": {
                "-N8ILnaH-1ylwp2LGvtP": {
                    "t": "2022-07-31T08:24:23.698Z",
                    "user": "Qf7C5cXQkXfQanxKPR0rsKW4QzE2"
                }
            },
            "questions": {
                "would-you": {
                    "-N8ILnaH-1ylwp2LGvtP": {
                        "answer": "yes",
                        "format": 1
                    }
                }
            }
        }
    }
}
I've tried multiple ways to get this output:
Labels            Event                 User                          Time                      Answer
Long filename 01  -N8V6uUR__vvB0qv1lPb  bmEhwNCZT9Wiftgvsopb7vBjO9o1  2022-08-02T19:54:23.608Z  no
Long filename 02  -N8ILnaH-1ylwp2LGvtP  bmEhwNCZT9Wiftgvsopb7vBjO9o1  2022-07-31T08:24:23.698Z  yes
If I normalise with:
f = open('after_labels.json')
data = json.load(f)
df = pd.json_normalize(data)
Or try to flatten the file with multiple functions such as:
def flatten_json(json):
    def process_value(keys, value, flattened):
        if isinstance(value, dict):
            for key in value.keys():
                process_value(keys + [key], value[key], flattened)
        elif isinstance(value, list):
            for idx, v in enumerate(value):
                process_value(keys + [str(idx)], v, flattened)
        else:
            flattened['__'.join(keys)] = value
    flattened = {}
    for key in json.keys():
        process_value([key], json[key], flattened)
    return flattened
df = flatten_json(data)
or
from copy import deepcopy
import pandas

def cross_join(left, right):
    new_rows = [] if right else left
    for left_row in left:
        for right_row in right:
            temp_row = deepcopy(left_row)
            for key, value in right_row.items():
                temp_row[key] = value
            new_rows.append(deepcopy(temp_row))
    return new_rows

def flatten_list(data):
    for elem in data:
        if isinstance(elem, list):
            yield from flatten_list(elem)
        else:
            yield elem

def json_to_dataframe(data_in):
    def flatten_json(data, prev_heading=''):
        if isinstance(data, dict):
            rows = [{}]
            for key, value in data.items():
                rows = cross_join(rows, flatten_json(value, prev_heading + '.' + key))
        elif isinstance(data, list):
            rows = []
            for item in data:
                [rows.append(elem) for elem in flatten_list(flatten_json(item, prev_heading))]
        else:
            rows = [{prev_heading[1:]: data}]
        return rows
    return pandas.DataFrame(flatten_json(data_in))
df = json_to_dataframe(data)
print(df)
It gives me 292 columns and I suspect this is because of the long unique filenames.
I can't change the JSON file before processing; otherwise the simple fix would be to use "filename": "longfilename01:png" so the keys are all consistent and this problem wouldn't arise.
I would be grateful for any other clever ideas on how to solve this.
Try:
import pandas as pd

json_data = {
    "labels": {
        "longfilename01:png": {
            "events": {
                "-N8V6uUR__vvB0qv1lPb": {
                    "t": "2022-08-02T19:54:23.608Z",
                    "user": "bmEhwNCZT9Wiftgvsopb7vBjO9o1",
                }
            },
            "questions": {
                "would-you": {
                    "-N8V6uUR__vvB0qv1lPb": {"answer": "no", "format": 1}
                }
            },
        },
        "longfilename02:png": {
            "events": {
                "-N8ILnaH-1ylwp2LGvtP": {
                    "t": "2022-07-31T08:24:23.698Z",
                    "user": "Qf7C5cXQkXfQanxKPR0rsKW4QzE2",
                }
            },
            "questions": {
                "would-you": {
                    "-N8ILnaH-1ylwp2LGvtP": {"answer": "yes", "format": 1}
                }
            },
        },
    }
}

df = pd.DataFrame(
    [
        {
            "Labels": k,
            "Event": list(v["events"])[0],
            "User": list(v["events"].values())[0]["user"],
            "Time": list(v["events"].values())[0]["t"],
            "Answer": list(list(v["questions"].values())[0].values())[0]["answer"],
        }
        for k, v in json_data["labels"].items()
    ]
)
print(df)
print(df)
Prints:
               Labels                 Event                          User                      Time Answer
0  longfilename01:png  -N8V6uUR__vvB0qv1lPb  bmEhwNCZT9Wiftgvsopb7vBjO9o1  2022-08-02T19:54:23.608Z     no
1  longfilename02:png  -N8ILnaH-1ylwp2LGvtP  Qf7C5cXQkXfQanxKPR0rsKW4QzE2  2022-07-31T08:24:23.698Z    yes
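A hedged variation on the same idea, in case a file can have more than one event: instead of always taking the first entry with list(...)[0], iterate the events and look the answer up by the shared event id (this assumes the question key is always "would-you", as in the sample data):
import pandas as pd

rows = []
for fname, v in json_data["labels"].items():
    answers = v["questions"]["would-you"]
    for event_id, ev in v["events"].items():
        rows.append({
            "Labels": fname,
            "Event": event_id,
            "User": ev["user"],
            "Time": ev["t"],
            # the answer for an event is keyed by the same id under "questions"
            "Answer": answers.get(event_id, {}).get("answer"),
        })
df = pd.DataFrame(rows)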

create yaml with AWS CloudFormation references

I need Python code that would create the YAML below.
Tags:
  - Key: key1
    Value: !Ref 'AWS::StackName'
  - Key: Key2
    Value: !Ref 'AWS::StackId'
Here is what I have that doesn't do the trick.
def generate_resource(ami, source_data):
    resource = {
        "Type": "AWS::EC2::Instance",
        "Properties": {
            "ImageId": ami["ImageId"],
            "InstanceType": ami["InstanceType"],
            "PrivateIpAddress": ami["PrivateIpAddress"],
            "KeyName": ami["KeyName"]  # <- missing comma; this is the syntax error mentioned below
            "SubnetId": { "Ref": "SubnetId" },
            "SecurityGroupIds": { "Ref": "SecurityGroupId" },
            "Tags": [
                { "Key": "key1", "Value": "{!Ref 'AWS::StackName'}"},
                { "Key": "key2", "Value": "{!Ref 'AWS::StackId'}"}
            ]
        }
    }
The YAML output from this code is not properly formatted: it simply emits {!Ref 'AWS::StackName'} as a literal string value.
import os, sys
import lib.aws as aws, lib.cft as cft, lib.inventory as inventory

BUCKET_NAME = 'testbucket'

def generate_cft(commit_hash, file_dict, dry_run):
    return (
        "# Autogenerated CFT for commit hash " + commit_hash + "\n" +
        cft.generate(inventory.read(file_dict["path"]))
    )

def upload_cft(commit_hash, file_dict, cft_text):
    target_key = commit_hash + "/" + file_dict["name"].split("_")[0] + ".yaml"
    aws.upload(BUCKET_NAME, target_key, cft_text)

def show_cft(file_dict, cft_text):
    print(file_dict["path"] + " generates the following cft:")
    print("")
    print(cft_text)
    print("")

def generate_and_upload(commit_hash, file_dict, dry_run):
    cft_text = generate_cft(commit_hash, file_dict, dry_run)
    aws.validate_cft(cft_text)
    if dry_run:
        show_cft(file_dict, cft_text)
    else:
        upload_cft(commit_hash, file_dict, cft_text)

def generate_and_upload_all(commit_hash, dry_run):
    for file_dict in inventory.list():
        print("generating cft for " + file_dict["path"])
        generate_and_upload(commit_hash, file_dict, dry_run)

if __name__ == "__main__":
    if not os.getcwd().endswith("ci"):
        print("Please run this script from the ci directory")
        exit()
    commit_hash = sys.argv[1] if len(sys.argv) >= 2 else "test"
    generate_and_upload_all(commit_hash, False)
YES!!!! I played around with the code and figured it out. It was a syntax error during the CFT creation (the missing comma after "KeyName" above). Now when I create a stack with the generated CFT, the tag value is replaced by the actual StackId. Thanks everyone for your help and guidance.
def generate_resource(ami, source_data):
    resource = {
        "Type": "AWS::EC2::Instance",
        "Properties": {
            "ImageId": ami["ImageId"],
            "InstanceType": ami["InstanceType"],
            "PrivateIpAddress": ami["PrivateIpAddress"],
            "KeyName": ami["KeyName"],
            "SubnetId": { "Ref": "SubnetId" },
            "SecurityGroupIds": { "Ref": "SecurityGroupId" },
            "Tags": [
                { "Key": "Name", "Value": ami["Name"] },
                { "Key": "BootUpDependsOn", "Value": ami["BootUpDependsOn"]},
                { "Key": "WaitTimeAfterBootUp", "Value": ami["WaitTimeAfterBootUp"]},
                { "Key": "Key1", "Value": { "Ref": "AWS::StackName" }},
                { "Key": "Key2", "Value": { "Ref": "AWS::StackId" }}
            ]
        }
    }
    return resource  # presumably returned to the caller; the original snippet ended without a return
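For completeness: the long form { "Ref": ... } used above is what the !Ref short form expands to, so CloudFormation treats them identically. If the short-form !Ref tag is specifically wanted in the emitted YAML, one option is a custom PyYAML representer. A minimal sketch, assuming PyYAML is installed and a small wrapper class is acceptable:
import yaml

class Ref:
    # Marker type for values that should be emitted with the !Ref tag.
    def __init__(self, target):
        self.target = target

def ref_representer(dumper, ref):
    # Emit the wrapped value as a scalar node tagged !Ref.
    return dumper.represent_scalar('!Ref', ref.target)

yaml.add_representer(Ref, ref_representer)

doc = {"Tags": [
    {"Key": "key1", "Value": Ref("AWS::StackName")},
    {"Key": "Key2", "Value": Ref("AWS::StackId")},
]}
print(yaml.dump(doc, default_flow_style=False))
This prints Value: !Ref 'AWS::StackName' (possibly without the quotes, which is equivalent YAML).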

Read JSON index in Python

I want to read a JSON file with Python, like this:
{
  "id": "27147e64-9ef5-42d8-b32e-b46b19071ee3b84e0e07-669e-4a10-8124-8e0d71a08e7e",
  "image": "img0171.png",
  "width": 640,
  "height": 480,
  "tags": [
    {
      "name": "becks_long_neck",
      "parent": null,
      "id": "b2d59c98-0bdc-4d13-ad1b-9d4ab5bc1fb3",
      "color": "#e62921",
      "type": "bounding_box",
      "pos": {
        "x": 387,
        "y": 310.06667073567706,
        "w": 62.666666666666686,
        "h": 38.219034830729186
      }
    },
    {
      "name": "becks_long_neck",
      "parent": null,
      "id": "75635f60-e6b9-4408-89fb-ed435355dac6",
      "color": "#e62921",
      "type": "bounding_box",
      "pos": {
        "x": 358.5,
        "y": 354.06667073567706,
        "w": 40.833333333333314,
        "h": 31.666666666666686
      }
    }
  ]
}
When I want to access the second name, I try something like this:
for dictionary in datastore:
    filename = dictionary['image']
    tag = dictionary['tags'][0]['name']
    if(dictionary['tags'][1]['name']):
        tag2 = dictionary['tags'][1]['name']
    print(tag)
    x = dictionary['tags'][0]['pos']['x']
    print(x)
    y = dictionary['tags'][0]['pos']['y']
    print(y)
    w = dictionary['tags'][0]['pos']['w']
    print(w)
    h = dictionary['tags'][0]['pos']['h']
    print(h)
but it shows me this error:
Traceback (most recent call last):
  File "json_to_txt.py", line 65, in <module>
    if(dictionary['tags'][1]['name']):
IndexError: list index out of range
How can I access the second 'name' object?
You don't need to define each individual variable (tag, tag2, etc.) explicitly. Rather, leave this work to the loop, e.g. make it dynamic as in the case below by changing the lookup structure from dictionary[datastore] to datastore[dictionary]:
import json

s = '{"id": "27147e64-9ef5-42d8-b32e-b46b19071ee3b84e0e07-669e-4a10-8124-8e0d71a08e7e","image": "img0171.png","width": 640,"height": 480,"tags": [{"name": "becks_long_neck","parent": null,"id": "b2d59c98-0bdc-4d13-ad1b-9d4ab5bc1fb3","color": "#e62921","type": "bounding_box","pos": {"x": 387,"y": 310.06667073567706,"w": 62.666666666666686,"h": 38.219034830729186}},{"name": "becks_long_neck","parent": null,"id": "75635f60-e6b9-4408-89fb-ed435355dac6","color": "#e62921","type": "bounding_box","pos": {"x": 358.5,"y": 354.06667073567706,"w": 40.833333333333314,"h": 31.666666666666686}}]}'
datastore = json.loads(s)
i = 0
for dictionary in datastore:
    if dictionary == 'image':
        filename = datastore[dictionary]
    if dictionary == 'tags':
        tag = datastore[dictionary]
        for dictionary in tag:
            print("tag_name", i, tag[i]['name'])
            i += 1
>>>
tag_name 0 becks_long_neck
tag_name 1 becks_long_neck
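For what it's worth, iterating the 'tags' list directly avoids both the index bookkeeping and the IndexError when an image has fewer than two tags. A minimal sketch, assuming datastore is the parsed dict from above:
tags = datastore.get('tags', [])
for i, tag in enumerate(tags):
    pos = tag['pos']
    print(i, tag['name'], pos['x'], pos['y'], pos['w'], pos['h'])

# the original IndexError came from indexing tags[1] unconditionally;
# guard on the length instead if the second tag is optional
if len(tags) > 1:
    tag2 = tags[1]['name']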

Identify root folder in a file system in Python

I have a recursive method that traverses a file system structure and creates a dictionary from it.
This is the code:
def path_to_dict(path):
    d = {'name': os.path.basename(path)}
    if os.path.isdir(path):
        d['type'] = "directory"
        d['path'] = os.path.relpath(path).strip('..\\').replace('\\', '/')
        d['children'] = [path_to_dict(os.path.join(path, x)) for x in os.listdir(path)]
    else:
        d['type'] = "file"
        d['path'] = os.path.relpath(path).strip('..\\').replace('\\', '/')
        with open(path, 'r', encoding="utf-8", errors='ignore') as myfile:
            content = myfile.read().splitlines()
        d['content'] = content
    return d  # the return was missing from the snippet; the recursion relies on it
At the moment, if the path is a folder, it gets the keys name, type, path and children, where children is an array that can contain further folders or files. If it is a file, it gets the keys name, type, path and content.
After converting it to JSON, the final structure is like this.
{
  "name": "nw",
  "type": "directory",
  "path": "Parsing/nw",
  "children": [{
    "name": "New folder",
    "type": "directory",
    "path": "Parsing/nw/New folder",
    "children": [{
      "name": "abc",
      "type": "directory",
      "path": "Parsing/nw/New folder/abc",
      "children": [{
        "name": "text2.txt",
        "type": "file",
        "path": "Parsing/nw/New folder/abc/text2.txt",
        "content": ["abc", "def", "dfg"]
      }]
    }, {
      "name": "text2.txt",
      "type": "file",
      "path": "Parsing/nw/New folder/text2.txt",
      "content": ["abc", "def", "dfg"]
    }]
  }, {
    "name": "text1.txt",
    "type": "file",
    "path": "Parsing/nw/text1.txt",
    "content": ["aaa "]
  }, {
    "name": "text2.txt",
    "type": "file",
    "path": "Parsing/nw/text2.txt",
    "content": []
  }]
}
Now I want the script to set the type to the value root for the root folder only. How can I do this?
I think you want something similar to the following implementation. Only the root element will contain "type": "root"; the child elements won't contain this key-value pair.
def path_to_dict(path, child=False):
    d = {'name': os.path.basename(path)}
    if os.path.isdir(path):
        if not child:
            d['type'] = "root"
        d['path'] = os.path.relpath(path).strip('..\\').replace('\\', '/')
        d['children'] = [path_to_dict(os.path.join(path, x), child=True) for x in os.listdir(path)]
    else:
        if not child:
            d['type'] = "root"
        d['path'] = os.path.relpath(path).strip('..\\').replace('\\', '/')
        with open(path, 'r', encoding="utf-8", errors='ignore') as myfile:
            content = myfile.read().splitlines()
        d['content'] = content
    return d
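If you would rather keep "directory"/"file" on the children and only relabel the root, an alternative sketch is to build the tree with the original, unmodified function and patch the root afterwards:
tree = path_to_dict('Parsing/nw')  # hypothetical root path, as in the question's output
tree['type'] = 'root'              # only the top-level node is relabelled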

Adding a key/value pair once I have recursively searched a dict

I have searched a nested dict for certain keys and succeeded in locating the keys I am looking for, but I am not sure how to add a key/value pair at the location where the key was found. Is there a way to tell Python to append the data entry to the location it is currently looking at?
Code:
import os
import json
import shutil
import re
import fileinput
from collections import OrderedDict

# Finds and lists the folders that have been provided
d = '.'
folders = list(filter(lambda x: os.path.isdir(os.path.join(d, x)), os.listdir(d)))
print("Folders found: ")
print(folders)
print("\n")

def processModelFolder(inFolder):
    # Creating the file names
    fileName = os.path.join(d, inFolder, inFolder + ".mdl")
    fileNameTwo = os.path.join(d, inFolder, inFolder + ".vg2.json")
    fileNameThree = os.path.join(d, inFolder, inFolder + "APPENDED.vg2.json")
    # Copying the json file so the new copy can be appended
    shutil.copyfile(fileNameTwo, fileNameThree)
    # Assigning IDs and properties to search for in the mdl file
    IDs = ["7f034e5c-24df-4145-bab8-601f49b43b50"]
    Properties = ["IDSU_FX[0]"]
    # Basic check to see if IDs and Properties are valid
    for i in IDs:
        if len(i) != 36:
            print("ID may not have been valid and might not return the results you expect, check to ensure the characters are correct: ")
            print(i)
            print("\n")
    if len(IDs) == 0:
        print("No IDs were given!")
    elif len(Properties) == 0:
        print("No Properties were given!")
    # Reads code until an ID is found
    else:
        with open(fileName, "r") as in_file:
            IDCO = None
            for n, line in enumerate(in_file, 1):
                if line.startswith('IDCO_IDENTIFICATION'):
                    # Checks if the second part of each line is an ID tag in IDs
                    if line.split('"')[1] in IDs:
                        # If ID found it is stored as IDCO
                        IDCO = line.split('"')[1]
                    else:
                        if IDCO:
                            pass
                        IDCO = None
                # Checks if the first part of each line is a Prop in Properties
                elif IDCO and line.split(' ')[0] in Properties:
                    print('Found! ID:{} Prop:{} Value: {}'.format(IDCO, line.split('=')[0][:-1], line.split('=')[1][:-1]))
                    print("\n")
                    # Stores the property name and value
                    name = str(line.split(' ')[0])
                    value = str(line.split(' ')[2])
                    # creates the entry to be appended to the dict
                    # json file editing
                    with open(fileNameThree, "r+") as json_data:
                        python_obj = json.load(json_data)
                        # calling recursive search
                        get_recursively(python_obj, IDCO, name, value)
                    with open(fileNameThree, "w") as json_data:
                        json.dump(python_obj, json_data, indent=1)
        print('Processed {} lines in file: {}'.format(n, fileName))

def get_recursively(search_dict, IDCO, name, value):
    """
    Takes a dict with nested lists and dicts,
    and searches all dicts for a key of the field
    provided; when key "id" is found it checks to
    see if its value is the current IDCO tag, and if so it appends the new data.
    """
    fields_found = []
    for key, value in search_dict.iteritems():
        if key == "id":
            if value == IDCO:
                print("FOUND IDCO IN JSON: " + value + "\n")
        elif isinstance(value, dict):
            results = get_recursively(value, IDCO, name, value)
            for result in results:
                x = 1
        elif isinstance(value, list):
            for item in value:
                if isinstance(item, dict):
                    more_results = get_recursively(item, IDCO, name, value)
                    for another_result in more_results:
                        x = 1
    return fields_found

for modelFolder in folders:
    processModelFolder(modelFolder)
In short, once it finds a key/id value pair that I want, can I tell it to append name/value to that location directly and then continue?
nested dict:
{
  "id": "79cb20b0-02be-42c7-9b45-96407c888dc2",
  "tenantId": "00000000-0000-0000-0000-000000000000",
  "name": "2-stufiges Stirnradgetriebe",
  "description": null,
  "visibility": "None",
  "method": "IDM_CALCULATE_GEAR_COUPLED",
  "created": "2018-10-16T10:25:20.874Z",
  "createdBy": "00000000-0000-0000-0000-000000000000",
  "lastModified": "2018-10-16T10:25:28.226Z",
  "lastModifiedBy": "00000000-0000-0000-0000-000000000000",
  "client": "STRING_BEARINX_ONLINE",
  "project": {
    "id": "10c37dcc-0e4e-4c4d-a6d6-12cf65cceaf9",
    "name": "proj 2",
    "isBookmarked": false
  },
  "rootObject": {
    "id": "6ff0010c-00fe-485b-b695-4ddd6aca4dcd",
    "type": "IDO_GEAR",
    "children": [
      {
        "id": "1dd94d1a-e52d-40b3-a82b-6db02a8fbbab",
        "type": "IDO_SYSTEM_LOADCASE",
        "children": [],
        "childList": "SYSTEMLOADCASE",
        "properties": [
          {
            "name": "IDCO_IDENTIFICATION",
            "value": "1dd94d1a-e52d-40b3-a82b-6db02a8fbbab"
          },
          {
            "name": "IDCO_DESIGNATION",
            "value": "Lastfall 1"
          },
          {
            "name": "IDSLC_TIME_PORTION",
            "value": 100
          },
          {
            "name": "IDSLC_DISTANCE_PORTION",
            "value": 100
          },
          {
            "name": "IDSLC_OPERATING_TIME_IN_HOURS",
            "value": 1
          },
          {
            "name": "IDSLC_OPERATING_TIME_IN_SECONDS",
            "value": 3600
          },
          {
            "name": "IDSLC_OPERATING_REVOLUTIONS",
            "value": 1
          },
          {
            "name": "IDSLC_OPERATING_DISTANCE",
            "value": 1
          },
          {
            "name": "IDSLC_ACCELERATION",
            "value": 9.81
          },
          {
            "name": "IDSLC_EPSILON_X",
            "value": 0
          },
          {
            "name": "IDSLC_EPSILON_Y",
            "value": 0
          },
          {
            "name": "IDSLC_EPSILON_Z",
            "value": 0
          },
          {
            "name": "IDSLC_CALCULATION_WITH_OWN_WEIGHT",
            "value": "CO_CALCULATION_WITHOUT_OWN_WEIGHT"
          },
          {
            "name": "IDSLC_CALCULATION_WITH_TEMPERATURE",
            "value": "CO_CALCULATION_WITH_TEMPERATURE"
          },
          {
            "name": "IDSLC_FLAG_FOR_LOADCASE_CALCULATION",
            "value": "LB_CALCULATE_LOADCASE"
          },
          {
            "name": "IDSLC_STATUS_OF_LOADCASE_CALCULATION",
            "value": false
          }
        ],
        "position": 1,
        "order": 1,
        "support_vector": {
          "x": 0,
          "y": 0,
          "z": 0
        },
        "u_axis_vector": {
          "x": 1,
          "y": 0,
          "z": 0
        },
        "w_axis_vector": {
          "x": 0,
          "y": 0,
          "z": 1
        },
        "role": "_none_"
      },
      {
        "id": "ab7fbf37-17bb-4e60-a543-634571a0fd73",
        "type": "IDO_SHAFT_SYSTEM",
        "children": [
          {
            "id": "7f034e5c-24df-4145-bab8-601f49b43b50",
            "type": "IDO_RADIAL_ROLLER_BEARING",
            "children": [
              {
                "id": "0b3e695b-6028-43af-874d-4826ab60dd3f",
                "type": "IDO_RADIAL_BEARING_INNER_RING",
                "children": [
                  {
                    "id": "330aa09d-60fb-40d7-a190-64264b3d44b7",
                    "type": "IDO_LOADCONTAINER",
                    "children": [
                      {
                        "id": "03036040-fc1a-4e52-8a69-d658e18a8d4a",
                        "type": "IDO_DISPLACEMENT",
                        "children": [],
                        "childList": "DISPLACEMENT",
                        "properties": [
                          {
                            "name": "IDCO_IDENTIFICATION",
                            "value": "03036040-fc1a-4e52-8a69-d658e18a8d4a"
                          },
                          {
                            "name": "IDCO_DESIGNATION",
                            "value": "Displacement 1"
                          }
                        ],
                        "position": 1,
                        "order": 1,
                        "support_vector": {
                          "x": -201.3,
                          "y": 0,
                          "z": -229.8
                        },
                        "u_axis_vector": {
                          "x": 1,
                          "y": 0,
                          "z": 0
                        },
                        "w_axis_vector": {
                          "x": 0,
                          "y": 0,
                          "z": 1
                        },
                        "shaftSystemId": "ab7fbf37-17bb-4e60-a543-634571a0fd73",
                        "role": "_none_"
                      },
                      {
                        "id": "485f5bf4-fb97-415b-8b42-b46e9be080da",
                        "type": "IDO_CUMULATED_LOAD",
                        "children": [],
                        "childList": "CUMULATEDLOAD",
                        "properties": [
                          {
                            "name": "IDCO_IDENTIFICATION",
                            "value": "485f5bf4-fb97-415b-8b42-b46e9be080da"
                          },
                          {
                            "name": "IDCO_DESIGNATION",
                            "value": "Cumulated load 1"
                          },
                          {
                            "name": "IDCO_X",
                            "value": 0
                          },
                          {
                            "name": "IDCO_Y",
                            "value": 0
                          },
                          {
                            "name": "IDCO_Z",
                            "value": 0
                          }
                        ],
                        "position": 2,
                        "order": 1,
                        "support_vector": {
                          "x": -201.3,
                          "y": 0,
                          "z": -229.8
                        },
                        "u_axis_vector": {
                          "x": 1,
                          "y": 0,
                          "z": 0
                        },
                        "w_axis_vector": {
                          "x": 0,
                          "y": 0,
                          "z": 1
                        },
                        "shaftSystemId": "ab7fbf37-17bb-4e60-a543-634571a0fd73",
                        "role": "_none_"
                      }
                    ],
                    "childList": "LOADCONTAINER",
                    "properties": [
                      {
                        "name": "IDCO_IDENTIFICATION",
                        "value": "330aa09d-60fb-40d7-a190-64264b3d44b7"
                      },
                      {
                        "name": "IDCO_DESIGNATION",
                        "value": "Load container 1"
                      },
                      {
                        "name": "IDLC_LOAD_DISPLACEMENT_COMBINATION",
                        "value": "LOAD_MOMENT"
                      },
                      {
                        "name": "IDLC_TYPE_OF_MOVEMENT",
                        "value": "LB_ROTATING"
                      },
                      {
                        "name": "IDLC_NUMBER_OF_ARRAY_ELEMENTS",
                        "value": 20
                      }
                    ],
                    "position": 1,
                    "order": 1,
                    "support_vector": {
                      "x": -201.3,
                      "y": 0,
                      "z": -229.8
                    },
                    "u_axis_vector": {
                      "x": 1,
                      "y": 0,
                      "z": 0
                    },
                    "w_axis_vector": {
                      "x": 0,
                      "y": 0,
                      "z": 1
                    },
                    "shaftSystemId": "ab7fbf37-17bb-4e60-a543-634571a0fd73",
                    "role": "_none_"
                  },
                  {
                    "id": "3258d217-e6e4-4a5c-8677-ae1fca26f21e",
                    "type": "IDO_RACEWAY",
                    "children": [],
                    "childList": "RACEWAY",
                    "properties": [
                      {
                        "name": "IDCO_IDENTIFICATION",
                        "value": "3258d217-e6e4-4a5c-8677-ae1fca26f21e"
                      },
                      {
                        "name": "IDCO_DESIGNATION",
                        "value": "Raceway 1"
                      },
                      {
                        "name": "IDRCW_UPPER_DEVIATION_RACEWAY_DIAMETER",
                        "value": 0
                      },
                      {
                        "name": "IDRCW_LOWER_DEVIATION_RACEWAY_DIAMETER",
                        "value": 0
                      },
                      {
                        "name": "IDRCW_PROFILE_OFFSET",
                        "value": 0
                      },
                      {
                        "name": "IDRCW_PROFILE_ANGLE",
                        "value": 0
                      },
                      {
                        "name": "IDRCW_PROFILE_CURVATURE_RADIUS",
                        "value": 0
                      },
                      {
                        "name": "IDRCW_PROFILE_CENTER_POINT_OFFSET",
                        "value": 0
                      },
                      {
                        "name": "IDRCW_PROFILE_NUMBER_OF_WAVES",
                        "value": 0
                      },
                      {
                        "name": "IDRCW_PROFILE_AMPLITUDE",
                        "value": 0
                      },
                      {
                        "name": "IDRCW_PROFILE_POSITION_OF_FIRST_WAVE",
                        "value": 0
                      },
Bug
First of all, rename the loop's value variable to something else, because you have a value variable as the method argument and another variable with the same name when iterating over the dictionary:
for key, value in search_dict.iteritems():  # <-- rename value to something else, e.g. val
Otherwise you will have bugs: the value coming out of the dictionary shadows the new value you want to insert. If you iterate with for key, val in ... instead, the outer value argument stays usable.
Adding The Value Pair
It seems id is a key inside your search_dict, but judging by your JSON file, search_dict may contain several nested lists such as properties and/or children, so it depends on where you want to add the new pair.
If you want to add it to the same dictionary where your id is:
if key == "id":
    if val == IDCO:
        print("FOUND IDCO IN JSON: " + val + "\n")
        search_dict[name] = value  # `value` is the method argument here, `val` the loop variable
Result:
{
  "id": "3258d217-e6e4-4a5c-8677-ae1fca26f21e",
  "type": "IDO_RACEWAY",
  "children": [],
  "childList": "RACEWAY",
  "<new name>": "<new value>",
  "properties": [
    {
      "name": "IDCO_IDENTIFICATION",
      "value": "3258d217-e6e4-4a5c-8677-ae1fca26f21e"
    },
If you want to add it to the children or properties list inside the dictionary where id is:
if key == "id":
    if val == IDCO:
        print("FOUND IDCO IN JSON: " + val + "\n")
        if "properties" in search_dict:  # you can swap "properties" for "children", depending on your use case
            search_dict["properties"].append({"name": name, "value": value})  # a new dictionary with 'name' and 'value' keys
Result:
{
  "id": "3258d217-e6e4-4a5c-8677-ae1fca26f21e",
  "type": "IDO_RACEWAY",
  "children": [],
  "childList": "RACEWAY",
  "properties": [
    {
      "name": "IDCO_IDENTIFICATION",
      "value": "3258d217-e6e4-4a5c-8677-ae1fca26f21e"
    },
    {
      "name": "<new name>",
      "value": "<new value>"
    },
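For reference, a compact Python 3 sketch of the whole search-and-insert: a minimal version with the loop variable renamed, .items() instead of the Python 2 .iteritems(), and an early return once the id has been found (ids are assumed unique in the file):
def insert_property(node, idco, name, value):
    # Walk nested dicts/lists; when the dict whose "id" equals idco is found,
    # append the new name/value pair to its "properties" list.
    if isinstance(node, dict):
        if node.get("id") == idco:
            node.setdefault("properties", []).append({"name": name, "value": value})
            return True
        return any(insert_property(val, idco, name, value) for val in node.values())
    if isinstance(node, list):
        return any(insert_property(item, idco, name, value) for item in node)
    return False

# hypothetical usage with the id and property name from the question
insert_property(python_obj, "7f034e5c-24df-4145-bab8-601f49b43b50", "IDSU_FX[0]", "1000")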
