Code:

import json
from faker import Faker

userid1 = 'u123'
userid2 = 'u124'
ids = (userid1, userid2)
fake = Faker('en_US')

for ind in ids:
    sms = {
        "id": ind,
        "name": fake.name(),
        "email": fake.email(),
        "gender": "MALE",
    }
    f_name = '{}.json'.format(ind)
    with open(f_name, 'w') as fp:
        # Save the dictionary
        json.dump(sms, fp, indent=4)
    print(sms)
Later I want to read the file back:

file1 = 'filename.json'  # how do I get the *ind* value here, i.e. the userid?
fd1 = open("filename.json")
json_content1 = fd1.read()
fd1.close()

How do I open a file that was saved with f_name = '{}.json'.format(ind), without typing the file name manually? The file names were built from ind, so how can I use ind here to open the file?
This code can help you get data from a JSON file; you can read any field from the JSON data with data["name-of-field"]:

import json

userid1 = 'json_file1'
ids = [userid1]
for ind in ids:
    f_name = '{}.json'.format(ind)
    with open(f_name, 'r') as outfile:
        data = json.loads(outfile.read())
        print(data["name"])
        print(data)
Here is an example.
file.json:
{
"name": "Ghassen",
"apiVersion": "v1"
}
Output:
Ghassen
{'name': 'Ghassen', 'apiVersion': 'v1'}
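Applied to the original script, the same ids tuple that named the files on the way out can name them on the way back in, so ind never has to be typed manually (a minimal sketch, assuming u123.json and u124.json were already written by the loop above):

import json

ids = ('u123', 'u124')
for ind in ids:
    f_name = '{}.json'.format(ind)  # the same name the writer loop produced
    with open(f_name) as fp:
        data = json.load(fp)
    print(data["id"], data["name"])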
I have a JSON file, and I need to rename the folder_path key to backup_folder_path using Python:
{
"urn:adsk.wipprod:dm.lineage:smth": {
"bim_manifest_urn": "urn:foo/bar/z",
"gs_id": "foobar",
"versions": {
"1": "1"
},
"folder_path": "/foo/bar"
},
"urn:adsk.wipprod:dm.lineage:smth": {
"bim_manifest_urn": "urn:foo/bar",
"gs_id": "foobar1",
"versions": {
"1": "1"
},
"folder_path": "/foo/barŠ”"
},
What I tried to do:
def edit_string_name():
    with open(r"smth.json", encoding="utf-8") as json_data:
        data = json.load(json_data)
        data = {'folder_path'}
        data['backup_folder_path'] = data.pop('folder_path')
        print(data)

if __name__ == '__main__':
    edit_string_name()
But nothing seems to happen.
When I tried to loop through the data, I got nonsense in the terminal.
This should do the job:

import json

def edit_string_name():
    with open("smth.json", "r+", encoding="utf-8") as file:
        data = json.load(file)
        content = data["urn:adsk.wipprod:dm.lineage:smth"]
        content["backup_folder_path"] = content["folder_path"]
        content.pop("folder_path")
        data["urn:adsk.wipprod:dm.lineage:smth"] = content
        # Updating the file in place
        file.seek(0)
        file.write(json.dumps(data, indent=4))
        file.truncate()

edit_string_name()
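If the file contains several top-level urn entries, a variation that renames folder_path in each of them might look like this (a sketch under that assumption; it only touches entries that actually carry a folder_path key):

import json

def rename_all_folder_paths(path="smth.json"):
    with open(path, "r+", encoding="utf-8") as file:
        data = json.load(file)
        for content in data.values():
            if isinstance(content, dict) and "folder_path" in content:
                # pop returns the old value, so this renames the key
                content["backup_folder_path"] = content.pop("folder_path")
        file.seek(0)
        file.write(json.dumps(data, indent=4))
        file.truncate()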
I have a dictionary where each value is a list.
I want to write the individual items to separate JSON files.
For example:

data_to_write = {"Names":["name1", "name2", "name3"], "email":["mail1", "mail2", "mail3"]}

Now I want 3 JSON files, i.e. data1.json, data2.json, data3.json, in the following (approximate) format.
data1.json
{
Name: name1,
email: mail1
}
data2.json
{
Name: name2,
email: mail2
}
and so on.
My current approach is:

for file_no in range(no_of_files):
    for count, (key, info_list) in enumerate(data_to_write.items()):
        for info in info_list:
            with open(
                os.path.join(self.path_to_output_dir, str(file_no)) + ".json",
                "a",
            ) as resume:
                json.dump({key: info}, resume)

But this is wrong. Any help appreciated.
You could use pandas to do the work for you. Read the dictionary into a dataframe, then iterate the rows of the dataframe to produce the json for each row:
import pandas as pd

data_to_write = {"Names":["name1", "name2", "name3"], "email":["mail1", "mail2", "mail3"]}
df = pd.DataFrame(data_to_write).rename(columns={'Names':'Name'})
for i in range(len(df)):
    jstr = df.iloc[i].to_json()
    with open(f"data{i+1}.json", "w") as f:
        f.write(jstr)
Output (each line is in a separate file):
{"Name":"name1","email":"mail1"}
{"Name":"name2","email":"mail2"}
{"Name":"name3","email":"mail3"}
Try:
import json

data_to_write = {
    "Names": ["name1", "name2", "name3"],
    "email": ["mail1", "mail2", "mail3"],
}

for i, val in enumerate(zip(*data_to_write.values()), 1):
    d = dict(zip(data_to_write, val))
    with open(f"data{i}.json", "w") as f_out:
        json.dump(d, f_out, indent=4)
This writes data(1..3).json with content:
# data1.json
{
"Names": "name1",
"email": "mail1"
}
# data2.json
{
"Names": "name2",
"email": "mail2"
}
...
import json

data_to_write = {
    "Names": ["name1", "name2", "name3"],
    "email": ["mail1", "mail2", "mail3"],
}

# start enumerate at 1 so the files are named data1.json .. data3.json
for ind, val in enumerate(zip(*data_to_write.values()), 1):
    jsn = dict(zip(data_to_write, val))
    print(jsn)
    with open("data{}.json".format(ind), "w") as f:
        f.write(json.dumps(jsn))
I am trying to delete an element in a JSON file.
Here is my JSON file, before:
{
"names": [
{
"PrevStreak": false,
"Streak": 0,
"name": "Brody B#3719",
"points": 0
},
{
"PrevStreak": false,
"Streak": 0,
"name": "XY_MAGIC#1111",
"points": 0
}
]
}
After running the script:
{
"names": [
{
"PrevStreak": false,
"Streak": 0,
"name": "Brody B#3719",
"points": 0
}
]
}
How would I do this in Python? The file is stored locally, and I decide which element to delete by the name in each element.
Thanks
I would load the file, remove the item, and then save it again. Example:

import json

with open("filename.json") as f:
    data = json.load(f)

data["names"].pop(1)  # or iterate through the entries to find the matching name

with open("filename.json", "w") as f:
    json.dump(data, f)
You will have to read the file, convert it to a native Python data type (e.g. a dictionary), delete the element, and then save the file. In your case, something like this could work:
import json

filepath = 'data.json'
with open(filepath, 'r') as fp:
    data = json.load(fp)

del data['names'][1]

with open(filepath, 'w') as fp:
    json.dump(data, fp)
Try this:

# importing the module
import ast

# reading the data from the file
with open('dictionary.txt') as f:
    data = f.read()
print("Data type before reconstruction : ", type(data))

# reconstructing the data as a dictionary
a_dict = ast.literal_eval(data)

# keep every entry except the one with the matching name
result = {"names": [a for a in a_dict["names"] if a.get("name") != "XY_MAGIC#1111"]}
print(result)
import json

with open("test.json", 'r') as f:
    data = json.loads(f.read())

names = data.get('names')
for idx, name in enumerate(names):
    if name['name'] == 'XY_MAGIC#1111':
        del names[idx]
        break
print(names)
In order to read the file, the best approach is the with statement, after which you can use Python's json library to convert the JSON string to a Python dict. Once you have the dict, you can access the values and perform whatever operations are required, then convert it back to JSON with json.dumps() and save it.
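A minimal sketch of that read-modify-write cycle, assuming the file name test.json from the previous answer and the name to remove from the question:

import json

with open("test.json") as f:
    data = json.load(f)  # JSON text -> Python dict

# keep every entry whose name does not match
data["names"] = [n for n in data["names"] if n["name"] != "XY_MAGIC#1111"]

with open("test.json", "w") as f:
    f.write(json.dumps(data, indent=4))  # dict -> JSON text, saved back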
This does the right thing using the Python json module, and pretty-prints the JSON back to the file afterwards:

import json

jsonpath = '/path/to/json/file.json'

with open(jsonpath) as file:
    j = json.loads(file.read())

names_to_remove = ['XY_MAGIC#1111']
# iterate over a copy, since removing items while iterating would skip elements
for element in j['names'][:]:
    if element['name'] in names_to_remove:
        j['names'].remove(element)

with open(jsonpath, 'w') as file:
    file.write(json.dumps(j, indent=4))
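Alternatively, a list comprehension sidesteps the mutate-while-iterating issue entirely:

j['names'] = [e for e in j['names'] if e['name'] not in names_to_remove]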
I have 100 JSON files that need to be merged into 1 JSON file. Basically, I want to put [] around the contents of the 100 files and append them all into 1 file.
Each file has the same structure, as follows:
[
{
"id": "1",
"title": "Student",
"children": [
{
"ID": "111",
"Name": "John",
"Pattern": "DA0"
},
{
"ID": "222",
"Name": "Tom",
"Pattern": "DA0"
}
]
}
]
I have the following code to achieve this, but there is an error; is it a JSON encoding issue? Please have a look:
import glob
import json

read_files = glob.glob("*.json")
output_list = []

with open(read_files, 'w', encoding='utf-8') as jsonf:
    for f in read_files:
        with open(f, "rb") as infile:
            output_list.append(json.load(infile))

all_items = []
for json_file in output_list:
    all_items += json_file['items']

textfile_merged = open('merged.json', 'w')
json.dump({"items": all_items}, textfile_merged)
textfile_merged.close()
The error message:
Traceback (most recent call last):
File "combine.py", line 10, in <module>
with open(read_files, 'w', encoding='utf-8') as jsonf:
TypeError: expected str, bytes or os.PathLike object, not list
glob.glob("*.json") returns a list of path names, per the Python documentation, so your code with open(read_files, 'w', encoding='utf-8') as jsonf: will not work: open() expects a single path, not a list.
Try something like:
import glob
import json

read_files = glob.glob("*.json")
output_list = []
for f in read_files:
    with open(f, "rb") as infile:
        output_list.append(json.load(infile))
# rest of your code
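Since each file in the sample holds a top-level list, the rest of the merge could then concatenate those lists and write one output file. A sketch under that assumption (merged.json is the output name from the question; it is excluded from the glob so a rerun does not read the output back in):

import glob
import json

read_files = [f for f in glob.glob("*.json") if f != "merged.json"]

merged = []
for f in read_files:
    with open(f, "rb") as infile:
        merged += json.load(infile)  # each file is a top-level list

with open("merged.json", "w", encoding="utf-8") as outfile:
    json.dump(merged, outfile, indent=4)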
I am trying to update the JSON files being loaded into my database, using the following Python script.
#!/usr/bin/env python
# Usage: update json file
import json
import os

json_dir = "Downloads/ADGGTNZ_SERVERFILES/Test_JSON/"
json_dir_processed = "Downloads/ADGGTNZ_SERVERFILES/Test_JSON/updated"

for json_file in os.listdir(json_dir):
    if json_file.endswith(".json"):
        processed_json = "%s%s" % (json_dir_processed, json_file)
        json_file = json_dir + json_file
        print "Processing %s -> %s" % (json_file, processed_json)
        with open(json_file, 'r') as f:
            json_data = json.load(f)
            json_data_extract = json_data['grp_cowmonitoring/rpt_animrec'][0]
            if "grp_cowmonitoring/rpt_animrec/grp_animrec/cowtagid" not in json_data_extract:
                json_data["grp_cowmonitoring/rpt_animrec/grp_animrec/cowtagid"] = json_data["grp_cowmonitoring/rpt_animrec/grp_animrec/damtagid"]
                with open(processed_json, 'w') as f:
                    f.write(json.dumps(json_data, indent=4))
    else:
        print "%s not a JSON file" % json_file
The aim of the update script is to find out whether "grp_cowmonitoring/rpt_animrec/grp_animrec/cowtagid" is missing within the array in my JSON; if it is, I want to add the same value under a different key name, "grp_cowmonitoring/rpt_animrec/grp_animrec/damtagid".
Original file:
{
"_notes": [],
....
"grp_cowmonitoring/rpt_animrec": [
{
"grp_cowmonitoring/rpt_animrec/grp_animrec/cowtagid": "TZN000403250467",
...
"grp_cowmonitoring/rpt_animrec/grp_milking/grp_calfreg/rpt_reg_calvedets": [
{
"grp_cowmonitoring/rpt_animrec/grp_milking/grp_calfreg/rpt_reg_calvedets/grp_reg_calvedets/calfsex": "1",
"grp_cowmonitoring/rpt_animrec/grp_milking/grp_calfreg/rpt_reg_calvedets/grp_reg_calvedets/calvtype": "1",
....
}
],
"grp_cowmonitoring/rpt_animrec/anim_weight/weight": "343.0",
...
}
],
"fid": 647935,
"grp_cowmonitoring/grp-milkuse/milkprocess": "0.0",
"start_time": "2018-11-30T08:48:32.278+03",
....
}
Expected JSON file:
{
"_notes": [],
....
"grp_cowmonitoring/rpt_animrec": [
{
"grp_cowmonitoring/rpt_animrec/grp_animrec/cowtagid": "TZN000403250467",
...
"grp_cowmonitoring/rpt_animrec/grp_milking/grp_calfreg/rpt_reg_calvedets": [
{
"grp_cowmonitoring/rpt_animrec/grp_milking/grp_calfreg/rpt_reg_calvedets/grp_reg_calvedets/calfsex": "1",
"grp_cowmonitoring/rpt_animrec/grp_milking/grp_calfreg/rpt_reg_calvedets/grp_reg_calvedets/calvtype": "1",
"grp_cowmonitoring/rpt_animrec/grp_animrec/damtagid"
....
}
],
"grp_cowmonitoring/rpt_animrec/anim_weight/weight": "343.0",
...
}
],
"fid": 647935,
"grp_cowmonitoring/grp-milkuse/milkprocess": "0.0",
"start_time": "2018-11-30T08:48:32.278+03",
....
}
How can I modify my Python script to accommodate the changes in my JSON?
Error message after updating the original code:
Traceback (most recent call last):
File "/opt/rdm/adggtnz/ADGG-TZA-03/addfidkey2.sh", line 15, in <module>
json_data_extract = json_data['grp_cowmonitoring/rpt_animrec'][0]
KeyError: 'grp_cowmonitoring/rpt_animrec'
You just need to access the right elements from your file:
import json
import os

json_dir = "Downloads/ADGGTNZ_SERVERFILES/Test_JSON/"
json_dir_processed = "Downloads/ADGGTNZ_SERVERFILES/Test_JSON/updated/"

for json_file in os.listdir(json_dir):
    if json_file.endswith(".json"):
        processed_json = "%s%s" % (json_dir_processed, json_file)
        json_file = json_dir + json_file
        print "Processing %s -> %s" % (json_file, processed_json)
        with open(json_file, 'r') as f:
            json_data = json.load(f)
            json_data_extract = json_data.get('grp_cowmonitoring/rpt_animrec', [])
            for cow in json_data_extract:
                if "grp_cowmonitoring/rpt_animrec/grp_animrec/cowtagid" not in cow:
                    # Skip if cowtagid is not present
                    continue
                calves = cow.get("grp_cowmonitoring/rpt_animrec/grp_milking/grp_calfreg/rpt_reg_calvedets", [])
                for calf in calves:
                    if "grp_cowmonitoring/rpt_animrec/grp_animrec/damtagid" not in calf:
                        print "Updating ..."
                        calf["grp_cowmonitoring/rpt_animrec/grp_animrec/damtagid"] = cow["grp_cowmonitoring/rpt_animrec/grp_animrec/cowtagid"]
        with open(processed_json, 'w') as f:
            f.write(json.dumps(json_data, indent=4))
    else:
        print "%s not a JSON file" % json_file