I need to edit a object within a dict that is located inside a json file. but whenever I attempt to do it, it would delete the entire json file and only add the thing I've edited.
Here's my function.
async def Con_Manage(keys):
with open('keys.json') as config_file:
config = json.load(config_file)[keys]
try:
Current_Con = config["curCons"] + 1
with open('keys.json', 'w') as config_file:
json.dump(Current_Con, config_file)
return True
except:
return False
heres my json file before i run it
{
"key1": {
"time": 1500,
"maxCons": 15,
"curCons": 2,
"coolDown": 2
}
}
and here's what it looks like after its ran
3
is there any way that i can run this and not delete all my progress?
config["curCons"] gets you just the value which you then increment it and assign to Current_Con. Instead you need to increment and set the value to +1. From there you would want to save the entire json object that you just read in and not just the value that was updated.
async def Con_Manage(keys):
with open('keys.json') as config_file:
config = json.load(config_file)
config[keys]["curCons"] += 1 # mutates the value in place
with open('keys.json', 'w') as keys:
json.dump(config, keys) # saves the entire dict not just the value
You need to be writing the entire config file
you can do something like this...
with open('keys.json') as config_file:
config = json.load(config_file)
for key in keys:
try:
config[key]["curCons"] += 1
except KeyError:
pass
with open('keys.json', 'w') as config_file:
json.dump(config, config_file)
Related
I have a code that needs to read a JSON file with multiple lines, i.e:
{"c1-line1": "value", "c2-line1": "value"}
{"c1-line2": "value", "c2-line2": "value"}...
and, after change the keys values (already working), I need to write a new json file with these multiple lines, i.e:
{"newc1-line1": "value", "newc2-line1": "value"}
{"newc1-line2": "value", "newc2-line2": "value"}...
My problem is that my code are just writing the last value readed:
{"newc1-line2": "value", "newc2-line2": "value"}
My code:
def main():
... # changeKeyValueCode
writeFile(data)
def writeFile(data):
with open('new_file.json', 'w') as f:
json.dump(data, f)
I already tried with json.dumps and just f.write('') or f.write('\n')
I know that data in writeFile() is correctly with each line value.
How can I resolve this, please?
def main():
... # changeKeyValueCode
writeFile(data)
def writeFile(data):
with open('new_file.json', 'a') as f:
json.dump(data, f)
with open('new_file.json', 'a')
open file with (a), it will search the file if found append data to the end, else it will create empty file and then append data.
Currently, I store just the device_id of a certain object with an assigned sACN universe. The function looks like this:
for device_index in range(device_count):
device_name = sdk.get_device_info(device_index)
device_type = device_name.type
if device_name.id not in conf:
universe = get_free_universe()
conf.update({device_name.id: universe})
print(f"conf= {conf}")
else:
universe = conf[device_name.id] #main.py line 368
save_config(DEVICE_PATH)
Which produces this JSON:
{
"3192efa109cfb5d86f09a82a7cc00c5d": 4,
"42aa42a0bb5fcee780fb1be13dfcb873": 5,
"4b80e1817076307b36c58c31118f6696": 1,
"62c13e2db726382e9c66d9f69020ab5e": 6,
"a51da6fe155f299a3fc474c22310cde9": 2,
"b5ff59af43d6c3572a41d7693b5bec1c": 3
}
Now I wanna store together with the device_id not only the universe, but its device_name.model attribute, kinda in this format:
{
"3192efa109cfb5d86f09a82a7cc00c5d":
"universe": 4,
"model": "Vengance RGB PRO"
"42aa42a0bb5fcee780fb1be13dfcb873":
"universe": 5,
"model": "M65 PRO"
}
I have absolutely no clue how to do it, afaik my conf is a python dict where I cant do something like this and I need to use python lists. I define it like this: conf = load_config(DEVICE_PATH), the function load_config()looks like this:
def load_config(config_path):
if not os.path.isfile(config_path): #Create the file if not present
open(config_path, "w+")
if config_path == MQTT_PATH:
with open(config_path) as f: #load the config file
try:
return json.load(f)
except json.JSONDecodeError:
data = {}
data['enable_MQTT'] = True
data['ip'] =""
data['port']= 1883
data['username'] =""
data['password'] =""
data['base_topic'] =""
with open(config_path, "w", encoding="utf-8") as f: # Save config
json.dump(
data,
f,
ensure_ascii=False,
sort_keys=False,
indent=4
)
print(f"MQTT Config Created, please edit {MQTT_PATH} and restart this program!")
print("For Home Assistant Auto Discovery, set base_topic to homeassistant!")
sys.exit()
with open(config_path) as f: #load the config file
try:
return json.load(f)
except json.JSONDecodeError:
return {}
When I try to convert the list via conf = list(conf), i just get the runtime error
File "c:\Users\tenn0\Documents\Projects\iCue2sACN-mqtt\src\main.py", line 368, in <module>
universe = conf[device_name.id]
TypeError: list indices must be integers or slices, not str
Ive marked in the first snipped the correct line
Edit: As pointed out in the comments, i can simply do a dict inside a dict. conf.update({device_name.id: {"model": device_name.model, "universe": universe}}) did the trick for me.
I have a json file
{
"id_1" : "",
"id_2": ""
}
I'm trying to update the value for each using the following function
async def UpdateID(self):
await self.bot.wait_until_ready()
while not self.bot.is_closed():
id1 = 1
id2 = 2
with open("file.json", "r+") as r:
config_json = json.load(r)
config_json.update({"id_1": "%s" %(id1)})
config_json.update({"id_2": "%s" %(id2)})
json.dump(config_json,r)
await asyncio.sleep(120)
Using mode r+, it copies the file and adds it to the end, thus duplicate all the data instead of replacing. If I use r, I can't write.
If I use w or a, I get an UnsupportedOperation, not readable error on the json.load step. Using w also makes the file empty.
a+ and w+, give a JSONDecodeError,Expecting value: line 1 column 1 (char 0) error on the json.load step.
Am I using the wrong mode, or an improper way of fixing the original problem?
You can do like this
import json
async def updateId(self):
do_something()
with open(file_name) as f:
config = json.load(f)
update_your_config()
with open(file_name, 'w') as f:
json.dumps(config, f)
do_something()
Write a program that inputs a JSON file (format just like
example1.json) and prints out the value of the title field.
import json
# TODO: Read your json file here and return the contents
def read_json(filename):
dt = {}
# read the file and store the contents in the variable 'dt'
with open(filename,"r") as fh:
dt = json.load(fh)
###fh = open(filename, "r")
###dt = json.load(fh)
return dt
# TODO: Pass the json file here and print the value of title field. Remove the `pass` statement
def print_title(dt):
print filename["title"]
# TODO: Input a file from the user
filename = raw_input("Enter the JSON file: ")
# The function calls are already done for you
r = read_json(filename)
print_title(r)
Hi, I'm new with Python and I'm not sure what I am doing wrong. I keep getting the following message:
enter image description here
Your'e almost there, you just confused with the parameter name.
Change this:
def print_title(dt):
print filename["title"]
To:
def print_title(dt):
print dt["title"]
I have a large for loop in which I create json objects and I would like to be able to stream write the object in each iteration to a file. I would like to be able to use the file later in a similar fashion later (read objects one at a time).
My json objects contain newlines and I can't just dump each object as a line in a file.
How can I achieve this?
To make it more concrete, consider the following:
for _id in collection:
dict_obj = build_dict(_id) # build a dictionary object
with open('file.json', 'a') as f:
stream_dump(dict_obj, f)
stream_dump is the function that I want.
Note that I don't want to create a large list and dump the whole list using something like json.dump(obj, file). I want to be able to append the object to the file in each iteration.
Thanks.
You need to work with a subclass of JSONEncoder and then proxy the build_dict function
from __future__ import (absolute_import, division, print_function,)
# unicode_literals)
import collections
import json
mycollection = [1, 2, 3, 4]
def build_dict(_id):
d = dict()
d['my_' + str(_id)] = _id
return d
class SeqProxy(collections.Sequence):
def __init__(self, func, coll, *args, **kwargs):
super(SeqProxy, *args, **kwargs)
self.func = func
self.coll = coll
def __len__(self):
return len(self.coll)
def __getitem__(self, key):
return self.func(self.coll[key])
class JsonEncoderProxy(json.JSONEncoder):
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
# Let the base class default method raise the TypeError
return json.JSONEncoder.default(self, o)
jsonencoder = JsonEncoderProxy()
collproxy = SeqProxy(build_dict, mycollection)
for chunk in jsonencoder.iterencode(collproxy):
print(chunk)
Ouput:
[
{
"my_1"
:
1
}
,
{
"my_2"
:
2
}
,
{
"my_3"
:
3
}
,
{
"my_4"
:
4
}
]
To read it back chunk by chunk you need to use JSONDecoder and pass a callable as object_hook. This hook will be called with each new decoded object (each dict in your list) when you call JSONDecoder.decode(json_string)
Since you are generating the files yourself, you can simply write out one JSON object per line:
for _id in collection:
dict_obj = build_dict(_id) # build a dictionary object
with open('file.json', 'a') as f:
f.write(json.dumps(dict_obj))
f.write('\n')
And then read them in by iterating over lines:
with open('file.json', 'r') as f:
for line in f:
dict_obj = json.loads(line)
This isn't a great general solution, but it's a simple one if you are both the generator and consumer.
Simplest solution:
Remove all whitespace characters from your json document:
import string
def remove_whitespaces(txt):
""" We shall remove all whitespaces"""
for chr in string.whitespace:
txt = txt.replace(chr)
Obviously you could also json.dumps(json.loads(json_txt)) (BTW this also verify that the text is a valid json).
Now you could write you documents to a file one line each.
Second solution:
Create an [AnyStr]Io stream, write in the Io a valid document, (your documents being part of an object or list) and then write the io in a file (or upload it to the cloud).