I have a problem. Every time I do this it just replaces the last user's wallet.
data = {}
usern = ctx.message.author
usern = str(usern)
data[usern] = []
data[usern].append({
    "money": 0
})
I got an answer for it:
if isfile('config.json'):  # check if file exists
    with open('config.json', 'r') as infile:
        data = json.load(infile)  # load from existing
    usern = ctx.message.author
    usern = str(usern)
    data[usern] = []
    data[usern].append({
        "money": 0
    })
else:  # no file, start from scratch
    data = {}
    usern = ctx.message.author
    usern = str(usern)
    data[usern] = []
    data[usern].append({
        "money": 0
    })
with open('config.json', 'w') as outfile:
    json.dump(data, outfile)
Check whether the file exists and load the existing JSON structure from it. Change the beginning of your program to this:
from os.path import isfile

if isfile('config.json'):  # check if file exists
    with open('config.json', 'r') as infile:
        data = json.load(infile)  # load from existing
else:  # no file, start from scratch
    data = {}
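Putting it together: a minimal end-to-end sketch (assuming a discord.py-style ctx, and keeping the list-of-dicts wallet structure from the question) could look like this:

import json
from os.path import isfile

def add_wallet(ctx):
    # Load the existing structure if the file exists, else start fresh.
    if isfile('config.json'):
        with open('config.json', 'r') as infile:
            data = json.load(infile)
    else:
        data = {}
    usern = str(ctx.message.author)
    # Create a wallet only if this user doesn't have one yet, so an
    # existing balance is never overwritten.
    if usern not in data:
        data[usern] = [{"money": 0}]
    # Write the whole structure back, preserving every other user.
    with open('config.json', 'w') as outfile:
        json.dump(data, outfile)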
I have a text file that has some values as follows:
matlab.file.here.we.go{1} = 50
matlab.file.here.sxd.go{1} = 50
matlab.file.here.asd.go{1} = 50
I want the code to look for "matlab.file.here.sxd.go{1}" and replace the value assigned to it from 50 to 1. But I want it to be dynamic (i.e., later I will have over 20 values to change, and I don't want to search for each specific phrase). I'm new to Python, so I don't have much to go on when searching for this online. Thanks
I tried the following:
file_path = r'test\testfile.txt'
file_param = 'matlab.file.here.we.go{1}'
changing = 'matlab.file.here.we.go{1} = 1'

with open(file_path, 'r') as f:
    content = f.readlines()
content = content.replace(file_param, changing)
with open(file_path, 'w') as f:
    f.write(content)
but it didn't achieve what I wanted.
You can split on the equals sign, and you can read from one file while writing to another at the same time.
import os

file_path = r'test\testfile.txt'
file_path_temp = r'test\testfile.txt.TEMP'
file_param = 'matlab.file.here.sxd.go{1}'  # parameter to look for
new_value = 1                              # value to assign to it

with open(file_path, 'r') as rf, open(file_path_temp, 'w') as wf:
    for line in rf:
        if file_param in line:
            temp = line.split(' = ')
            temp[1] = str(new_value) + '\n'  # join needs strings; keep the newline
            line = ' = '.join(temp)
        wf.write(line)

os.remove(file_path)
os.rename(file_path_temp, file_path)
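To make this dynamic for the 20+ values mentioned in the question, the same split-and-join loop can be driven by a dict mapping parameter names to new values. A sketch under that assumption (the keys and values below are only illustrative):

import os

file_path = r'test\testfile.txt'
file_path_temp = r'test\testfile.txt.TEMP'

# Hypothetical mapping of parameter names to their new values.
replacements = {
    'matlab.file.here.sxd.go{1}': 1,
    'matlab.file.here.asd.go{1}': 2,
}

with open(file_path, 'r') as rf, open(file_path_temp, 'w') as wf:
    for line in rf:
        key = line.split(' = ')[0].strip()
        if key in replacements:
            line = '{} = {}\n'.format(key, replacements[key])
        wf.write(line)

os.remove(file_path)
os.rename(file_path_temp, file_path)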
I have a csv file whose content is as below:
config,"[{""enabled"" : false,""id"" : ""name_of_app""}]"
region, US
app_name, test
I want to open the csv and update "enabled" : false to "enabled" : true. I could do that if it weren't a csv.
What I have done so far:
for name in filelist:
    vfile = open(name, "r")
    with open(name) as vfile:
        reader = csv.reader(vfile)  # Create a new reader
        for row in reader:
            payload = row[1]
            if row[0] == "config":
                print("hello")
                json_payload = row[1]
                json_payload = json.loads(json_payload)
You almost reached the solution.
import csv
import json

filelist = ['*.csv', '*.csv']
for name in filelist:
    rows = []
    flag = False
    with open(name) as vfile:
        reader = csv.reader(vfile)  # Create a new reader
        for row in reader:
            if row[0] == "config":
                flag = True
                json_payload = json.loads(row[1])
                json_payload[0]["enabled"] = True
                row[1] = json.dumps(json_payload)
            rows.append(row)
    if flag:
        with open(name, 'w', newline='') as f:
            writer = csv.writer(f)
            writer.writerows(rows)
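One caveat: open() does not expand wildcard patterns, so a filelist entry like '*.csv' will raise FileNotFoundError. If the intent is to process every CSV in a directory, the glob module can build the concrete list first, e.g.:

import glob

# Expand the pattern into real file names before opening them.
filelist = glob.glob('*.csv')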
Hello, I am trying to create a JSON file and I am having trouble formatting it.
Below I will provide the code and the output.
Code:
def writeToFile(self):
    self.json_conceivement = os.path.join("./", 'NoobPremeNewEggLogIn.json')
    self.accounts = {}
    if os.path.exists(self.json_conceivement):
        try:
            with open(self.json_conceivement) as f:
                self.accounts = dict(json.loads(f.read()))
        except:
            self.accounts = {}
    else:
        try:
            with open(self.json_conceivement) as f:
                self.accounts = {}
        except:
            pass
    self.accounts['Profiles'] = []
    self.autoSave()

def autoSave(self):
    with open(self.json_conceivement, "a", encoding='utf-8') as outfile:
        json.dump(dict(self.accounts.items()), outfile, ensure_ascii=False, indent=4)
Output (if I run it once; expected):
{
"Profiles": []
}
Output (if I run it twice; incorrect):
{
"Profiles": []
}{}
Wanted output:
{
"Profiles": [{}]
}
Any help would be appreciated
Change this line
with open(self.json_conceivement, "a", encoding='utf-8') as outfile:
to
with open(self.json_conceivement, "w", encoding='utf-8') as outfile:
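Opening with "a" appends a second JSON document to the file on every run, which is where the trailing {} comes from; "w" truncates the file first so a single valid document is written each time. If the intent is also to keep previously saved profiles instead of resetting Profiles to [] on every run, a hedged variation of writeToFile (reusing the question's names) could load the old list before saving:

def writeToFile(self):
    self.json_conceivement = os.path.join("./", 'NoobPremeNewEggLogIn.json')
    self.accounts = {}
    if os.path.exists(self.json_conceivement):
        try:
            with open(self.json_conceivement, encoding='utf-8') as f:
                self.accounts = json.load(f)
        except (ValueError, OSError):
            self.accounts = {}  # unreadable or invalid JSON: start fresh
    # Keep any profiles already on disk instead of overwriting them.
    self.accounts.setdefault('Profiles', [])
    self.autoSave()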
This is Python code that I used to manipulate a file table1 using a reference file pds_ref.
So pds_ref looks like this:
|THE_TABLE|THE_KEY
|table1|3
|table1|1
table1 looks like this
|ID|NAME
|1|Imran
|2|Peter
|3|Pedro
|4|Carlos
The idea is to use the references in pds_ref to remove the records in whatever table is listed, matching on the corresponding key; in this case records 1 and 3 are to be deleted.
This Python code works when run as plain Python:
import csv

with open("pds_ref", "rb") as ref_file:
    refreader = csv.DictReader(ref_file, delimiter='|')
    reftable = [row for row in refreader]
    refheader = refreader.fieldnames

for refrow in reftable:
    print refrow['THE_TABLE']
    print refrow['THE_KEY']
    with open(refrow['THE_TABLE'], "rbw") as infile:
        reader = csv.DictReader(infile, delimiter='|')
        table = [row for row in reader]
        header = reader.fieldnames
    with open(refrow['THE_TABLE'], "wb") as outfile:
        writer = csv.DictWriter(outfile, header, delimiter='|')
        writer.writeheader()
        for row in table:
            if row['ID'] != refrow['THE_KEY']:
                writer.writerow(row)
Now, I want to do this using Lambda, such that the function is triggered every time someone uploads the pds_ref file.
I got as far as being able to get the pds_ref file and read each line, but I'm having trouble doing the equivalent of opening and writing back the amended table1 file. Any help appreciated.
import boto3
import csv
import io

def lambda_handler(event, context):
    s3 = boto3.client("s3")
    if event:
        print("Event : ", event)
        file_obj = event["Records"][0]
        filename = str(file_obj['s3']['object']['key'])
        bucketname = str(file_obj['s3']['bucket']['name'])
        print("Filename: ", filename)
        print("Bucket: ", bucketname)
        fileObj = s3.get_object(Bucket="lambda-trig1", Key=filename)
        print("fileObj: ", fileObj)
        file_content = fileObj["Body"].read().decode('utf-8')
        print(file_content)
        f_pds_ref = s3.get_object(Bucket="lambda-trig1", Key='pds_ref')
        fc_pds_ref = f_pds_ref['Body'].read().decode('utf-8').splitlines(True)
        for refrow in csv.DictReader(fc_pds_ref, delimiter='|'):
            print refrow['THE_TABLE']
            print refrow['THE_KEY']
            current_table = refrow['THE_TABLE']
            current_key = refrow['THE_KEY']
            f_the_next_table = s3.get_object(Bucket="lambda-trig1", Key=current_table)
            fc_the_next_table = f_the_next_table['Body'].read().decode('utf-8').splitlines(True)
            with open(refrow[f_the_next_table], "rbw") as infile:
                reader = csv.DictReader(infile, delimiter='|')
                # table = [row for row in reader]
                # header = reader.fieldnames
                # print (header)
Before running the process to update the other tables, you want to ensure that it runs only for Put events.
Here are a few additions to your current steps after reading pds_ref:
Group all THE_KEYs by THE_TABLE. This allows you to perform a single iteration per table object instead of multiple ones for content in the same table object.
For each THE_TABLE group, read the table object, filter away the lines whose IDs appear in the THE_KEY group, and write the filtered contents back to the table object.
This can be implemented in the following manner:
from contextlib import contextmanager
from csv import DictReader, DictWriter
from collections import defaultdict
import io

import boto3

s3 = boto3.client("s3")
BUCKET = "creeper-bank"
DELIMITER = "|"
TABLE_OBJECT_COLUMNS = ['', 'ID', 'NAME']
WATCH_KEY = "pds_ref"

def content_as_dict_reader(content):
    yield DictReader(
        content.splitlines(),
        delimiter=DELIMITER)

@contextmanager
def tables_and_lines_for_deletion():
    object_ = s3.get_object(
        Bucket=BUCKET, Key=WATCH_KEY
    )
    content = object_["Body"].read().decode('utf-8')
    return content_as_dict_reader(content)

@contextmanager
def table_record(table):
    object_ = s3.get_object(
        Bucket=BUCKET, Key=table
    )
    content = object_["Body"].read().decode('utf-8')
    return content_as_dict_reader(content)

def object_table(table, record):
    with io.StringIO() as file_:
        writer = DictWriter(
            file_,
            fieldnames=TABLE_OBJECT_COLUMNS,
            delimiter=DELIMITER
        )
        writer.writeheader()
        writer.writerows(list(record))
        s3.put_object(
            Bucket=BUCKET,
            Key=table,
            Body=file_.getvalue()
        )

def lambda_handler(event, context):
    if not event:
        print("Function must be triggered via a published event")
        return
    event_record, *_ = event["Records"]
    match_watchkey = True
    try:
        event_name = str(event_record['eventName'])
        if "Put" not in event_name:
            match_watchkey = False
        s3_event = event_record['s3']
        print("checking if S3 event is a put one for :WATCH_KEY")
        key = s3_event['object']['key']
        bucket = s3_event['bucket']['name']
        if key != WATCH_KEY:
            match_watchkey = False
        if bucket != BUCKET:
            match_watchkey = False
    except KeyError:
        # Handle when event_record isn't an S3 one.
        match_watchkey = False
    if not match_watchkey:
        print("Published event did not match :WATCH_KEY.")
        return
    print("S3 event is a put one for :WATCH_KEY!")
    table_group = defaultdict(list)
    print("Reading :WATCH_KEY content")
    with tables_and_lines_for_deletion() as tables:
        for dct in tables:
            table_k = dct['THE_TABLE']
            table_v = dct['THE_KEY']
            table_group[table_k].append(table_v)
    print("Updating objects found in :WATCH_KEY content")
    for t, ids in table_group.items():
        record_update = None
        with table_record(t) as record:
            record_update = (
                dct
                for dct in record
                if dct["ID"] not in ids
            )
            object_table(t, record_update)
    print("Update completed!")
    return
Testing with a sample event:
sample_event = {
    'Records': [
        {
            'eventName': 'ObjectCreated:Put',
            's3': {
                'bucket': {
                    'name': 'creeper-bank',
                },
                'object': {
                    'key': 'pds_ref',
                }
            },
        }
    ]
}
lambda_handler(sample_event, {})
I must extract data from an IFC file, but when I read the file it seems I make some mistake that I don't understand:
First: I have a key;
Second: I read the file;
Third: I create a string and put it in a CSV-like file;
Fourth: the visual components are in PySide2.
The code:
orderNr = self.getIFC_ProjectDetail(readFile, self.orderNrLineEdit.text())
custNr = self.getIFC_ProjectDetail(readFile, self.custNoLineEdit.text())
if len(custNr) == 0:
    custNr = "9999"
projManager = self.getIFC_ProjectDetail(readFile, self.projManagerLineEdit.text())
drawer = self.getIFC_ProjectDetail(readFile, self.drawerLineEdit.text())
ifcFile = open(readFile, 'r')
csvFile = open(csvFileName, 'w')
lineTokens = []
csvFile.write("GUID;Type;UserText1;UserText2;UserText3;UserText4;UserText5;UserText6;UserText7;\n")
for line in ifcFile:
    if ("IFCSLAB" in line or "IFCWALLSTANDARDCASE" in line):
        if len(uID) > 0:
            if uID == oldID:
                uID = "ciao"
            csvFile.write("{0};{1};{2};{3};{4};{5};{6};{7};{8};\n".format(uID, matType, orderNr, custNr, assPos, partPos, fab, projManager, drawer))
            oldID = uID
            uID = ""
            matType = ""
            assPos = ""
            partPos = ""
            fab = ""
        lineTokens = line.split(",")
        headerLine = line[0:line.find("'")]
        line = line[line.find("(") + 1:len(line)]
        lineTokens = line.split(",")
        uID = lineTokens[0]
        uID = uID[1:len(uID) - 1]
        matType = lineTokens[2]
        matType = matType[1:len(matType) - 1]
        floorName = lineTokens[4]
        floorName = floorName[1:len(matType) - 1]
    if self.assPosLineEdit.text() in line:
        assPos = self.getIFC_EntityProperty(line, self.assPosLineEdit.text())
    if self.partPosLineEdit.text() in line:
        partPos = self.getIFC_EntityProperty(line, self.partPosLineEdit.text())
    if self.fabricatorLineEdit.text() in line:
        fab = self.getIFC_EntityProperty(line, self.fabricatorLineEdit.text())
    if i == progDlg.maximum():
        csvFile.write("{0};{1};{2};{3};{4};{5};{6};{7};{8};\n".format(uID, matType, orderNr, custNr, assPos, partPos, fab, projManager, drawer))
ifcFile.close()
csvFile.close()
def getIFC_EntityProperty(self, row, ifcKey):
    s = ""
    lineTokens = []
    if ifcKey in row:
        lineTokens = row.split(",")
        ifcTag = lineTokens[2]
        ifcTag = ifcTag[0:ifcTag.find("(")]
        #print(ifcTag)
        if len(ifcTag) > 1:
            s = row[row.find(ifcTag) + len(ifcTag) + 2:row.rfind(',') - 2]
    return s

def getIFC_ProjectDetail(self, fileName, ifcKey):
    s = ""
    content = open(fileName, 'r')
    lineTokens = []
    for line in content:
        if ifcKey in line:
            lineTokens = line.split(",")
            ifcTag = lineTokens[2]
            ifcTag = ifcTag[0:ifcTag.find("(")]
            if len(ifcTag) > 1:
                s = line[line.find(ifcTag) + len(ifcTag) + 2:line.rfind(',') - 2]
                break
    content.close()
    return s
The problem is that it skips a value: it shifts by one row and posts the data on the following line in the CSV-like file, creating the line with the right uID but leaving that line's other fields blank.
Can anyone help me?