How do I download a file over HTTP and save it so it can be read, if the file is not present in my directory?
My code:
def data():
    try:
        with open('sample.json', 'r') as openfile:
            json_object = json.load(openfile)
    except FileNotFoundError as e:
        print(e)
    else:
        print('Downloading NOW...')
        url = 'https://margincalculator.angelbroking.com/OpenAPI_File/files/OpenAPIScripMaster.json'
        d = requests.get(url).json()
        with open("sample.json", "w") as outfile:
            json.dump(d, outfile)
        print('sym downloaded')
    finally:
        with open('sample.json', 'r') as openfile:
            json_object = json.load(openfile)
            print(json_object)
What am I trying to do?
step 1: Try: read the file from the directory
step 2: if the file is not found in the directory, then get it from the url and export it
step 3: then read it again
step 4: if there is still an error, then print('Error in code Please Check')
else print the read file
Thank you for taking the time to respond to my question.
Use isfile(<file>) instead; it is a better option in this case. isfile('sample.json') checks whether the file exists:
from os.path import isfile
import json
import requests

def data():
    file = 'sample.json'
    if isfile(file):
        with open(file, 'r') as openfile:
            json_object = json.load(openfile)
    else:
        print('Downloading NOW...')
        url = 'https://margincalculator.angelbroking.com/OpenAPI_File/files/OpenAPIScripMaster.json'
        d = requests.get(url).json()
        with open(file, "w") as outfile:
            json.dump(d, outfile)
        print('sym downloaded')
        with open(file, 'r') as openfile:
            json_object = json.load(openfile)
    print(json_object)
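If you prefer to keep the original try/except structure, note that the download belongs in the except branch rather than else (else runs only when no exception was raised). A minimal sketch of the four steps described in the question, assuming json and requests are already imported:

def data():
    url = 'https://margincalculator.angelbroking.com/OpenAPI_File/files/OpenAPIScripMaster.json'
    try:
        # step 1: try to read the file from the directory
        with open('sample.json', 'r') as openfile:
            json_object = json.load(openfile)
    except FileNotFoundError:
        # step 2: the file is missing, so download and export it
        print('Downloading NOW...')
        d = requests.get(url).json()
        with open('sample.json', 'w') as outfile:
            json.dump(d, outfile)
        # step 3: read it again
        try:
            with open('sample.json', 'r') as openfile:
                json_object = json.load(openfile)
        except (FileNotFoundError, json.JSONDecodeError):
            # step 4: still failing, so report and bail out
            print('Error in code Please Check')
            return
    print(json_object)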
Can anybody advise what could be wrong with my code?
I am trying to write a method that removes the single-line comments from the content.
The method should also return the single-line comments that start with '#'.
import os

def deleteComments(file):
    try:
        my_file = open(file, 'r')
        data = my_file.read()
        clean = ""
        comment = 0
        if i[0] == "#":
            comment += 1
        else:
            pass
        with open("clean-", "w") as f:
            f.write(clean)
            f.close()
        my_file.close()
    except:
        print("An error occurred with accessing the files")
        return file
This should make it work.
import os

def deleteComments(file):
    try:
        my_file = open(file, 'r')
        data = my_file.read()
        clean = ""
        comments_count = 0
        for i in data.split('\n'):
            # startswith() is safe on empty lines, unlike i[0]
            if i.startswith("#"):
                clean += i
                clean += '\n'
                comments_count += 1
        name = os.path.basename(file)  # 'path' was undefined; use the argument
        with open("clean-" + name, "w") as f:
            f.write(clean)
        my_file.close()
        return comments_count
    except OSError:
        print("An error occurred with accessing the files")
        return file
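A quick usage sketch, assuming a file named script.py in the working directory (the name is only illustrative):

# Create a small sample file, then extract its comment lines.
with open("script.py", "w") as f:
    f.write("# first comment\nx = 1\n# second comment\n")

count = deleteComments("script.py")
print(count)  # 2
print(open("clean-script.py").read())
# prints:
# # first comment
# # second comment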
I have a question about reading files in Python (txt format). I read a file, where argv[1] is the number of bytes to read at a time, and store the chunks in a list, but when I write them to another file, it is not the same as the original file. How can I fix this?
Read file:
fh = open(file_name, "rb")
imfor = fh.read(mss)
file_content = []
file_content.append(imfor)
while (imfor):
    file_content.append(imfor)
    imfor = fh.read(mss)
fh.close()
Write file:
fh = open("test1R.txt", "wb")
for currContent in file_content:
fh.write(currContent)
fh.close
The immediate bug is that the first chunk is appended twice: once before the loop and again at the top of the loop body, so the output starts with a duplicated block. Beyond that, it is better to use the with open context manager to read and write files. You also don't need to manually append the contents to a list; file.readlines() does that for you.
Here is some code to help you with that:
from sys import argv

# we first check if the file exists
try:
    print("reading file")
    with open(argv[1], "r") as input_file:
        file_contents = input_file.readlines()  # stores the file content in a list
except FileNotFoundError:
    print(f"File {argv[1]} not found")
    exit(1)

# check if the user provided a name for the output file
try:
    out_file = argv[2]
except IndexError:
    out_file = "outfile.txt"

# write to the new file
with open(out_file, "w") as out:
    out.writelines(file_contents)  # each line already ends with '\n'
    print(f"Wrote to {out_file}")
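Note that the original code read and wrote in binary mode; switching to text mode can translate newlines on some platforms. A byte-exact chunked copy that also avoids the double append, sketched with illustrative file names and chunk size:

def copy_in_chunks(src, dst, chunk_size=4096):
    # Read and write raw bytes so the copy is identical to the source.
    with open(src, "rb") as fin, open(dst, "wb") as fout:
        while True:
            chunk = fin.read(chunk_size)
            if not chunk:  # empty bytes means end of file
                break
            fout.write(chunk)

copy_in_chunks("test1.txt", "test1R.txt")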
I want to save multiple accounts for the same website, but the code replaces the existing password when I try to add a second account for a site I have already saved. What am I doing wrong?
Here is my Code:
def save():
    websites = website_entry.get()
    email = user_entry.get()
    passwords = password_entry.get()
    new_data = {
        websites: {
            "Email": email,
            "Password": passwords,
        }
    }
    if len(websites) == 0 or len(email) == 0 or len(passwords) == 0:
        messagebox.showinfo(title="Oops!", message="Please give all the required information")
    else:
        try:
            with open("data.json", "r") as data_file:
                data = json.load(data_file)
        except FileNotFoundError:
            with open("data.json", "w") as data_file:
                json.dump(new_data, data_file, indent=4)
        else:
            data.update(new_data)
            with open("data.json", "w") as data_file:
                json.dump(data, data_file, indent=4)
        finally:
            user_entry.delete(0, END)
            website_entry.delete(0, END)
            password_entry.delete(0, END)
The problem is not the file mode: dictionary keys are unique, so data.update(new_data) replaces the entire entry for a website that already exists. Opening data.json in append mode would not fix it either; appending a second JSON object produces a file that json.load can no longer parse. To keep several accounts for one website, store a list of accounts under each website key and append to that list.
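A minimal sketch of that approach, reusing the variables from the question's save() function and assuming you are free to change the stored schema from one dict per website to a list of account dicts:

new_account = {"Email": email, "Password": passwords}

try:
    with open("data.json", "r") as data_file:
        data = json.load(data_file)
except FileNotFoundError:
    data = {}

# setdefault creates the list on first use, then we append to it,
# so earlier accounts for the same website are preserved
data.setdefault(websites, []).append(new_account)

with open("data.json", "w") as data_file:
    json.dump(data, data_file, indent=4)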
I am working on a script which downloads a large audit-log CSV file from Azure DevOps and filters the data according to a given condition. It works for a small CSV file, but for a file with a lot of data it fails with:
fields = next(reader)
StopIteration
Can someone help with the changes required in the script? I am using Python 3.7.9 on macOS.
def getproject(url, pat):
    response = requests.get(url, auth=HTTPBasicAuth(username='', password=pat))
    if response.status_code == 200:
        url_data = response.content
        tempfile = open("temp.csv", "wb")
        tempfile.write(url_data)
        tempfile.close()
        return url_data
    else:
        print("\nERROR : Unable to connect to the server...")

def FilterData():
    lists = []
    pro_name = []
    RepoId = []
    RepoName = []
    new_file = open("temp_new.csv", 'w', newline='')
    writer = csv.writer(new_file)
    with open("temp.csv", 'r') as readFile:
        reader = csv.reader(readFile)
        fields = next(reader)
        lists.append(fields)
        for row in reader:
            for field in row:
                if field == "Git.RepositoryCreated":
                    lists.append(row)
        writer.writerows(lists)
    new_file.close()
    os.remove("temp.csv")
    timestamp = (datetime.datetime.now())
    timestamp = timestamp.strftime("%d%B%Y_%H%M%S")
    file_name = "Data2_" + str(timestamp) + ".csv"
    file1 = open("temp_new.csv", 'r')
    df = pd.read_csv(file1)
    for i in df["Data"]:
        res = json.loads(i)
        pro_name.append(res['ProjectName'])
        RepoId.append(res['RepoId'])
        RepoName.append(res['RepoName'])
    Disp_Name = df["ActorDisplayName"]
    ActionId = df["ActionId"]
    TimeStamp = df["Timestamp"]
    file1.close()
    os.remove("temp_new.csv")
    Header = ["Actor Display Name", "Project Name", "RepoName", "RepoId", "ActionId", "Timestamp"]
    d = [Disp_Name, pro_name, RepoName, RepoId, ActionId, TimeStamp]
    export_data = zip_longest(*d, fillvalue='')
    with open(file_name, 'w', newline='') as myfile:
        wr = csv.writer(myfile)
        wr.writerow(Header)
        wr.writerows(export_data)

if __name__ == '__main__':
    parser = argparse.ArgumentParser("This is used for getting list of the projects")
    parser.add_argument("-o", dest="org", help="org name")
    parser.add_argument("-p", dest="pat", help="pat value")
    parser.add_argument("-sd", dest="sdate", help="Start Date")
    parser.add_argument("-ed", dest="edate", help="End Date")
    args = parser.parse_args()
    org = args.org
    token = args.pat
    startdate = args.sdate
    enddate = args.edate
    url = "https://auditservice.dev.azure.com/{org_name}/_apis/audit/downloadlog?format=csv&startTime={startdt}&endTime={enddt}&api-version=6.1-preview.1".format(org_name=org, startdt=startdate, enddt=enddate)
    # call "getproject" function to check url and token to further create required csv
    getproject(url, token)
    FilterData()
[+] In your getproject function, you should use a try/except block to handle HTTP errors etc.
[+] If the csv file you're trying to download is quite large, it may be best to write the data in chunks.
As for the fields = next(reader) StopIteration error, I'm not sure. ¯\_(ツ)_/¯ (One likely cause: next(reader) raises StopIteration when temp.csv is empty, which would happen if the download failed silently.)
Try throwing your code in the debugger and stepping through it.
See: download large file in python with requests
def getproject(url, pat):
    try:
        # NOTE the stream=True parameter below
        with requests.get(url, auth=HTTPBasicAuth(username='', password=pat), stream=True) as r:
            r.raise_for_status()
            with open('tmp.csv', 'wb') as f:
                for chunk in r.iter_content(chunk_size=8192):
                    # If you have a chunk-encoded response, uncomment the if
                    # and set the chunk_size parameter to None.
                    # if chunk:
                    f.write(chunk)
    except requests.exceptions.ConnectionError as c_error:
        print(f"[-] Connection Error: {c_error}")
    except requests.exceptions.Timeout as t_error:
        print(f"[-] Connection Timeout Error: {t_error}")
    except requests.exceptions.RequestException as req_error:
        print(f"[-] Some Ambiguous Exception: {req_error}")

# This way seems faster, based upon the comments of the link I shared:
import requests
import shutil

def download_file(url):
    local_filename = url.split('/')[-1]
    with requests.get(url, stream=True) as r:
        with open(local_filename, 'wb') as f:
            shutil.copyfileobj(r.raw, f)
    return local_filename
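To guard against the empty-file case directly, next() accepts a default instead of raising StopIteration. A small sketch around the top of the question's FilterData function:

def FilterData():
    lists = []
    with open("temp.csv", "r") as readFile:
        reader = csv.reader(readFile)
        # next() with a default returns None instead of raising StopIteration
        fields = next(reader, None)
        if fields is None:
            print("temp.csv is empty - the download probably failed")
            return
        lists.append(fields)
        # ... continue filtering rows as before ...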
I would like to loop through the files in a directory, do something with each file, and then write out the result for each one.
But my files can't be read, because Python treats the file names as string objects, not readable file objects.
Is there a way to avoid this?
import re
import os

def create_filename_for_fileout(f1):
    fileout_n = f1.replace("TT", "out")
    fileout = "C:\\Users\\KP\\Desktop\\FSC_Treetag\\out\\" + str(fileout_n)
    return fileout

for file_in in os.listdir('C:\\Users\\KP\\Desktop\\FSC_Treetag'):
    filename = str(file_in)
    file_out = create_filename_for_fileout(filename)
    open(file_in, 'r')
    open(file_out, 'w')
    content_file = file_in.readlines()
    for ln in content_file:
        regex = re.compile('(.*\t(ADJ|ADV|NOM|VER:cond|VER:futu|VER:impe|VER:impf|VER:infi|VER:pper|VER:pres|VER:pres|VER:simp|VER:subi|VER:subp)\t(.*))')
        res = regex.search(ln)
        if res:
            # categ = res.group(2)
            lemme = res.group(3)
            file_out.write(str(lemme) + "\n")
    file_out.close()
    file_in.close()
Result:
content_file = file_in.readlines()
AttributeError: 'str' object has no attribute 'readlines'
You're not assigning the result of open() to any variable, so you have nothing to read from or write to.
# Change
open(file_in, 'r')
open(file_out, 'w')

# to
input_file = open(file_in, 'r')
output_file = open(file_out, 'w')

for ln in input_file:
    # do your processing
    if res:
        lemme = res.group(3)
        output_file.write(str(lemme) + "\n")
You are not assigning the return value of open to the respective handles (open returns a file object).
filename = str(file_in)
file_out = create_filename_for_fileout(filename)
open(file_in, 'r')
open(file_out, 'w')
Should be:
file_out = open(create_filename_for_fileout(file_in), 'w')
file_in = open(file_in, 'r')
NOTE: for clarity's sake, it is a good idea to use a different name for the input file handle.
Check: https://docs.python.org/2/library/functions.html#open
open(name[, mode[, buffering]])
Open a file, returning an object of the file type described in section File Objects. If the file cannot be opened, IOError is raised.
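One more pitfall worth noting: os.listdir returns bare file names, so open(file_in) only works when the script runs from that same directory. A sketch that joins the directory explicitly and uses with so the files close themselves, reusing create_filename_for_fileout and the regex from the question:

import os
import re

src_dir = 'C:\\Users\\KP\\Desktop\\FSC_Treetag'
# compile the pattern once, outside the loop
regex = re.compile('(.*\t(ADJ|ADV|NOM|VER:cond|VER:futu|VER:impe|VER:impf'
                   '|VER:infi|VER:pper|VER:pres|VER:simp|VER:subi|VER:subp)\t(.*))')

for name in os.listdir(src_dir):
    file_out = create_filename_for_fileout(name)  # helper from the question
    # join the directory so the open works from any working directory
    with open(os.path.join(src_dir, name), 'r') as fin, \
         open(file_out, 'w') as fout:
        for ln in fin:
            res = regex.search(ln)
            if res:
                fout.write(res.group(3) + "\n")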