python: How to write variable-contents to a file?

How do I pass the variable's contents to the f.write() function, so that the contents are written to the file?
i.e. pass the variable jsonData to f.write()
def get(self):
    url = 'http://httpbin.org/get'
    r = requests.get(url)
    data = r.text
    jsonData = json.loads(data)
    # TODO: send the jsonData variable to the log file.
    f = open('test_logs.txt', 'a+')
    f.write('jsonData')
    f.close()
    return jsonData

f.write(str(jsonData))
Though you should probably be writing to a JSON file instead.
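For example, a minimal sketch with json.dump replacing the f.write lines, assuming the goal is a machine-readable log (the test_logs.json name is just an illustration):
import json

# json.dump serializes the object and writes it in one step;
# the with-block closes the file automatically.
with open('test_logs.json', 'a') as f:
    json.dump(jsonData, f)
    f.write('\n')  # one JSON document per line keeps the log easy to parse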

Related

Writing output to its own new file with python

I'm struggling to write the output of the loop to a new file. I want to name each new file after the variable, plus .txt, but I'm getting errors on the print part of the code.
import csv
import requests

with open('test.csv', 'r') as file:
    next(file)  # drop header
    varlist = [row[0] for row in csv.reader(file, delimiter=",")]

for var in varlist:
    payload = {'api_key': 'API_key', 'query': str(var), 'results': '10', 'country': 'gb', 'page': '0'}
    resp = requests.get('https://api.example.com/google', params=payload)
    print(resp.text, file=(str(var) + '.txt'))
How to handle this correctly?
Solved: the file keyword of print() expects a stream object, not a filename string, so open the file in 'w' mode and pass the handle:
for var in varlist:
    payload = {'api_key': 'API_key', 'query': str(var), 'results': '10', 'country': 'gb', 'page': '0'}
    resp = requests.get('https://api.example.com/google', params=payload)
    print(resp.text, file=open(str(var) + '.txt', mode='w'))
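Note that print(..., file=open(...)) leaves each file handle open until the interpreter eventually collects it. A with block is just as short and guarantees the file is closed; a possible variant of the same loop:
for var in varlist:
    payload = {'api_key': 'API_key', 'query': str(var), 'results': '10', 'country': 'gb', 'page': '0'}
    resp = requests.get('https://api.example.com/google', params=payload)
    with open(str(var) + '.txt', mode='w') as out_file:
        print(resp.text, file=out_file)  # closed automatically when the block exits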

Storing API data in JSON format

I have one API that returns all location ids and their respective info (address, lat, long). But if I want to fetch extra attributes such as location name, location area, and location access, I need to pass each location id one by one as a parameter to the API.
I have written the code below, but the problem is that the data only arrives on the console, and I don't know how to capture this information as JSON and then write it to a text file.
ids = location_id_df["id"]  # location ids stored in a dataframe
authorization = "####################### "
print("started")

def test_api(url, authorization, rawfile, ids):
    for i in range(0, 1000, 50):
        for j in ids:
            #print(j)
            try:
                request = urllib.request.Request(
                    'https:….. /locations/{}'.format(j) + "?offset=" + str(i),
                    headers={'authorization': authorization})
                response = urllib.request.urlopen(request).read()
                print(response)
            except HTTPError as e:
                print(e)
                sys.exit(0)
        with open(rawfile + "_offset_" + str(i) + ".json", "wb") as json_download:
            json_download.write(response)

test_api(url, authorization, rawfile, ids)
I need to fetch the responses into JSON files like:
5182021_offset_0.json #contains some location ids with extra attribute data
5182021_offset_50.json #contains some location ids with extra attribute data
5182021_offset_100.json #contains some location ids with extra attribute data
........................
.......................
Here is a simplified version of your example that queries an API that returns JSON and saves each result to a file.
import urllib.request
import json

for i in range(2):
    responses = []
    for j in range(3):
        request = urllib.request.Request("https://www.boredapi.com/api/activity/")
        response = urllib.request.urlopen(request)
        if response.status == 200:
            try:
                response_bytes = response.read()
            finally:
                response.close()
            response_string = response_bytes.decode("utf8")
            response_data = json.loads(response_string)
            responses.append(response_data)
    file_name = "data-{}.json".format(i)
    with open(file_name, "w") as f:
        json.dump(responses, f)
I would suggest using the Requests library, as it tends to have a simpler API than urllib and is widely used by the Python community. Here is the same example with Requests.
import requests
import json

for i in range(2):
    responses = []
    for j in range(3):
        response = requests.get("https://www.boredapi.com/api/activity/")
        if response.status_code == 200:
            response_data = response.json()
            responses.append(response_data)
    file_name = "data-{}.json".format(i)
    with open(file_name, "w") as f:
        json.dump(responses, f)
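If the goal is the offset-named files from the question, the same pattern can be mapped onto that naming scheme. This is only a sketch: base_url, authorization, ids, and rawfile below are placeholders for the values elided in the question.
import requests
import json

# Placeholders for the question's real values:
base_url = "https://example.invalid"   # the asker's API host (truncated in the question)
authorization = "#######################"
ids = []                               # the location ids from the dataframe
rawfile = "5182021"

for i in range(0, 1000, 50):
    responses = []
    for j in ids:
        response = requests.get(base_url + '/locations/{}'.format(j),
                                params={'offset': i},
                                headers={'authorization': authorization})
        if response.status_code == 200:
            responses.append(response.json())
    # one file per offset, e.g. 5182021_offset_0.json
    with open(rawfile + "_offset_" + str(i) + ".json", "w") as f:
        json.dump(responses, f)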

importing value to request string from input

I am a fairly new programmer and I am writing a script with a request call in it, like r = requests.post("https://google.com", data=...). Instead of the URL being hard-coded in the script, I need it to come from user input, so that, say, https://google.com can be changed to https://www.youtube.com by the user. The input should then be stored in a .txt file and read back later, as something like r = requests.post("url.txt").
Ask for the url as user input:
my_url = input('enter url')
Save it to a text file (the with statement closes the file for you, so no explicit close() is needed):
with open('url.txt', 'w') as txt_file:
    txt_file.write(my_url)
Read it back from the text file:
with open('url.txt', 'r') as my_url_file:
    read_url = my_url_file.read()
So you can try something like:
my_url = input('enter url')
with open('url.txt', 'w') as txt_file:
    txt_file.write(my_url)
with open('url.txt', 'r') as my_url_file:
    read_url = my_url_file.read()
r = requests.post(read_url)
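One small caveat: if url.txt is ever edited by hand, read() can pick up a trailing newline or spaces, so stripping whitespace before the request is a cheap safeguard:
r = requests.post(read_url.strip())  # .strip() drops any stray whitespace from the file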

python script failing to read csv file with error - StopIteration

I am working on a script which downloads a large audit-log csv file from Azure DevOps and filters the data according to a given condition. This works for a small csv file, but for a file with a lot of data it fails with:
fields = next(reader)
StopIteration
Can someone help with the changes required in the script? I am using Python 3.7.9 on macOS.
def getproject(url, pat):
    response = requests.get(url, auth=HTTPBasicAuth(username='', password=pat))
    if response.status_code == 200:
        url_data = response.content
        tempfile = open("temp.csv", "wb")
        tempfile.write(url_data)
        tempfile.close()
        return url_data
    else:
        print("\nERROR : Unable to connect to the server...")

def FilterData():
    lists = []
    pro_name = []
    RepoId = []
    RepoName = []
    new_file = open("temp_new.csv", 'w', newline='')
    writer = csv.writer(new_file)
    with open("temp.csv", 'r') as readFile:
        reader = csv.reader(readFile)
        fields = next(reader)
        lists.append(fields)
        for row in reader:
            for field in row:
                if field == "Git.RepositoryCreated":
                    lists.append(row)
        writer.writerows(lists)
    new_file.close()
    os.remove("temp.csv")
    timestamp = (datetime.datetime.now())
    timestamp = timestamp.strftime("%d%B%Y_%H%M%S")
    file_name = "Data2_" + str(timestamp) + ".csv"
    file1 = open("temp_new.csv", 'r')
    df = pd.read_csv(file1)
    for i in df["Data"]:
        res = json.loads(i)
        pro_name.append(res['ProjectName'])
        RepoId.append(res['RepoId'])
        RepoName.append(res['RepoName'])
    Disp_Name = df["ActorDisplayName"]
    ActionId = df["ActionId"]
    TimeStamp = df["Timestamp"]
    file1.close()
    os.remove("temp_new.csv")
    Header = ["Actor Display Name", "Project Name", "RepoName", "RepoId", "ActionId", "Timestamp"]
    d = [Disp_Name, pro_name, RepoName, RepoId, ActionId, TimeStamp]
    export_data = zip_longest(*d, fillvalue='')
    with open(file_name, 'w', newline='') as myfile:
        wr = csv.writer(myfile)
        wr.writerow(Header)
        wr.writerows(export_data)

if __name__ == '__main__':
    parser = argparse.ArgumentParser("This is used for getting list of the projects")
    parser.add_argument("-o", dest="org", help="org name")
    parser.add_argument("-p", dest="pat", help="pat value")
    parser.add_argument("-sd", dest="sdate", help="Start Date")
    parser.add_argument("-ed", dest="edate", help="End Date")
    args = parser.parse_args()
    org = args.org
    token = args.pat
    startdate = args.sdate
    enddate = args.edate
    url = ("https://auditservice.dev.azure.com/{org_name}/_apis/audit/downloadlog"
           "?format=csv&startTime={startdt}&endTime={enddt}&api-version=6.1-preview.1"
           ).format(org_name=org, startdt=startdate, enddt=enddate)
    # call the "getproject" function to check url and token, then create the required csv
    getproject(url, token)
    FilterData()
[+] In your getproject function, you should use a try/except block to handle HTTP errors etc.
[+] If the csv file you're trying to download is quite large, it may be best to write the data in chunks.
As for the fields = next(reader) StopIteration error, I'm not sure. ¯\_(ツ)_/¯
Try throwing your code in the debugger and stepping through it.
See: download large file in python with requests
def getproject(url, pat):
    try:
        # NOTE the stream=True parameter below
        with requests.get(url, auth=HTTPBasicAuth(username='', password=pat), stream=True) as r:
            r.raise_for_status()
            with open('tmp.csv', 'wb') as f:
                for chunk in r.iter_content(chunk_size=8192):
                    # If you have a chunk-encoded response, uncomment the if
                    # and set the chunk_size parameter to None.
                    #if chunk:
                    f.write(chunk)
    except requests.exceptions.ConnectionError as c_error:
        print(f"[-] Connection Error: {c_error}")
    except requests.exceptions.Timeout as t_error:
        print(f"[-] Connection Timeout Error: {t_error}")
    except requests.exceptions.RequestException as req_error:
        print(f"[-] Some Ambiguous Exception: {req_error}")
# This way seems faster, based upon the comments of the link I shared
import requests
import shutil

def download_file(url):
    local_filename = url.split('/')[-1]
    with requests.get(url, stream=True) as r:
        with open(local_filename, 'wb') as f:
            shutil.copyfileobj(r.raw, f)
    return local_filename
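On the StopIteration itself, one likely cause is worth checking: next(reader) raises StopIteration when the file has no rows at all, which is exactly what happens when the download fails and temp.csv is left empty or missing. Passing a default to next() turns the crash into something you can test for; a minimal sketch of that guard inside FilterData:
fields = next(reader, None)  # returns None instead of raising on an empty file
if fields is None:
    raise SystemExit("temp.csv has no rows - the download probably failed")
lists.append(fields)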

Can't print items in CSV

When I open the sdata.csv file it will not iterate; no error is shown, it simply does not print. Why could this be? I even did print(g) and it shows the file is being read properly. I am also trying to write data to the same file, and the same blank file occurs with only the heading in it.
import urllib.request as request
import json
from urllib.request import urlopen, Request
import requests
import demjson
import csv
import time

req = Request('https://api.gameslabs.net/1.0.0/exchange', headers={'User-Agent': 'Mozilla/5.0'})
with request.urlopen(req) as response:
    if response.getcode() == 200:
        source = response.read()
        data = json.loads(source)
    else:
        print('An error occurred while attempting to retrieve data from the API.')
y = json.dumps(data)
x = json.loads(y)
f = csv.writer(open("item-com.csv", "w+"))
# Write CSV Header, If you dont need that, remove this line
f.writerow(["symbol", "buy_game", "buy_item", "buy_name", "sell_game", "sell_item", "sell_name"])
for x in x:
    f.writerow([x["symbol"],
                x["buy"]["game"],
                x["buy"]["item"],
                x["buy"]["name"],
                x["sell"]["game"],
                x["sell"]["item"],
                x["sell"]["name"]])
o = csv.DictReader(open("item-com.csv"))
for row in o:
    print(row['buy_name'])
req2 = Request('https://api.gameslabs.net/1.0.0/exchange/symbol/MS_IRON_PICKAXE/candles?timeFrame=day',
               headers={'User-Agent': 'Mozilla/5.0'})
with request.urlopen(req2) as response:
    if response.getcode() == 200:
        source2 = response.read()
        data2 = json.loads(source2)
    else:
        print('An error occurred while attempting to retrieve data from the API.')
xdum = json.dumps(data2)
bdum = json.loads(xdum)
ged = csv.writer(open("sdata.csv", "w+"))
ged.writerow(["timestamp", "low", "open", "close", "high", "volume"])
for bdum in bdum:
    ged.writerow([bdum["timestamp"],
                  bdum["low"],
                  bdum["open"],
                  bdum["close"],
                  bdum["high"]])
g = csv.DictReader(open("sdata.csv"))
for row in g:
    print(row['timestamp'])
You are writing to and reading from the same files, but you don't ensure the file is closed in between. If you use a context manager it will take care of that for you; I notice you are already using context managers for the url responses.
I've modified your code slightly to use context managers for file management:
...
with open("item-com.csv", "w+") as csv_file:
    f = csv.writer(csv_file)
    # Write CSV Header, If you dont need that, remove this line
    f.writerow(["symbol", "buy_game", "buy_item", "buy_name", "sell_game", "sell_item", "sell_name"])
    for x in x:
        f.writerow([x["symbol"],
                    x["buy"]["game"],
                    x["buy"]["item"],
                    x["buy"]["name"],
                    x["sell"]["game"],
                    x["sell"]["item"],
                    x["sell"]["name"]])
with open("item-com.csv") as csv_file:
    o = csv.DictReader(csv_file)
    for row in o:
        print(row['buy_name'])
req2 = Request('https://api.gameslabs.net/1.0.0/exchange/symbol/MS_IRON_PICKAXE/candles?timeFrame=day',
               headers={'User-Agent': 'Mozilla/5.0'})
with request.urlopen(req2) as response:
    if response.getcode() == 200:
        source2 = response.read()
        data2 = json.loads(source2)
    else:
        print('An error occurred while attempting to retrieve data from the API.')
xdum = json.dumps(data2)
bdum = json.loads(xdum)
with open("sdata.csv", "w+") as csv_file:
    ged = csv.writer(csv_file)
    ged.writerow(["timestamp", "low", "open", "close", "high", "volume"])
    for bdum in bdum:
        ged.writerow([bdum["timestamp"],
                      bdum["low"],
                      bdum["open"],
                      bdum["close"],
                      bdum["high"]])
with open("sdata.csv") as csv_file:
    g = csv.DictReader(csv_file)
    for row in g:
        print(row['timestamp'])
Instead of writing to the text file line by line, try the approach below. It reduces repetitive I/O and doesn't keep the file open for a long time.
import pandas as pd  # missing from the original snippet

lst = []
for x in x:
    tmpTuple = [x["symbol"],
                x["buy"]["game"],
                x["buy"]["item"],
                x["buy"]["name"],
                x["sell"]["game"],
                x["sell"]["item"],
                x["sell"]["name"]]
    lst.append(tmpTuple)
# outside the loop, create a pandas dataframe
df = pd.DataFrame(lst)
# this is one of several options to save
df.to_csv('filename.csv')
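If the CSV should keep the header row from the original code, pd.DataFrame accepts column names, and index=False drops pandas' row-number column; a possible variant of the last two lines:
df = pd.DataFrame(lst, columns=["symbol", "buy_game", "buy_item", "buy_name",
                                "sell_game", "sell_item", "sell_name"])
df.to_csv('filename.csv', index=False)  # index=False omits the row numbers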
