I am using a Pokemon API : https://pokeapi.co/api/v2/pokemon/
and I am trying to make a list which can store 6 pokemon ID's then, using a for loop, call to the API and retrieve data for each pokemon. Finally, I want to save this info in a txt file. This is what I have so far:
import random
import requests
from pprint import pprint
pokemon_number = []
for i in range (0,6):
pokemon_number.append(random.randint(1,10))
url = 'https://pokeapi.co/api/v2/pokemon/{}/'.format(pokemon_number)
response = requests.get(url)
pokemon = response.json()
pprint(pokemon)
with open('pokemon.txt', 'w') as pok:
pok.write(pokemon_number)
I don't understand how to get the API to read the IDs from the list.
I hope this is clear, I am in a right pickle.
Thanks
You are passing pokemon_number to the url variable, which is a list. You need to iterate over the list instead.
Also, to actually save the pokemon, you can use either the name or it's ID as the filename. The JSON library allows for easy saving of objects to JSON files.
import random
import requests
import json
# renamed this one to indicate it's not a single number
pokemon_numbers = []
for i in range (0,6):
pokemon_numbers.append(random.randint(1,10))
# looping over the generated IDs
for id in pokemon_numbers:
url = f"https://pokeapi.co/api/v2/pokemon/{id}/"
# if you use response, you overshadow response from the requests library
resp = requests.get(url)
pokemon = resp.json()
print(pokemon['name'])
with open(f"{pokemon['name']}.json", "w") as outfile:
json.dump(pokemon, outfile, indent=4)
I now have this:
import requests
pokemon_number = []
for i in range (0,6):
pokemon_number.append(random.randint(1,50))
x = 0
while x <len(pokemon_number):
print(pokemon_number[x])
x = x +1
url = 'https://pokeapi.co/api/v2/pokemon/{}/'.format(pokemon_number[])
response = requests.get(url)
pokemon = response.json()
print(pokemon)
print(pokemon['name'])
print(pokemon['height'])
print(pokemon['weight'])
with open('pokemon.txt', 'w') as p:
p.write(pokemon['name'])
p.write(pokemon['ability'])
When I open sdata.csv file it will not iterate, no error is shown simply not printing. Why could this be? I even did print(g) and it shows its reading properly. I also am trying to write data to the same file and the same blank file occurs with only the heading in it.
import urllib.request as request
import json
from urllib.request import urlopen, Request
import requests
import demjson
import csv
import time
req = Request('https://api.gameslabs.net/1.0.0/exchange', headers={'User-Agent': 'Mozilla/5.0'})
with request.urlopen(req) as response:
if response.getcode() == 200:
source = response.read()
data = json.loads(source)
else:
print('An error occurred while attempting to retrieve data from the API.')
y = json.dumps(data)
x = json.loads(y)
f = csv.writer(open("item-com.csv", "w+"))
# Write CSV Header, If you dont need that, remove this line
f.writerow(["symbol", "buy_game", "buy_item", "buy_name", "sell_game", "sell_item", "sell_name"])
for x in x:
f.writerow([x["symbol"],
x["buy"]["game"],
x["buy"]["item"],
x["buy"]["name"],
x["sell"]["game"],
x["sell"]["item"],
x["sell"]["name"]])
o = csv.DictReader(open("item-com.csv"))
for row in o:
print(row['buy_name'])
req2 = Request('https://api.gameslabs.net/1.0.0/exchange/symbol/MS_IRON_PICKAXE/candles?timeFrame=day',
headers={'User-Agent': 'Mozilla/5.0'})
with request.urlopen(req2) as response:
if response.getcode() == 200:
source2 = response.read()
data2 = json.loads(source2)
else:
print('An error occurred while attempting to retrieve data from the API.')
xdum = json.dumps(data2)
bdum = json.loads(xdum)
ged = csv.writer(open("sdata.csv", "w+"))
ged.writerow(["timestamp", "low", "open", "close", "high", "volume"])
for bdum in bdum:
ged.writerow([bdum["timestamp"],
bdum["low"],
bdum["open"],
bdum["close"],
bdum["high"]])
g = csv.DictReader(open("sdata.csv"))
for row in g:
print(row['timestamp'])
You are writing and reading from the same files. However, you don't ensure the file is closed in between. If you use a context manager it will take care of that for you. I notice you are using context managers for url respones.
I've modified your slightly code to use context managers for file management:
...
with open("item-com.csv", "w+") as csv_file:
f = csv.writer(csv_file)
# Write CSV Header, If you dont need that, remove this line
f.writerow(["symbol", "buy_game", "buy_item", "buy_name", "sell_game", "sell_item", "sell_name"])
for x in x:
f.writerow([x["symbol"],
x["buy"]["game"],
x["buy"]["item"],
x["buy"]["name"],
x["sell"]["game"],
x["sell"]["item"],
x["sell"]["name"]])
with open("item-com.csv") as csv_file:
o = csv.DictReader(csv_file)
for row in o:
print(row['buy_name'])
req2 = Request('https://api.gameslabs.net/1.0.0/exchange/symbol/MS_IRON_PICKAXE/candles?timeFrame=day',
headers={'User-Agent': 'Mozilla/5.0'})
with request.urlopen(req2) as response:
if response.getcode() == 200:
source2 = response.read()
data2 = json.loads(source2)
else:
print('An error occurred while attempting to retrieve data from the API.')
xdum = json.dumps(data2)
bdum = json.loads(xdum)
with open("sdata.csv", "w+") as csv_file:
ged = csv.writer(csv_file)
ged.writerow(["timestamp", "low", "open", "close", "high", "volume"])
for bdum in bdum:
ged.writerow([bdum["timestamp"],
bdum["low"],
bdum["open"],
bdum["close"],
bdum["high"]])
with open("sdata.csv") as csv_file:
g = csv.DictReader(csv_file)
for row in g:
print(row['timestamp'])
Instead of writing line by line to text file try this way. This method reduces repetitive i/o and doesn't have to keep the file open for long time.
lst = []
for x in x:
tmpTuple = ([x["symbol"],
x["buy"]["game"],
x["buy"]["item"],
x["buy"]["name"],
x["sell"]["game"],
x["sell"]["item"],
x["sell"]["name"]])
lst.append(tmpTuple)
#outside loop create a pandas dataframe
df = pd.DataFrame(lst)
#this is several options to save
df.to_csv('filename.csv')
I'm trying to save the JSON results of an API call to CSV. The API is in a for loop where it subtracts a day and re-runs the API to get the price of a Bitcoin. It's then supposed to save each API call to a row in a CSV but it's currently only saving the last API call.
How can I get it to write a row for each returned API call?
import requests
import json
import csv
timestampCurrent = 1550698057
timestampOneWeekAgo = 1550161800
oneDay = 86400
for timestamp in range(timestampCurrent, timestampOneWeekAgo, -oneDay):
print(timestamp)
base_url = "https://api.gemini.com/v1/trades/btcusd"
payload = {'timestamp' : timestamp,
'limit_trades' : '1'}
r = requests.get(url = base_url, params = payload)
time = r.json()
print(time)
# open a file for writing
bitcoincsv = open('/Users/kanye_west/Desktop/Code/Python/BitcoinTracker/bitcoinyear.csv', 'w')
# create csv writer object
csvwriter = csv.writer(bitcoincsv)
count = 0
for year in time:
if count == 0:
header = year.keys()
csvwriter.writerow(header)
count += 1
csvwriter.writerow(year.values())
bitcoincsv.close()
# append csv results
# save CSV file
This should work.
import requests
import csv
URL = 'https://api.gemini.com/v1/trades/btcusd'
TIMESTAMP_CURRENT = 1550698057
TIMESTAMP_ONE_WEEK_AGO = 1550161800
ONE_DAY = 86400
def get_time_slot_data(timestamp):
r = requests.get(URL, params={'timestamp': timestamp, 'limit_trades': '1'})
if r.status_code == 200:
return r.json()
with open('gemini.csv', 'wb') as out:
headers_written = False
writer = csv.writer(out)
for timestamp in range(TIMESTAMP_CURRENT, TIMESTAMP_ONE_WEEK_AGO, -ONE_DAY):
daily_data = get_time_slot_data(timestamp)
if not headers_written and daily_data:
writer.writerow(daily_data[0].keys())
headers_written = True
if daily_data:
for entry in daily_data:
writer.writerow(entry.values())
How do I pass the variable-contents to the f.write() function, so that the variable-contents would be written to file?
i.e. move variable jsonData to function f.write()
def get(self):
url = 'http://httpbin.org/get'
r = requests.get(url)
data = r.text
jsonData = json.loads(data)
# #TODO: send jsonData variable to log file.
f = open('test_logs.txt', 'a+')
f.write('jsonData')
f.close()
return jsonData
f.write(str(jsonData))
Though you should be writing to a json file probably
I want to get URL in http://www.malware-traffic-analysis.net/
I want to get url when I insert keyword "nuclear".
but I block because I do not get total count.
this is my sample code.I almost done..
import urllib2
import json
import pprint
def customSearch(page,keyword):
url = 'https://www.googleapis.com/customsearch/v1?key=MY_API_KEY'
+ '&cx=007471739612924802870:ak59oowq-cq&num=10&start='
+ str(page)
+ '1&q='
+ keyword
data = urllib2.urlopen(url)
data = json.load(data)
return data
def getURL(keyword):
result=[]
try:
for i in range(1): # i want to get this range
data=customSearch(i,keyword)
for temp in data['items']:
result.append(temp["link"])
except:
None
content=""
for i in result:
content+=(i+"\n")
f=open(keyword+'.txt',"w")
f.write(content)
f.close()
getURL("Nuclear")