KeyError: 'main' when trying an OpenWeatherMap Python API tutorial

I'm currently trying to run through a tutorial on how to set up OpenWeatherMap via Python, but I'm getting a KeyError and I was wondering if someone could help me out.
The error I am getting is KeyError: 'main'.
In the actual code I have put in my API key, but I have taken it out here for obvious reasons.
api_key = ""
base_url = "http://api.openweathermap.org/data/2.5/weather?"
city_name = input("Enter city name : ")
complete_url = base_url + "appid=" + api_key + "&q=" + city_name
response = requests.get(complete_url)
x = response.json()
if x["cod"] != "404":
y = x["main"]
current_temperature = y["temp"]
current_pressure = y["pressure"]
current_humidiy = y["humidity"]
z = x["weather"]
weather_description = z[0]["description"]
print(" Temperature (in kelvin unit) = " +
str(current_temperature) +
"\n atmospheric pressure (in hPa unit) = " +
str(current_pressure) +
"\n humidity (in percentage) = " +
str(current_humidiy) +
"\n description = " +
str(weather_description))
else:
print(" City Not Found ")

The following works:
import requests

OPEN_WEATHER_MAP_APIKEY = '<your key>'

def get_weather_data_by_location(lat, long):
    url = f'https://api.openweathermap.org/data/2.5/onecall?lat={lat}&lon={long}&appid={OPEN_WEATHER_MAP_APIKEY}&units=metric'
    print(f"Getting data via {url}")
    r = requests.get(url)
    if r.status_code == 200:
        return r.json()
    else:
        return None

if __name__ == '__main__':
    print("Getting Weather Data")
    print(get_weather_data_by_location('22.300910042194783', '114.17070449064359'))
There is a beginner's guide to OpenWeatherMap which you can follow here: https://pythonhowtoprogram.com/get-weather-forecasts-and-show-it-on-a-chart-using-python-3/
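As for the KeyError itself: OpenWeatherMap returns an error payload with no 'main' key when the API key is rejected or the city is unknown (the 'cod' field is 401 or '404' in those cases), so x["main"] fails before the 404 check can help. A minimal sketch, assuming complete_url is built as in the question, that inspects the payload before indexing:
import requests

response = requests.get(complete_url)
x = response.json()
if response.status_code == 200 and "main" in x:
    print("Temperature (K):", x["main"]["temp"])
else:
    # error payloads carry "cod" and "message" instead of weather data
    print("API error:", x.get("cod"), x.get("message"))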

Related

I want to take specific data parameters pulled from a weather API and load them into an SQL database as the hours change

Okay, so I am pulling data from the Visual Crossing weather API and I want to pull the date, hours, weather conditions, UV index, temperature, sunrise and sunset. I extracted the data using the json commands, but I am unable to get information for each hour. I have tried everything; could someone please help me out? How can I read temperature conditions for each hour for a specific location?
This is what I have so far:
import mysql.connector
import requests
import json
from urllib import parse
from datetime import datetime

db = mysql.connector.connect(
    host="localhost",
    user="root",
    passwd="qwww",
    database="weather_information"
)

BaseURL = 'https://weather.visualcrossing.com/VisualCrossingWebServices/rest/services/timeline/'
GeoUser = "xxx"

while True:
    try:
        Location = input("Enter the city name:")  # user enters City name
        Base_GeoURL = 'http://api.geonames.org/searchJSON'  # sets GeoNames URL
        Geo_params = {"q": f"{Location}"}  # setting the provided city name as a parameter
        Geo_queryString = parse.urlencode(Geo_params)
        Geo_URL = Base_GeoURL + "?" + Geo_queryString + "&maxRows=5&username=" + GeoUser
        Geo_URL = Geo_URL.replace(" ", "")
        response = requests.get(Geo_URL, verify=True)  # Find a way to get this go through SSL
        Locateinfo = response.json()
        if (Locateinfo["totalResultsCount"] == 0):
            print("Invalid City, Please Re-enter city name")
        else:
            print("Valid City!")
            break
    except ConnectionError:
        print("Connection Error!!")

while True:
    try:
        SDate_String = input("Please enter start date in yyyy/mm/dd format")
        format = ' %Y/%m/%d'
        StartDate = datetime.strptime(SDate_String, format)
        break
    except ValueError:
        print("Invalid Date")

while True:
    try:
        EDate_String = input("Please enter end date in yyyy/mm/dd format")
        format = ' %Y/%m/%d'
        EndDate = datetime.strptime(EDate_String, format)
        print('Loading...')
        break
    except ValueError:
        print("Invalid Date")

diff_days = (EndDate - StartDate)
print(diff_days.days)

if ((StartDate.year == EndDate.year) and (StartDate.month == EndDate.month) and (StartDate.day == EndDate.day)):
    SStartDate = StartDate.strftime(" %Y/%m/%d")
    StartDate = SStartDate.split('/')
    Syear = StartDate[0]
    Smonth = StartDate[1]
    Sday = StartDate[2]
    SEndDate = EndDate.strftime(" %Y/%m/%d")
    EndDate = SEndDate.split('/')
    Eyear = EndDate[0]
    Emonth = EndDate[1]
    Eday = EndDate[2]
    print('Request for forecast data in progress...')
    params = {"unitGroup": "metric", "key": "wwww", "contentType": "json"}
    querystring = parse.urlencode(params)
    URL = BaseURL + Location + "/" + Syear + "-" + Smonth + "-" + Sday + "/" + Eyear + "-" + Emonth + "-" + Eday + "?" + querystring
    URL = URL.replace(" ", "")
    print(URL)
else:
    SStartDate = StartDate.strftime(" %Y/%m/%d")
    StartDate = SStartDate.split('/')
    Syear = StartDate[0]
    Smonth = StartDate[1]
    Sday = StartDate[2]
    SEndDate = EndDate.strftime(" %Y/%m/%d")
    EndDate = SEndDate.split('/')
    Eyear = EndDate[0]
    Emonth = EndDate[1]
    Eday = EndDate[2]
    print('Request for history data in progress...')
    params = {"unitGroup": "metric", "key": "www", "contentType": "json"}
    querystring = parse.urlencode(params)
    URL = BaseURL + Location + "/" + Syear + "-" + Smonth + "-" + Sday + "/" + Eyear + "-" + Emonth + "-" + Eday + "?" + querystring
    URL = URL.replace(" ", "")
    print(URL)
    response = requests.get(URL).json()
    weatherdata = json.loads(response.content)
    db = mysql.connector.connect(
        host="localhost",
        user="root",
        passwd="qwww",
        database="weather_information"
    )
    cursor = db.cursor
    for item in weatherdata:
        date = item.get("resolveAddress")
        cursor.execute()
    # weatherdata = response.json()
    # weatherdata = json.loads(response.content)
    # print(weatherdata['resolvedAddress'])
    # for x in range(diff_days.days + 1):
    #     date = weatherdata['days'][x]['datetime']
    #     print(date)
    #     for i in range(24):
    #         print('HOUR:', weatherdata['days'][x]['hours'][i]['datetime'])
    #         print('TEMP:', weatherdata['days'][x]['hours'][i]['temp'])
    #         print('UVINDEX:', weatherdata['days'][x]['hours'][i]['uvindex'])
    #         print('WINDSPEED:', weatherdata['days'][x]['hours'][i]['windspeed'])
I am no Python dev so this is likely sub-optimal, but at least it works. I have stripped out the MySQL bits as they are not yet implemented, nor are they key to:
read temperature conditions for each hour for a specific location
Your requirement for the space at the start of the date inputs makes no sense, so I stripped that -
while True:
    try:
        SDate_String = input("Please enter start date in yyyy/mm/dd format: ")
        format = '%Y/%m/%d'
        StartDate = datetime.strptime(SDate_String, format)
        break
    except ValueError:
        print("Invalid Date")
The if...else conditional blocks serve no purpose as both branches do the same thing so I have removed them. If you need to add them back in, you can compare the two datetime objects directly instead of comparing their year, month & day attributes. The first two blocks of code in the conditional branches achieve nothing -
if ((StartDate.year == EndDate.year) and (StartDate.month == EndDate.month) and (StartDate.day == EndDate.day)):
    SStartDate = StartDate.strftime(" %Y/%m/%d")
    StartDate = SStartDate.split('/')
    Syear = StartDate[0]
    Smonth = StartDate[1]
    Sday = StartDate[2]
    SEndDate = EndDate.strftime(" %Y/%m/%d")
    EndDate = SEndDate.split('/')
    Eyear = EndDate[0]
    Emonth = EndDate[1]
    Eday = EndDate[2]
    print('Request for forecast data in progress...')
    params = {"unitGroup": "metric", "key": "wwww", "contentType": "json"}
    querystring = parse.urlencode(params)
    URL = BaseURL + Location + "/" + Syear + "-" + Smonth + "-" + Sday + "/" + Eyear + "-" + Emonth + "-" + Eday + "?" + querystring
    URL = URL.replace(" ", "")
    print(URL)
else:
    SStartDate = StartDate.strftime(" %Y/%m/%d")
    StartDate = SStartDate.split('/')
    Syear = StartDate[0]
    Smonth = StartDate[1]
    Sday = StartDate[2]
    SEndDate = EndDate.strftime(" %Y/%m/%d")
    EndDate = SEndDate.split('/')
    Eyear = EndDate[0]
    Emonth = EndDate[1]
    Eday = EndDate[2]
    print('Request for history data in progress...')
    params = {"unitGroup": "metric", "key": "www", "contentType": "json"}
    querystring = parse.urlencode(params)
    URL = BaseURL + Location + "/" + Syear + "-" + Smonth + "-" + Sday + "/" + Eyear + "-" + Emonth + "-" + Eday + "?" + querystring
    URL = URL.replace(" ", "")
    print(URL)
becomes -
print('Request for forecast data in progress...')
params = {'unitGroup': 'uk', 'key': 'wwww', 'contentType': 'json'}
querystring = parse.urlencode(params)
URL = BaseURL + parse.quote(Location) + "/" + StartDate.strftime("%Y-%m-%d") + "/" + EndDate.strftime("%Y-%m-%d") + "?" + querystring
print(URL)
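If the forecast-vs-history distinction is ever needed again, the two datetime objects can be compared directly rather than attribute by attribute -
if StartDate.date() == EndDate.date():
    print('Request for forecast data in progress...')
else:
    print('Request for history data in progress...')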
The next block of code, which sends the request to the API, is still inside your else: block but presumably should be outside.
# could be
response = requests.get(URL)
weatherdata = response.json()
# or
response = requests.get(URL)
weatherdata = json.loads(response.text)
# but not half and half
response = requests.get(URL).json()
weatherdata = json.loads(response.content)
Piecing it back together you end up with something like this -
import requests
import json
from urllib import parse
from datetime import datetime

BaseURL = 'https://weather.visualcrossing.com/VisualCrossingWebServices/rest/services/timeline/'
GeoUser = "xxx"

while True:
    try:
        Location = input("Enter the city name: ")  # user enters City name
        # put your api.geonames.org code back in here
        break
    except ConnectionError:
        print("Connection Error!!")

while True:
    try:
        SDate_String = input("Please enter start date in yyyy/mm/dd format: ")
        format = '%Y/%m/%d'
        StartDate = datetime.strptime(SDate_String, format)
        break
    except ValueError:
        print("Invalid Date")

while True:
    try:
        EDate_String = input("Please enter end date in yyyy/mm/dd format: ")
        format = '%Y/%m/%d'
        EndDate = datetime.strptime(EDate_String, format)
        break
    except ValueError:
        print("Invalid Date")

diff_days = (EndDate - StartDate)
print('DiffDays: ', diff_days.days)
print('Request for forecast data in progress...')
params = {'unitGroup': 'uk', 'key': 'wwww', 'contentType': 'json'}
querystring = parse.urlencode(params)
URL = BaseURL + parse.quote(Location) + "/" + StartDate.strftime("%Y-%m-%d") + "/" + EndDate.strftime("%Y-%m-%d") + "?" + querystring
print(URL)
response = requests.get(URL)
weatherdata = response.json()
print('Resolved address: ' + weatherdata['resolvedAddress'])
for day in weatherdata['days']:
    print('Date: ', day['datetime'])
    for hour in day['hours']:
        print('HOUR: ', hour['datetime'])
        print('TEMP: ', hour['temp'])
        print('UVINDEX: ', hour['uvindex'])
        print('WINDSPEED: ', hour['windspeed'])
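To put the stripped-out MySQL bits back, one possible sketch (the weather_hourly table and its columns are hypothetical; the connection settings are the ones from the question) inserts one row per hour:
import mysql.connector

db = mysql.connector.connect(
    host="localhost",
    user="root",
    passwd="qwww",
    database="weather_information"
)
cursor = db.cursor()  # note: cursor() is a method call, not an attribute
for day in weatherdata['days']:
    for hour in day['hours']:
        # weather_hourly(day, hour, temp, uvindex, windspeed) is a hypothetical table
        cursor.execute(
            "INSERT INTO weather_hourly (day, hour, temp, uvindex, windspeed) "
            "VALUES (%s, %s, %s, %s, %s)",
            (day['datetime'], hour['datetime'], hour['temp'], hour['uvindex'], hour['windspeed'])
        )
db.commit()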

Python Won't Enter Try/Except In For Loop On 50th Iteration

First off I want to apologize to everyone who's about to read this code... I know it's a mess.
For anyone who is able to decipher it: I have a list of ~16,500 website URLs that I am scraping and then categorizing using Google's NLP. The list of URLs is created with the following chunk of code; as far as I can tell nothing is broken here.
url_list = open("/Users/my_name/Documents/Website Categorization and Scrapper Python/url_list copy", "r")
indexed_url_list = url_list.readlines()
clean_url_list = []
clean_url_list = [x[:-1] for x in indexed_url_list]
When I print the length of this list it correctly gives me the count of ~16,500.
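One small hazard in that chunk, though not the cause of the problem below: x[:-1] chops the last character of a line even when the final line has no trailing newline. A safer sketch, using the same file path, reads:
with open("/Users/my_name/Documents/Website Categorization and Scrapper Python/url_list copy") as url_list:
    clean_url_list = [line.strip() for line in url_list]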
The main block of code is as follows:
for x in clean_url_list:
    print('1')
    url = x
    print('1.1')
    try:
        r = scraper.get(url, headers=headers)
        print('1.2')
        soup = BeautifulSoup(r.text, 'html.parser')
        print('1.3')
        title = soup.find('title').text
        print('1.4')
        description = soup.find('meta', attrs={'name': 'description'})["content"]
        print('2')
        if "content" in str(description):
            description = description.get("content")
        else:
            description = ""
        h1 = soup.find_all('h1')
        h1_all = ""
        for x in range(len(h1)):
            if x == len(h1) - 1:
                h1_all = h1_all + h1[x].text
            else:
                h1_all = h1_all + h1[x].text + ". "
        paragraphs_all = ""
        paragraphs = soup.find_all('p')
        for x in range(len(paragraphs)):
            if x == len(paragraphs) - 1:
                paragraphs_all = paragraphs_all + paragraphs[x].text
            else:
                paragraphs_all = paragraphs_all + paragraphs[x].text + ". "
        h2 = soup.find_all('h2')
        h2_all = ""
        for x in range(len(h2)):
            if x == len(h2) - 1:
                h2_all = h2_all + h2[x].text
            else:
                h2_all = h2_all + h2[x].text + ". "
        h3 = soup.find_all('h3')
        h3_all = ""
        for x in range(len(h3)):
            if x == len(h3) - 1:
                h3_all = h3_all + h3[x].text
            else:
                h3_all = h3_all + h3[x].text + ". "
        allthecontent = ""
        allthecontent = str(title) + " " + str(description) + " " + str(h1_all) + " " + str(h2_all) + " " + str(h3_all) + " " + str(paragraphs_all)
        allthecontent = str(allthecontent)[0:999]
        print(allthecontent)
    except Exception as e:
        print(e)
When I run this it successfully categorizes the first 49 URLs but ALWAYS stops on the 50th, no matter which URL it is. No error is thrown, and even if one were, the try/except should handle it. Judging from the print statements used for debugging, it never enters the try section on the 50th iteration, and it is always the 50th iteration.
Any help would be much appreciated and I hope you have some good eye wash to wipe away the code you just had to endure.
I helped look at this at work. The actual issue was a bad 50th URL whose request never returned. Adding a timeout made the request fail fast, so the except block could catch it and move on to the next URL in a manageable fashion.
try:
    r = scraper.get(url, headers=headers, timeout=5)
except:
    continue  # handle next url in list
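A slightly tighter sketch (assuming scraper is a requests-compatible session, e.g. cloudscraper) catches the request errors explicitly and logs which URL was skipped:
import requests

for url in clean_url_list:
    try:
        r = scraper.get(url, headers=headers, timeout=5)
    except requests.exceptions.RequestException as e:
        print(f"skipping {url}: {e}")
        continue  # move on to the next URL
    # ... parse r.text with BeautifulSoup as before ...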

Displaying output in HTML with Python

I'm still learning the ins and outs of Python, and my goal right now is to display the output of my Python code on an HTML webpage. How can I do that? I already have an HTML webpage, but instead of the output displaying on the webpage itself, it displays on the console only.
from flask import Flask, render_template, request
import urllib.parse
import requests

__author__ = 'kai'

app = Flask(__name__)

@app.route('/')
def index():
    return render_template('index.html')

@app.route('/hello', methods=['POST'])
def hello():
    while True:
        orig = request.form['starting']
        if orig == "quit" or orig == "q":
            break
        dest = request.form['destination']
        if dest == "quit" or dest == "q":
            break
        url = main_api + urllib.parse.urlencode({"key": key, "from": orig, "to": dest})
        print("URL: " + (url))
        json_data = requests.get(url).json()
        json_status = json_data["info"]["statuscode"]
        if json_status == 0:
            print("API Status: " + str(json_status) + " = A successful route call.\n")
            print("=============================================")
            print("Directions from " + (orig) + " to " + (dest))
            print("Trip Duration: " + (json_data["route"]["formattedTime"]))
            print("-----------------Distance in different units-----------------")
            print("Kilometers: " + str("{:.2f}".format((json_data["route"]["distance"]) * 1.61)))
            print("Miles: " + str(json_data["route"]["distance"]))
            print("Yards: " + str("{:.2f}".format((json_data["route"]["distance"]) * 1760)))
            print("-----------------Fuel Used in different units-----------------")
            print("Fuel Used (Ltr): " + str("{:.2f}".format((json_data["route"]["fuelUsed"]) * 3.78)))
            print("Fuel Used (US Gal): " + str(json_data["route"]["fuelUsed"]))
            print("Fuel Used (Imperial Gal): " + str("{:.2f}".format((json_data["route"]["fuelUsed"]) * 0.8327)))
            print("=============================================")
            for each in json_data["route"]["legs"][0]["maneuvers"]:
                print((each["narrative"]) + " (" + str("{:.2f}".format((each["distance"]) * 1.61) + " km)"))
            print("=============================================\n")
        elif json_status == 402:
            print("**********************************************")
            print("Status Code: " + str(json_status) + "; Invalid user inputs for one or both locations.")
            print("**********************************************\n")
        elif json_status == 611:
            print("**********************************************")
            print("Status Code: " + str(json_status) + "; Missing an entry for one or both locations.")
            print("**********************************************\n")
        else:
            print("************************************************************************")
            print("Try Again")
            print("************************************************************************\n")

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=3000)
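The usual Flask pattern for this is to collect the output into a variable and pass it to render_template instead of calling print; the template then renders it. A minimal sketch, where results.html is a hypothetical template and the route is trimmed to the essentials:
from flask import Flask, render_template, request

app = Flask(__name__)

@app.route('/hello', methods=['POST'])
def hello():
    orig = request.form['starting']
    dest = request.form['destination']
    # build the lines that were previously print()ed
    lines = ["Directions from " + orig + " to " + dest]
    # ... append the duration/distance/fuel lines here ...
    # results.html might render them with:
    # {% for line in lines %}<p>{{ line }}</p>{% endfor %}
    return render_template('results.html', lines=lines)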

Python - Extracting only one item when using an open API

I'm extracting movie data from an open API.
I want to get only the first director and actor, but every one of them is printed.
This is my code.
url = "http://www.kobis.or.kr/kobisopenapi/webservice/rest/movie/searchMovieInfo.json?key='KeyValue'&movieCd=20177478"
res = requests.get(url)
test = res.text
d = json.loads(test)
movieinfo = d['movieInfoResult']['movieInfo']
moviename = movieinfo['movieNm']
print("movie_name = " + moviename)
moviedt = movieinfo['openDt']
print("movie_dt = " + moviedt)
for b in d["movieInfoResult"]["movieInfo"]["directors"]:
print("director_name = " + b["peopleNm"])
When I run this code, the result is like this.
movie_name = avengers
movie_dt = 20180425
director_name = Anthony Russo
director_name = Joe Russo
How can I print only one person, like this?
I need just the first person.
movie_name = avengers
movie_dt = 20180425
director_name = Anthony Russo
Open API site (Korean) - https://www.kobis.or.kr/kobisopenapi/homepg/apiservice/searchServiceInfo.do
You can break out of the for loop after printing, or you can access the first value directly (if you are sure the directors array is not empty).
url = "http://www.kobis.or.kr/kobisopenapi/webservice/rest/movie/searchMovieInfo.json?key='KeyValue'&movieCd=20177478"
res = requests.get(url)
test = res.text
d = json.loads(test)
movieinfo = d['movieInfoResult']['movieInfo']
moviename = movieinfo['movieNm']
print("movie_name = " + moviename)
moviedt = movieinfo['openDt']
print("movie_dt = " + moviedt)
for b in d["movieInfoResult"]["movieInfo"]["directors"]:
print("director_name = " + b["peopleNm"])
break
or
url = "http://www.kobis.or.kr/kobisopenapi/webservice/rest/movie/searchMovieInfo.json?key='KeyValue'&movieCd=20177478"
res = requests.get(url)
test = res.text
d = json.loads(test)
movieinfo = d['movieInfoResult']['movieInfo']
moviename = movieinfo['movieNm']
print("movie_name = " + moviename)
moviedt = movieinfo['openDt']
print("movie_dt = " + moviedt)
print("director_name = " + d["movieInfoResult"]["movieInfo"]["directors"][0]["peopleNm"])

Craigslist multi-city search Python script. Adding a GUI and more cities

So I am very new to Python programming and just trying to figure out a good project to get me started. I wanted to attempt searching Craigslist in multiple cities. I found a dated example online and used it as a starting point. The script below currently only has cities in Ohio, but I plan on adding all US cities. The "homecity" is currently set to Dayton. It asks for a search radius, search term, min price, and max price. Based on the lat/lon of the cities, it only searches cities within the radius. I also have it searching all pages if there is more than one page of results. At the end it creates an HTML file of the results and opens it in a browser. It seems to be working fine, but I was hoping to get feedback on whether I am doing everything efficiently. I would also like to add a GUI to capture user inputs but am not even sure where to start. Any advice there? Thanks!
#Craigslist Search
"""
Created on Thu Mar 27 11:56:54 2014
used http://cal.freeshell.org/2010/05/python-craigslist-search-script-version-2/ as
starting point.
"""
import re
import os
import os.path
import time
import urllib2
import webbrowser
from math import *

results = re.compile('<p.+</p>', re.DOTALL)  # Find pattern for search results.
prices = re.compile('<span class="price".*?</span>', re.DOTALL)  # Find pattern for prices.
pages = re.compile('button pagenum">.*?</span>')
delay = 10

def search_all():
    for city in list(set(searchcities)):  # add another for loop for all pages
        # Setup headers to spoof Mozilla
        dat = None
        ua = "Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.4) Gecko/20091007 Firefox/3.5.4"
        head = {'User-agent': ua}
        errorcount = 0
        # Do a quick search to see how many pages of results
        url = "http://" + city + ".craigslist.org/search/" + "sss?s=" + "0" + "&catAbb=sss&query=" + query.replace(' ', '+') + "&minAsk=" + pricemin + "&maxAsk=" + pricemax
        req = urllib2.Request(url, dat, head)
        try:
            response = urllib2.urlopen(req)
        except urllib2.HTTPError:
            if errorcount < 1:
                errorcount = 1
                print "Request failed, retrying in " + str(delay) + " seconds"
                time.sleep(int(delay))
                response = urllib2.urlopen(req)
        msg = response.read()
        errorcount = 0
        pglist = pages.findall(msg)
        pg = pglist.pop(0)
        if pg.find('of') == -1:
            pg = 100
        else:
            pg = pg[int((pg.find('of')) + 3):int((pg.find('</span>')))]
        if int(pg) / 100 == 0:
            pg = 100
        numpages = range(int(pg) / 100)
        for page in numpages:
            print "searching...."
            page = page * 100
            url = "http://" + city + ".craigslist.org/search/" + "sss?s=" + str(page) + "&catAbb=sss&query=" + query.replace(' ', '+') + "&minAsk=" + pricemin + "&maxAsk=" + pricemax
            cityurl = "http://" + city + ".craigslist.org"
            errorcount = 0
            # Get page
            req = urllib2.Request(url, dat, head)
            try:
                response = urllib2.urlopen(req)
            except urllib2.HTTPError:
                if errorcount < 1:
                    errorcount = 1
                    print "Request failed, retrying in " + str(delay) + " seconds"
                    time.sleep(int(delay))
                    response = urllib2.urlopen(req)
            msg = response.read()
            errorcount = 0
            res = results.findall(msg)
            res = str(res)
            res = res.replace('[', '')
            res = res.replace(']', '')
            res = res.replace('<a href="', '<a href="' + cityurl)
            #res = re.sub(prices,'',res)
            res = "<BLOCKQUOTE>" * 6 + res + "</BLOCKQUOTE>" * 6
            outp = open("craigresults.html", "a")
            outp.write(city)
            outp.write(str(res))
            outp.close()

def calcDist(lat_A, long_A, lat_B, long_B):  # This was found at zip code database project
    distance = (sin(radians(lat_A)) *
                sin(radians(lat_B)) +
                cos(radians(lat_A)) *
                cos(radians(lat_B)) *
                cos(radians(long_A - long_B)))
    distance = (degrees(acos(distance))) * 69.09
    return distance
cities = """akroncanton:41.043955,-81.51919
ashtabula:41.871212,-80.79178
athensohio:39.322847,-82.09728
cincinnati:39.104410,-84.50774
cleveland:41.473451,-81.73580
columbus:39.990764,-83.00117
dayton:39.757758,-84.18848
limaohio:40.759451,-84.08458
mansfield:40.759156,-82.51118
sandusky:41.426460,-82.71083
toledo:41.646649,-83.54935
tuscarawas:40.397916,-81.40527
youngstown:41.086279,-80.64563
zanesville:39.9461,-82.0122
"""
if os.path.exists("craigresults.html") == True:
    os.remove("craigresults.html")

homecity = "dayton"
radius = raw_input("Search Distance from Home in Miles: ")
query = raw_input("Search Term: ")
pricemin = raw_input("Min Price: ")
pricemax = raw_input("Max Price: ")

citylist = cities.split()
# create dictionary
citdict = {}
for city in citylist:
    items = city.split(":")
    citdict[items[0]] = items[1]
homecord = str(citdict.get(homecity)).split(",")
homelat = float(homecord[0])
homelong = float(homecord[1])
searchcities = []
for key, value in citdict.items():
    distcity = key
    distcord = str(value).split(",")
    distlat = float(distcord[0])
    distlong = float(distcord[1])
    dist = calcDist(homelat, homelong, distlat, distlong)
    if dist < int(radius):
        searchcities.append(key)
print searchcities
search_all()
webbrowser.open_new('craigresults.html')
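On the GUI question: Tkinter ships with Python and is the usual place to start. A minimal sketch (written for Python 3, where the module is tkinter and raw_input becomes input; the widget names are hypothetical) that captures the same four inputs and hands them to a callback:
import tkinter as tk

def run_search():
    # read the four values that raw_input() used to collect
    radius = radius_entry.get()
    query = query_entry.get()
    pricemin = min_entry.get()
    pricemax = max_entry.get()
    print(radius, query, pricemin, pricemax)  # hand these to the search code instead

root = tk.Tk()
root.title("Craigslist Search")
labels = ["Search Distance from Home in Miles", "Search Term", "Min Price", "Max Price"]
entries = []
for row, text in enumerate(labels):
    tk.Label(root, text=text).grid(row=row, column=0, sticky="w")
    entry = tk.Entry(root)
    entry.grid(row=row, column=1)
    entries.append(entry)
radius_entry, query_entry, min_entry, max_entry = entries
tk.Button(root, text="Search", command=run_search).grid(row=4, column=1)
root.mainloop()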
