Getting a Request Error 404 in Python while accessing an API - python

Previously, I used a Morningstar API to get stock data; however, now that I am away from the USA for a week, I am not able to access the data.
This is the code snippet:
import datetime as dt
from dateutil.relativedelta import relativedelta
import matplotlib.pyplot as plt
from matplotlib import style
import pandas as pd
pd.core.common.is_list_like = pd.api.types.is_list_like
import pandas_datareader.data as web
import csv
from mpl_finance import candlestick_ohlc
import matplotlib.dates as mdates
from matplotlib.dates import DateFormatter, MonthLocator, YearLocator, DayLocator, WeekdayLocator
style.use( 'ggplot' )
end = dt.date.today()
start_48 = end - relativedelta( years=4 )
start_120 = end - relativedelta( years=10 )
ticker = input( 'Ticker: ' ) #should be in Uppercase
ticker = ticker.upper()
df_w = web.DataReader( ticker, 'morningstar', start_48, end )
df_m = web.DataReader( ticker, 'morningstar', start_120, end )
print()
file_name_w = ticker + 'weekly.csv'
file_name_m = ticker + 'monthly.csv'
df_w.to_csv( file_name_w )
df_m.to_csv( file_name_m )
df_w = pd.read_csv( file_name_w, parse_dates=True, index_col=0 )
df_m = pd.read_csv( file_name_m, parse_dates=True, index_col=0 )
This is the error message:
Ticker: spy
Traceback (most recent call last):
File "/Users/zubairjohal/Documents/OHLC.py", line 24, in <module>
df_w = web.DataReader( ticker, 'morningstar', start_48, end )
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/pandas_datareader/data.py", line 391, in DataReader
session=session, interval="d").read()
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/pandas_datareader/mstar/daily.py", line 219, in read
df = self._dl_mult_symbols(symbols=symbols)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/pandas_datareader/mstar/daily.py", line 130, in _dl_mult_symbols
resp.status_code, resp.reason))
Exception: Request Error!: 404 : Not Found
Is it an IP issue, and is there a way to fix this? I know that this code is fine because it worked perfectly well two days ago.

I had the same problem, here in the USA. The datareader service (Morningstar) worked 3 days ago and stopped working the day before yesterday. I believe Morningstar changed their REST interface, so there is not much we can do except wait for the developers to fix it.

404 means Not Found. Assuming you didn't make any changes and it suddenly stopped working, I would say either the API URL is not accessible from that country (or is blocked on that specific network), or their API changed (or is under maintenance). If you know the API URL, try it directly in a browser over different Internet connections.
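A quick way to check this from Python rather than a browser is to request the endpoint directly and inspect the status code; the URL below is only a placeholder, substitute the actual API URL you want to test.
import requests

url = "https://example.com/path/to/the/api"   # placeholder - put the real endpoint here
resp = requests.get(url, timeout=10)
print(resp.status_code, resp.reason)
# A 404 from every network suggests the provider changed or removed the endpoint;
# a 404 or timeout from only one location points to a regional or network block.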

Related

How to fix ARM mach-o file compatibility error when importing prophet module on python 3.9?

When running my file, I get an error saying '/Users/srijan/miniforge3/envs/PersonalProject/lib/python3.9/site-packages/cryptography/hazmat/bindings/_rust.abi3.so' (mach-o file, but is an incompatible architecture (have 'arm64', need 'x86_64'))
import streamlit as st
from datetime import date
import yfinance as yf
from fbprophet import Prophet
from fbprophet.plot import plot_plotly
from plotly import graph_objs as go
START = "2015-01-01"
TODAY = date.today().strftime("%Y-%m-%d")
st.title('Stock Forecast App')
stocks = ('GOOG', 'AAPL', 'MSFT', 'GME')
selected_stock = st.selectbox('Select dataset for prediction', stocks)
n_years = st.slider('Years of prediction:', 1, 4)
period = n_years * 365
#st.cache
def load_data(ticker):
    data = yf.download(ticker, START, TODAY)
    data.reset_index(inplace=True)
    return data
data_load_state = st.text('Loading data...')
data = load_data(selected_stock)
data_load_state.text('Loading data... done!')
st.subheader('Raw data')
st.write(data.tail())
# Plot raw data
def plot_raw_data():
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=data['Date'], y=data['Open'], name="stock_open"))
    fig.add_trace(go.Scatter(x=data['Date'], y=data['Close'], name="stock_close"))
    fig.layout.update(title_text='Time Series data with Rangeslider', xaxis_rangeslider_visible=True)
    st.plotly_chart(fig)
plot_raw_data()
# Predict forecast with Prophet.
df_train = data[['Date','Close']]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
m = Prophet()
m.fit(df_train)
future = m.make_future_dataframe(periods=period)
forecast = m.predict(future)
# Show and plot forecast
st.subheader('Forecast data')
st.write(forecast.tail())
st.write(f'Forecast plot for {n_years} years')
fig1 = plot_plotly(m, forecast)
st.plotly_chart(fig1)
st.write("Forecast components")
fig2 = m.plot_components(forecast)
st.write(fig2)
Traceback (most recent call last):
File "/Users/srijan/Code/PP/stockpredictor3.py", line 4, in <module>
import yfinance as yf
File "/Users/srijan/miniforge3/envs/PersonalProject/lib/python3.9/site-packages/yfinance/__init__.py", line 23, in <module>
from .ticker import Ticker
File "/Users/srijan/miniforge3/envs/PersonalProject/lib/python3.9/site-packages/yfinance/ticker.py", line 29, in <module>
from .base import TickerBase
File "/Users/srijan/miniforge3/envs/PersonalProject/lib/python3.9/site-packages/yfinance/base.py", line 32, in <module>
from .data import TickerData
File "/Users/srijan/miniforge3/envs/PersonalProject/lib/python3.9/site-packages/yfinance/data.py", line 12, in <module>
from cryptography.hazmat.primitives import padding
File "/Users/srijan/miniforge3/envs/PersonalProject/lib/python3.9/site-packages/cryptography/hazmat/primitives/padding.py", line 11, in <module>
from cryptography.hazmat.bindings._rust import (
ImportError: dlopen(/Users/srijan/miniforge3/envs/PersonalProject/lib/python3.9/site-packages/cryptography/hazmat/bindings/_rust.abi3.so, 0x0002): tried: '/Users/srijan/miniforge3/envs/PersonalProject/lib/python3.9/site-packages/cryptography/hazmat/bindings/_rust.abi3.so' (mach-o file, but is an incompatible architecture (have 'arm64', need 'x86_64')), '/System/Volumes/Preboot/Cryptexes/OS/Users/srijan/miniforge3/envs/PersonalProject/lib/python3.9/site-packages/cryptography/hazmat/bindings/_rust.abi3.so' (no such file), '/Users/srijan/miniforge3/envs/PersonalProject/lib/python3.9/site-packages/cryptography/hazmat/bindings/_rust.abi3.so' (mach-o file, but is an incompatible architecture (have 'arm64', need 'x86_64'))
I tried lots of things, like downgrading Python and reinstalling all my dependencies, but nothing works; the error keeps popping up.
Run it on x86 😉.
Some of the dependencies clearly don't support ARM architecture.
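If you want to confirm the mismatch, a small diagnostic sketch (not a fix in itself) is to print the architecture the running interpreter was built for and compare it with the extension that fails to load:
import platform

print(platform.machine())    # e.g. 'arm64' for an Apple Silicon build, 'x86_64' for an Intel/Rosetta build
print(platform.platform())   # full platform string of the running interpreter
# If the interpreter reports one architecture while a compiled extension such as
# _rust.abi3.so was built for the other, you get exactly this dlopen error.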

I am getting a "'NoneType' object is not subscriptable" error when trying to bring in data from a URL

Here is my code:
#Import libraries
import os
import pandas as pd
import requests
import matplotlib.pyplot as plt
import numpy as np
from datetime import date
import matplotlib.ticker as ticker
# API Key from EIA
api_key = 'blah blah'
# api_key = os.getenv("EIA_API_KEY")
# PADD Names to Label Columns
# Change to whatever column labels you want to use.
PADD_NAMES = ['PADD 1','PADD 2','PADD 3','PADD 4','PADD 5']
# Enter all your Series IDs here separated by commas
PADD_KEY = ['PET.MCRRIP12.M',
'PET.MCRRIP22.M',
'PET.MCRRIP32.M',
'PET.MCRRIP42.M',
'PET.MCRRIP52.M']
# Initialize list - this is the final list that you will store all the data from the json pull. Then you will use this list to concat into a pandas dataframe.
final_data = []
# Choose start and end dates
startDate = '2009-01-01'
endDate = '2021-01-01'
# Pull in data via EIA API
for i in range(len(PADD_KEY)):
    url = 'http://api.eia.gov/series/?api_key=' + api_key + PADD_KEY[i]
    r = requests.get(url)
    json_data = r.json()
    if r.status_code == 200:
        print('Success!')
    else:
        print('Error')
    df = pd.DataFrame(json_data.get('series')[0].get('data'),
                      columns=['Date', PADD_NAMES[i]])
    df.set_index('Date', drop=True, inplace=True)
    final_data.append(df)
Here is my error:
TypeError Traceback (most recent call last)
<ipython-input-38-4de082165a0d> in <module>
10 print('Error')
11
---> 12 df = pd.DataFrame(json_data.get('series')[0].get('data'),
13 columns = ['Date', PADD_NAMES[i]])
14 df.set_index('Date', drop=True, inplace=True)
TypeError: 'NoneType' object is not subscriptable
A "'NoneType' object is not subscriptable" error occurs when you try to index into a None object, e.g. df["key"] where df is None.
Do you have PADD_NAMES defined somewhere in your code? To me this looks like an issue with your JSON data. Have you tried printing it?
The API you are calling requires the HTTPS protocol, so try changing "http" to "https":
https://api.eia.gov/series/?api_key=
Consider adding some debug output to check for other errors, by changing the if...else block like this:
if r.status_code == 200:
    print('Success!')
else:
    print('Error')
    print(json_data)
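Putting both suggestions together, a sketch of the loop (assuming api_key, PADD_KEY and PADD_NAMES are defined as in the question) that uses https and guards against a missing 'series' key before indexing:
final_data = []
for i in range(len(PADD_KEY)):
    url = 'https://api.eia.gov/series/?api_key=' + api_key + PADD_KEY[i]
    r = requests.get(url)
    json_data = r.json()
    series = json_data.get('series')
    if r.status_code == 200 and series:
        df = pd.DataFrame(series[0].get('data'), columns=['Date', PADD_NAMES[i]])
        df.set_index('Date', drop=True, inplace=True)
        final_data.append(df)
    else:
        # The response body usually explains the problem (invalid key, malformed URL, ...)
        print('Error for', PADD_KEY[i], ':', json_data)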

Live variable to live animated graph - Python

I am new to Python and am currently working with variables, APIs and visualisation.
Currently I am pulling some sensor data from an API in the form of a temperature reading for a room.
I want to be able to visualise the data being pulled, as well as the live time at which each sample was collected.
I have been able to get to the stage where I pull in the live data and the time the data was collected. This information is then stored in open variables - one for the time and one for the temperature readings.
I now want to display the variables' data as a graph that updates itself whenever a new reading is collected.
I have been able to create and display a single reading in the graph, but it is only a single plot, not all the samples that have been collected.
Is there any way in which this could be done?
My code is below, but I have removed the information required to connect to the API. Everything works as it should until the '#Plotting the live data' part is reached.
import matplotlib
import matplotlib.pyplot as plt
import requests
import json
import sched, time
from datetime import datetime
from datetime import date
import csv
from pathlib import Path
import matplotlib
import matplotlib.pyplot as plt
import random
from itertools import count
import pandas as pd
from matplotlib.animation import FuncAnimation
import requests
import json
import sched, time
from datetime import datetime
from datetime import date
from numpy import array
import os
import re
import string
import numpy
#-----------------------------------------------------------------------------------------------------
import urllib3
urllib3.disable_warnings()
import warnings
warnings.filterwarnings("ignore", message="Glyph 13 missing from current font.")
#-----------------------------------------------------------------------------------------------------
#open variables
data =[]
pulltime = []
print("Pulling live data...")
#-----------------------------------------------------------------------------------------------------
#Schedule repeat after 5 seconds
s = sched.scheduler(time.time, time.sleep)
#Pull data
def data_pull(sc):
    print('Live reading:')
    #Date and time pulled in
    now = datetime.now()
    today = date.today()
    dt_string = now.strftime("%H:%M:%S")
    #-----------------------------------------------------------------------------------------------------
    #Data request
    url = ""
    payload = {}
    headers = {
        "Authorization": ""
    }
    response = requests.request("GET", url, headers=headers, data=payload, verify=False)
    #-----------------------------------------------------------------------------------------------------
    #Variable appending
    #Temperature
    data.append(response.json())
    #Time of sample
    pulltime.append(dt_string)
    #Updated Variable
    print(pulltime + data)
    #------------------------------------------------------------------------------------------------------
    #Saving data to file
    if not Path("x.csv").is_file():
        with open("x.csv", "a", newline='') as f:
            field_names = ['Time', 'R1Temp']
            the_writer = csv.DictWriter(f, fieldnames=field_names)
            the_writer.writeheader()
    with open("x.csv", "a", newline='') as f:
        field_names = ['Time', 'R1Temp']
        the_writer = csv.DictWriter(f, fieldnames=field_names)
        the_writer.writerow({'Time': dt_string, 'R1Temp': response.text})
    print('')
    print("Office A: " + dt_string + ' - ' + response.text)
    #print("Office A: ", data , ' - ' , pulltime)
    #-----------------------------------------------------------------------------------------------------
    #plotting the live data
    x = []
    y = []
    d2 = today.strftime("%d/%m/%Y")
    # Appending of the axis's
    x.append(pulltime)
    y.append(data)
    # Plotting the line points
    plt.plot(x, y, color="Blue", marker="o", label=("R1"))
    # Naming x axis
    plt.xlabel("Live Time")
    plt.ylabel("Temperature °C")
    # Title for the graph
    plt.title("Live temperature of Rooms in MH")
    # Show legend on the plot
    plt.legend(loc="upper left")
    # Function to show the plot
    plt.tight_layout()
    plt.show()
    #-----------------------------------------------------------------------------------------------------
    #repeat after 5 seconds
    s.enter(5, 1, data_pull, (sc,))

s.enter(5, 1, data_pull, (s,))
s.run()
When the '#Plotting the live data' part is included and the code is run, this is the outcome:
Pulling live data...
Live reading:
['13:56:35', '13:56:40', 21.0, 20.9]
Office A: 13:56:35 - 20.9
Traceback (most recent call last):
File "C:\Users\gp\Desktop\saving as a variable.py", line 134, in <module>
s.run()
File "C:\Users\gp\AppData\Local\Programs\Python\Python39\lib\sched.py", line 151, in run
action(*argument, **kwargs)
File "C:\Users\gp\Desktop\saving as a variable.py", line 109, in data_pull
plt.plot(x, y, color ="Blue", marker = "o", label = ("R1"))
File "C:\Users\gp\AppData\Local\Programs\Python\Python39\lib\site-packages\matplotlib\pyplot.py", line 2840, in plot
return gca().plot(
File "C:\Users\gp\AppData\Local\Programs\Python\Python39\lib\site-packages\matplotlib\axes\_axes.py", line 1743, in plot
lines = [*self._get_lines(*args, data=data, **kwargs)]
File "C:\Users\gp\AppData\Local\Programs\Python\Python39\lib\site-packages\matplotlib\axes\_base.py", line 273, in __call__
yield from self._plot_args(this, kwargs)
File "C:\Users\gp\AppData\Local\Programs\Python\Python39\lib\site-packages\matplotlib\axes\_base.py", line 394, in _plot_args
self.axes.xaxis.update_units(x)
File "C:\Users\gp\AppData\Local\Programs\Python\Python39\lib\site-packages\matplotlib\axis.py", line 1466, in update_units
default = self.converter.default_units(data, self)
File "C:\Users\gp\AppData\Local\Programs\Python\Python39\lib\site-packages\matplotlib\category.py", line 107, in default_units
axis.set_units(UnitData(data))
File "C:\Users\gp\AppData\Local\Programs\Python\Python39\lib\site-packages\matplotlib\category.py", line 176, in __init__
self.update(data)
File "C:\Users\gp\AppData\Local\Programs\Python\Python39\lib\site-packages\matplotlib\category.py", line 209, in update
for val in OrderedDict.fromkeys(data):
TypeError: unhashable type: 'numpy.ndarray'
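For what it's worth, the traceback points at the nested lists: x.append(pulltime) and y.append(data) put the whole sample lists inside another list, which matplotlib cannot convert to axis values. A minimal sketch of the plotting block (assuming pulltime holds the time strings and data the temperature readings collected so far) that plots the accumulated samples directly and refreshes without blocking the scheduler:
plt.clf()
plt.plot(pulltime, data, color="Blue", marker="o", label="R1")
plt.xlabel("Live Time")
plt.ylabel("Temperature °C")
plt.title("Live temperature of Rooms in MH")
plt.legend(loc="upper left")
plt.tight_layout()
plt.pause(0.1)   # redraw without blocking, so the sched loop keeps collecting readings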

How do you use the python alpha_vantage API to return extended intraday data?

I have been working with the alpha vantage python API for a while now, but I have only needed to pull daily and intraday timeseries data. I am trying to pull extended intraday data, but am not having any luck getting it to work. Trying to run the following code:
from alpha_vantage.timeseries import TimeSeries
apiKey = 'MY API KEY'
ts = TimeSeries(key = apiKey, output_format = 'pandas')
totalData, _ = ts.get_intraday_extended(symbol = 'NIO', interval = '15min', slice = 'year1month1')
print(totalData)
gives me the following error:
Traceback (most recent call last):
File "/home/pi/Desktop/test.py", line 9, in <module>
totalData, _ = ts.get_intraday_extended(symbol = 'NIO', interval = '15min', slice = 'year1month1')
File "/home/pi/.local/lib/python3.7/site-packages/alpha_vantage/alphavantage.py", line 219, in _format_wrapper
self, *args, **kwargs)
File "/home/pi/.local/lib/python3.7/site-packages/alpha_vantage/alphavantage.py", line 160, in _call_wrapper
return self._handle_api_call(url), data_key, meta_data_key
File "/home/pi/.local/lib/python3.7/site-packages/alpha_vantage/alphavantage.py", line 354, in _handle_api_call
json_response = response.json()
File "/usr/lib/python3/dist-packages/requests/models.py", line 889, in json
self.content.decode(encoding), **kwargs
File "/usr/lib/python3/dist-packages/simplejson/__init__.py", line 518, in loads
return _default_decoder.decode(s)
File "/usr/lib/python3/dist-packages/simplejson/decoder.py", line 370, in decode
obj, end = self.raw_decode(s)
File "/usr/lib/python3/dist-packages/simplejson/decoder.py", line 400, in raw_decode
return self.scan_once(s, idx=_w(s, idx).end())
simplejson.errors.JSONDecodeError: Expecting value: line 1 column 1 (char 0)
What is interesting is that if you look at the TimeSeries class, it states that extended intraday data is returned as a "time series in one csv_reader object", whereas everything else, which works for me, is returned as "two json objects". I am 99% sure this has something to do with the issue, but I'm not entirely certain, because I would think that calling the extended intraday function would at least return SOMETHING (despite it being in a different format) instead of just giving me an error.
Another interesting little note is that the function refuses to take "adjusted = True" (or False) as an input, despite it being in the documentation... likely unrelated, but it might help diagnose the issue.
It seems TIME_SERIES_INTRADAY_EXTENDED can only return CSV format, but the alpha_vantage wrapper applies JSON methods, which results in the error.
My workaround:
from alpha_vantage.timeseries import TimeSeries
import pandas as pd
apiKey = 'MY API KEY'
ts = TimeSeries(key = apiKey, output_format = 'csv')
#download the csv
totalData = ts.get_intraday_extended(symbol = 'NIO', interval = '15min', slice = 'year1month1')
#csv --> dataframe
df = pd.DataFrame(list(totalData[0]))
#setup of column and index
header_row=0
df.columns = df.iloc[header_row]
df = df.drop(header_row)
df.set_index('time', inplace=True)
#show output
print(df)
This is an easy way to do it:
import pandas as pd

ticker = 'IBM'
date = 'year1month2'
apiKey = 'MY API KEY'
df = pd.read_csv('https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY_EXTENDED&symbol='+ticker+'&interval=15min&slice='+date+'&apikey='+apiKey+'&datatype=csv&outputsize=full')
#Show output
print(df)
import pandas as pd
symbol = 'AAPL'
interval = '15min'
slice = 'year1month1'
api_key = ''
adjusted = '&adjusted=true&'
csv_url = 'https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY_EXTENDED&symbol='+symbol+'&interval='+interval+'&slice='+slice+adjusted+'&apikey='+api_key
data = pd.read_csv(csv_url)
print(data.head())

Loading data from Yahoo! Finance with pandas

I am working my way through Wes McKinney's book Python For Data Analysis and on page 139 under Correlation and Covariance, I am getting an error when I try to run his code to obtain data from Yahoo! Finance.
Here is what I am running:
#CORRELATION AND COVARIANCE
import pandas.io.data as web

all_data = {}
for ticker in ['AAPL', 'IBM', 'MSFT', 'GOOG']:
    all_data[ticker] = web.get_data_yahoo(ticker, '1/1/2003', '1/1/2013')

price = DataFrame({tic: data['Adj Close']
                   for tic, data in all_data.iteritems()})
volume = DataFrame({tic: data['Volume']
                    for tic, data in all_data.iteritems()})
Here is the error I am getting:
Traceback (most recent call last):
File "<stdin>", line 2, in <module>
File "C:\Users\eMachine\WinPython-64bit-2.7.5.3\python-2.7.5.amd64\lib\site-packages\pandas\io\data.py", line 390, in get_data_yahoo
adjust_price, ret_index, chunksize, 'yahoo', name)
File "C:\Users\eMachine\WinPython-64bit-2.7.5.3\python-2.7.5.amd64\lib\site-packages\pandas\io\data.py", line 336, in _get_data_from
hist_data = src_fn(symbols, start, end, retry_count, pause)
File "C:\Users\eMachine\WinPython-64bit-2.7.5.3\python-2.7.5.amd64\lib\site-packages\pandas\io\data.py", line 190, in _get_hist_yahoo
return _retry_read_url(url, retry_count, pause, 'Yahoo!')
File "C:\Users\eMachine\WinPython-64bit-2.7.5.3\python-2.7.5.amd64\lib\site-packages\pandas\io\data.py", line 169, in _retry_read_url
"return a 200 for url %r" % (retry_count, name, url))
IOError: after 3 tries, Yahoo! did not return a 200 for url 'http://ichart.yahoo.com/table.csv?s=GOOG&a=0&b=1&c=2000&d=0&e=1&f=2010&g=d&ignore=.csv'
Any idea on what the problem is?
As Karl pointed out, the ticker had changed, meaning Yahoo returns a 'page not found'.
When polling data from the web, it is a good idea to wrap the call in a try/except:
all_data = {}
for ticker in ['AAPL', 'IBM', 'MSFT', 'GOOG']:
    try:
        all_data[ticker] = web.get_data_yahoo(ticker, '1/1/2003', '1/1/2013')
        price = DataFrame({tic: data['Adj Close']
                           for tic, data in all_data.iteritems()})
        volume = DataFrame({tic: data['Volume']
                            for tic, data in all_data.iteritems()})
    except:
        print "Cant find ", ticker
Had the same problem and changing 'GOOG' to 'GOOGL' seems to work, once you've followed these instructions to switch from pandas.io.data to pandas_datareader.data.
http://pandas-datareader.readthedocs.org/en/latest/remote_data.html#yahoo-finance
As of 6/1/17, I pieced the following together from this page and a couple of others:
from pandas_datareader import data as web
# import pandas.io.data as web
import fix_yahoo_finance
import datetime
from pandas import DataFrame  # needed for the DataFrame calls below

start = datetime.datetime(2010, 1, 1)
end = datetime.datetime(2017, 6, 1)

all_data = {}
for ticker in ['AAPL', 'IBM', 'MSFT', 'GOOGL']:
    all_data[ticker] = web.get_data_yahoo(ticker, start, end)

price = DataFrame({tic: data['Adj Close']
                   for tic, data in all_data.iteritems()})
volume = DataFrame({tic: data['Volume']
                    for tic, data in all_data.iteritems()})
I'm using the code snippet below to load Yahoo Finance data.
import pandas_datareader as pdr
from datetime import datetime
from pandas import DataFrame as df

def get_data(selection, sdate, edate):
    data = pdr.get_data_yahoo(symbols=selection, start=sdate, end=edate)
    data = df(data['Adj Close'])
    return data

start_date = datetime(2017, 1, 1)
end_date = datetime(2019, 4, 28)
selected = ['TD.TO', 'AC.TO', 'BNS.TO', 'ENB.TO', 'MFC.TO', 'RY.TO', 'BCE.TO']
print(get_data(selected, start_date, end_date).head(1))
https://repl.it/repls/DevotedBetterAlgorithms
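The approaches above rely on data sources that have since changed (pandas.io.data was removed, and fix_yahoo_finance became yfinance), so here is a minimal sketch of the same correlation/covariance setup using the yfinance package; the package choice and parameters are assumptions, not part of the original answers.
import yfinance as yf

tickers = ['AAPL', 'IBM', 'MSFT', 'GOOGL']
# auto_adjust=False keeps a separate 'Adj Close' column, as in the book example
raw = yf.download(tickers, start='2003-01-01', end='2013-01-01', auto_adjust=False)

price = raw['Adj Close']    # one column per ticker
volume = raw['Volume']

returns = price.pct_change()
print(returns['MSFT'].corr(returns['IBM']))   # pairwise correlation
print(returns.corr())                         # full correlation matrix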
