I am new to Python and am currently working with variables, APIs and visualisation.
At the moment I am pulling some sensor data from an API in the form of a temperature reading for a room.
I want to be able to visualise the data that is being pulled, as well as the live time at which each sample was collected.
I have got to the stage where I pull in the live data and the time each reading was taken. This information is then stored in open variables (lists) - one for the times and one for the temperature readings.
I now want to be able to display the variables' data in the form of a graph that updates itself whenever a new reading has been collected.
I have been able to create and display a single reading on the graph, but it is only a single point, not all of the samples that have been collected.
Is there any way in which this could be done?
My code is below, but I have removed the information that is required to connect to the API. Everything works as it should until the '#Plotting the live data' part is reached.
import csv
import json
import os
import random
import re
import sched, time
import string
from datetime import datetime, date
from itertools import count
from pathlib import Path

import matplotlib
import matplotlib.pyplot as plt
import numpy
from numpy import array
import pandas as pd
import requests
from matplotlib.animation import FuncAnimation
#-----------------------------------------------------------------------------------------------------
import urllib3
urllib3.disable_warnings()
import warnings
warnings.filterwarnings("ignore", message="Glyph 13 missing from current font.")
#-----------------------------------------------------------------------------------------------------
#open variables
data =[]
pulltime = []
print("Pulling live data...")
#-----------------------------------------------------------------------------------------------------
#Schedule repeat after 5 seconds
s = sched.scheduler(time.time, time.sleep)
#Pull data
def data_pull(sc):
    print('Live reading:')
    # Date and time pulled in
    now = datetime.now()
    today = date.today()
    dt_string = now.strftime("%H:%M:%S")
    #-----------------------------------------------------------------------------------------------------
    # Data request
    url = ""
    payload = {}
    headers = {
        "Authorization": ""
    }
    response = requests.request("GET", url, headers=headers, data=payload, verify=False)
    #-----------------------------------------------------------------------------------------------------
    # Variable appending
    # Temperature
    data.append(response.json())
    # Time of sample
    pulltime.append(dt_string)
    # Updated variables
    print(pulltime + data)
    #------------------------------------------------------------------------------------------------------
    # Saving data to file
    if not Path("x.csv").is_file():
        with open("x.csv", "a", newline='') as f:
            field_names = ['Time', 'R1Temp']
            the_writer = csv.DictWriter(f, fieldnames=field_names)
            the_writer.writeheader()
    with open("x.csv", "a", newline='') as f:
        field_names = ['Time', 'R1Temp']
        the_writer = csv.DictWriter(f, fieldnames=field_names)
        the_writer.writerow({'Time': dt_string, 'R1Temp': response.text})
    print('')
    print("Office A: " + dt_string + ' - ' + response.text)
    #print("Office A: ", data, ' - ', pulltime)
    #-----------------------------------------------------------------------------------------------------
    #Plotting the live data
    x = []
    y = []
    d2 = today.strftime("%d/%m/%Y")
    # Appending to the axes
    x.append(pulltime)
    y.append(data)
    # Plotting the line points
    plt.plot(x, y, color="Blue", marker="o", label="R1")
    # Naming the axes
    plt.xlabel("Live Time")
    plt.ylabel("Temperature °C")
    # Title for the graph
    plt.title("Live temperature of Rooms in MH")
    # Show legend on the plot
    plt.legend(loc="upper left")
    # Show the plot
    plt.tight_layout()
    plt.show()
    #-----------------------------------------------------------------------------------------------------
    # Repeat after 5 seconds
    s.enter(5, 1, data_pull, (sc,))

s.enter(5, 1, data_pull, (s,))
s.run()
When the '#Plotting the live data' part is included and the code is run, this is the outcome:
Pulling live data...
Live reading:
['13:56:35', '13:56:40', 21.0, 20.9]
Office A: 13:56:35 - 20.9
Traceback (most recent call last):
File "C:\Users\gp\Desktop\saving as a variable.py", line 134, in <module>
s.run()
File "C:\Users\gp\AppData\Local\Programs\Python\Python39\lib\sched.py", line 151, in run
action(*argument, **kwargs)
File "C:\Users\gp\Desktop\saving as a variable.py", line 109, in data_pull
plt.plot(x, y, color ="Blue", marker = "o", label = ("R1"))
File "C:\Users\gp\AppData\Local\Programs\Python\Python39\lib\site-packages\matplotlib\pyplot.py", line 2840, in plot
return gca().plot(
File "C:\Users\gp\AppData\Local\Programs\Python\Python39\lib\site-packages\matplotlib\axes\_axes.py", line 1743, in plot
lines = [*self._get_lines(*args, data=data, **kwargs)]
File "C:\Users\gp\AppData\Local\Programs\Python\Python39\lib\site-packages\matplotlib\axes\_base.py", line 273, in __call__
yield from self._plot_args(this, kwargs)
File "C:\Users\gp\AppData\Local\Programs\Python\Python39\lib\site-packages\matplotlib\axes\_base.py", line 394, in _plot_args
self.axes.xaxis.update_units(x)
File "C:\Users\gp\AppData\Local\Programs\Python\Python39\lib\site-packages\matplotlib\axis.py", line 1466, in update_units
default = self.converter.default_units(data, self)
File "C:\Users\gp\AppData\Local\Programs\Python\Python39\lib\site-packages\matplotlib\category.py", line 107, in default_units
axis.set_units(UnitData(data))
File "C:\Users\gp\AppData\Local\Programs\Python\Python39\lib\site-packages\matplotlib\category.py", line 176, in __init__
self.update(data)
File "C:\Users\gp\AppData\Local\Programs\Python\Python39\lib\site-packages\matplotlib\category.py", line 209, in update
for val in OrderedDict.fromkeys(data):
TypeError: unhashable type: 'numpy.ndarray'
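The traceback points at the plotting block itself: x.append(pulltime) and y.append(data) wrap the whole lists inside another list, so plt.plot receives a 2-D array of strings, and matplotlib's category converter then fails on its unhashable rows. On top of that, plt.show() blocks, so the scheduler never fires again. A minimal sketch of a redraw helper (the name redraw is mine, not from the original code), assuming pulltime holds the time strings and data plain temperature floats as in the printout above:

import matplotlib.pyplot as plt

plt.ion()  # interactive mode, so drawing does not block the scheduler

def redraw(pulltime, data):
    plt.clf()  # clear the previous frame
    # Pass the flat lists directly -- no extra x.append(pulltime) nesting.
    plt.plot(pulltime, data, color="Blue", marker="o", label="R1")
    plt.xlabel("Live Time")
    plt.ylabel("Temperature °C")
    plt.title("Live temperature of Rooms in MH")
    plt.legend(loc="upper left")
    plt.tight_layout()
    plt.pause(0.1)  # let the GUI event loop render before returning

Calling redraw(pulltime, data) at the end of data_pull, in place of the '#Plotting the live data' block, replots every sample collected so far instead of a single point.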
I have imported the libraries and connected the multimeter, but I encounter an error related to a writer function.
# =============================================================================
# VISA SETUP
# =============================================================================
import pyvisa as visa
rm = visa.ResourceManager()
rList = rm.list_resources()
my_instrument = rm.open_resource(rList[0])
idn_info = (my_instrument.query('*IDN?'))
print(idn_info)
# =============================================================================
# ============================================================================
import time
import matplotlib.pyplot as plt
import pandas as pd
DMM = my_instrument
#set parameters here:
xInc = 15 #number of seconds between samples
samples = 50 #number of samples you want overall
updateInc = 30 #how often the program reminds you it's running (in seconds)
xValues = []
yValues = []
for i in range(samples):
    if (i * xInc) % updateInc == 0:
        print("Acquiring data...")
    time.sleep(xInc)
    xValues.append(i * xInc)
    yValues.append(float(DMM.query(':MEASure:VOLTage:DC?')))
#create simple reference plot
print("Plotting...")
xLabel = "Time (s)"
yLabel = "Voltage (V)"
plt.xlabel(xLabel)
plt.ylabel(yLabel)
plt.title('DM3058E Monitoring')
plt.plot(xValues, yValues)
#export to CSV in current directory
print("Exporting to CSV...")
df = pd.DataFrame({xLabel:xValues, yLabel:yValues})
df.to_csv('DM3058_Monitoring.csv')
print("Done!")
This is the error I receive:
Traceback (most recent call last):
File "C:\Users\rrashid\Desktop\py\QR_code\multimeter.py", line 4, in <module>
import pyvisa as visa
File "C:\Users\rrashid\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\LocalCache\local-packages\Python310\site-packages\pyvisa\__init__.py", line 14, in <module>
from importlib.metadata import version, PackageNotFoundError
File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.1264.0_x64__qbz5n2kfra8p0\lib\importlib\metadata\__init__.py", line 4, in <module>
import csv
File "C:\Users\rrashid\Desktop\py\QR_code\csv.py", line 4, in <module>
writer = csv.writer(file)
AttributeError: partially initialized module 'csv' has no attribute 'writer' (most likely due to a circular import)
PS C:\Users\rrashid\Desktop\py\QR_code>
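The last two frames show what is happening: importing pyvisa eventually reaches import csv, but Python resolves that to the local file C:\Users\rrashid\Desktop\py\QR_code\csv.py instead of the standard-library module, and that file in turn uses csv.writer before the module has finished initialising - the circular import the error message mentions. A quick check that avoids importing the module at all:

import importlib.util

# Where would "import csv" resolve from here? If the origin points inside
# the project folder rather than the Python installation, a local csv.py is
# shadowing the standard-library module.
print(importlib.util.find_spec("csv").origin)

Renaming the local csv.py (and removing any stale __pycache__ entry for it) should let pyvisa import cleanly.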
I have been working with the Alpha Vantage Python API for a while now, but I have only needed to pull daily and intraday time series data. Now I am trying to pull extended intraday data, but am not having any luck getting it to work. Running the following code:
from alpha_vantage.timeseries import TimeSeries
apiKey = 'MY API KEY'
ts = TimeSeries(key = apiKey, output_format = 'pandas')
totalData, _ = ts.get_intraday_extended(symbol = 'NIO', interval = '15min', slice = 'year1month1')
print(totalData)
gives me the following error:
Traceback (most recent call last):
File "/home/pi/Desktop/test.py", line 9, in <module>
totalData, _ = ts.get_intraday_extended(symbol = 'NIO', interval = '15min', slice = 'year1month1')
File "/home/pi/.local/lib/python3.7/site-packages/alpha_vantage/alphavantage.py", line 219, in _format_wrapper
self, *args, **kwargs)
File "/home/pi/.local/lib/python3.7/site-packages/alpha_vantage/alphavantage.py", line 160, in _call_wrapper
return self._handle_api_call(url), data_key, meta_data_key
File "/home/pi/.local/lib/python3.7/site-packages/alpha_vantage/alphavantage.py", line 354, in _handle_api_call
json_response = response.json()
File "/usr/lib/python3/dist-packages/requests/models.py", line 889, in json
self.content.decode(encoding), **kwargs
File "/usr/lib/python3/dist-packages/simplejson/__init__.py", line 518, in loads
return _default_decoder.decode(s)
File "/usr/lib/python3/dist-packages/simplejson/decoder.py", line 370, in decode
obj, end = self.raw_decode(s)
File "/usr/lib/python3/dist-packages/simplejson/decoder.py", line 400, in raw_decode
return self.scan_once(s, idx=_w(s, idx).end())
simplejson.errors.JSONDecodeError: Expecting value: line 1 column 1 (char 0)
What is interesting is that if you look at the TimeSeries class, it states that extended intraday is returned as a "time series in one csv_reader object", whereas everything else, which works for me, is returned as "two json objects". I am 99% sure this has something to do with the issue, but I'm not entirely sure, because I would think that calling the extended intraday function would at least return SOMETHING (despite it being in a different format); instead it just gives me an error.
Another interesting little note is that the function refuses to take "adjusted = True" (or False) as an input despite it being in the documentation... likely unrelated, but maybe it might help diagnose.
Seems like TIME_SERIES_INTRADAY_EXTENDED can return only CSV format, but the alpha_vantage wrapper applies JSON methods, which results in the error.
My workaround:
from alpha_vantage.timeseries import TimeSeries
import pandas as pd
apiKey = 'MY API KEY'
ts = TimeSeries(key = apiKey, output_format = 'csv')
#download the csv
totalData = ts.get_intraday_extended(symbol = 'NIO', interval = '15min', slice = 'year1month1')
#csv --> dataframe
df = pd.DataFrame(list(totalData[0]))
#setup of column and index
header_row=0
df.columns = df.iloc[header_row]
df = df.drop(header_row)
df.set_index('time', inplace=True)
#show output
print(df)
This is an easy way to do it.
import pandas as pd

ticker = 'IBM'
date= 'year1month2'
apiKey = 'MY API KEY'
df = pd.read_csv('https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY_EXTENDED&symbol='+ticker+'&interval=15min&slice='+date+'&apikey='+apiKey+'&datatype=csv&outputsize=full')
#Show output
print(df)
import pandas as pd
symbol = 'AAPL'
interval = '15min'
slice = 'year1month1'
api_key = ''
adjusted = '&adjusted=true&'
csv_url = 'https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY_EXTENDED&symbol='+symbol+'&interval='+interval+'&slice='+slice+adjusted+'&apikey='+api_key
data = pd.read_csv(csv_url)
print(data.head())
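A small variant of the same direct-CSV call, for anyone who prefers not to hand-concatenate the query string (the parameter values here are the illustrative ones from the snippets above):

import pandas as pd
from urllib.parse import urlencode

# Same TIME_SERIES_INTRADAY_EXTENDED endpoint as above; urlencode just builds
# the query string from a dict instead of string concatenation.
params = {
    'function': 'TIME_SERIES_INTRADAY_EXTENDED',
    'symbol': 'AAPL',
    'interval': '15min',
    'slice': 'year1month1',
    'adjusted': 'true',
    'apikey': 'MY API KEY',
}
data = pd.read_csv('https://www.alphavantage.co/query?' + urlencode(params))
print(data.head())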
So I've got a variety of pandas dataframes and I'd like to reuse a bokeh app to view them, rather than hard coding the data into the app. I've adapted the crossfilter example to show the problem with my approach.
This should be called as
dframe = some Pandas dataframe
invokeh(bk_crossfilter, dframe)
and defined as in the code block below.
It seems that bokeh doesn't like the use of functools.partial in the definition of the app below, and gives me a 500 server error in the browser. Earlier today the print statement in the bokeh app was showing the dataframe, but the argument now seems to arrive as a bokeh Document by the time the function is entered. The console output is now:
Preparing a bokeh application.
Opening Bokeh application on http://localhost:5006/
CROSSFILTER: <bokeh.document.document.Document object at 0x0000029BAB2783C8>
ERROR:tornado.application:Uncaught exception GET / (::1)
HTTPServerRequest(protocol='http', host='localhost:5006', method='GET', uri='/', version='HTTP/1.1', remote_ip='::1')
Traceback (most recent call last):
File "C:\Users\jdorsey\code\conda\environment\REDACTED\lib\site-packages\tornado\web.py", line 1703, in _execute
result = await result
File "C:\Users\jdorsey\code\conda\environment\REDACTED\lib\site-packages\bokeh\server\views\doc_handler.py", line 52, in get
session = await self.get_session()
File "C:\Users\jdorsey\code\conda\environment\REDACTED\lib\site-packages\bokeh\server\views\session_handler.py", line 120, in get_session
session = await self.application_context.create_session_if_needed(session_id, self.request, token)
File "C:\Users\jdorsey\code\conda\environment\REDACTED\lib\site-packages\bokeh\server\contexts.py", line 218, in create_session_if_needed
self._application.initialize_document(doc)
File "C:\Users\jdorsey\code\conda\environment\REDACTED\lib\site-packages\bokeh\application\application.py", line 171, in initialize_document
h.modify_document(doc)
File "C:\Users\jdorsey\code\conda\environment\REDACTED\lib\site-packages\bokeh\application\handlers\function.py", line 132, in modify_document
self._func(doc)
File "c:\users\jdorsey\code\utils\dorsey\visualisation.py", line 36, in bk_crossfilter
columns = sorted(df.columns)
AttributeError: 'Document' object has no attribute 'columns'
ERROR:tornado.access:500 GET / (::1) 7.98ms
WARNING:tornado.access:404 GET /favicon.ico (::1) 0.99ms
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "c:\users\jdorsey\code\utils\dorsey\visualisation.py", line 27, in invokeh
io_loop.start()
File "C:\Users\jdorsey\code\conda\environment\REDACTED\lib\site-packages\tornado\platform\asyncio.py", line 149, in start
self.asyncio_loop.run_forever()
File "C:\Users\jdorsey\code\conda\environment\REDACTED\lib\asyncio\base_events.py", line 422, in run_forever
self._run_once()
File "C:\Users\jdorsey\code\conda\environment\REDACTED\lib\asyncio\base_events.py", line 1396, in _run_once
event_list = self._selector.select(timeout)
File "C:\Users\jdorsey\code\conda\environment\REDACTED\lib\selectors.py", line 323, in select
r, w, _ = self._select(self._readers, self._writers, [], timeout)
File "C:\Users\jdorsey\code\conda\environment\REDACTED\lib\selectors.py", line 314, in _select
r, w, x = select.select(r, w, w, timeout)
Adding a print statement to the invokeh function confirms dataset is a valid pandas frame there.
[1379 rows x 82 columns]
And the code:
from tornado.ioloop import IOLoop
from bokeh.application.handlers import FunctionHandler
from bokeh.application import Application
from bokeh.server.server import Server
from bokeh.layouts import column, row
from bokeh.models import Select
from bokeh.palettes import Spectral5
from bokeh.plotting import curdoc, figure
from functools import partial
import pandas as pd  # used by bk_crossfilter below
def invokeh(visual, dataset):
    """
    Launch a bokeh server and connect to it.
    ARGUMENTS:
    visual: A bokeh visualisation
    dataset: A dataset to populate the visualisation
    """
    print("Preparing a bokeh application.")
    io_loop = IOLoop.current()
    bokeh_app = Application(FunctionHandler(partial(visual, dataset)))
    server = Server({"/": bokeh_app})  # , io_loop=io_loop)
    server.start()
    print("Opening Bokeh application on http://localhost:5006/")
    io_loop.add_callback(server.show, "/")
    io_loop.start()

def bk_crossfilter(doc, df):
    print('CROSSFILTER: ', df)
    SIZES = list(range(6, 22, 3))
    COLORS = Spectral5
    N_SIZES = len(SIZES)
    N_COLORS = len(COLORS)
    columns = sorted(df.columns)
    discrete = [x for x in columns if df[x].dtype == object]
    continuous = [x for x in columns if x not in discrete]

    def create_figure():
        xs = df[x.value].values
        ys = df[y.value].values
        x_title = x.value.title()
        y_title = y.value.title()
        kw = dict()
        if x.value in discrete:
            kw['x_range'] = sorted(set(xs))
        if y.value in discrete:
            kw['y_range'] = sorted(set(ys))
        kw['title'] = "%s vs %s" % (x_title, y_title)
        p = figure(plot_height=600, plot_width=800, tools='pan,box_zoom,hover,reset', **kw)
        p.xaxis.axis_label = x_title
        p.yaxis.axis_label = y_title
        if x.value in discrete:
            p.xaxis.major_label_orientation = pd.np.pi / 4
        sz = 9
        if size.value != 'None':
            if len(set(df[size.value])) > N_SIZES:
                groups = pd.qcut(df[size.value].values, N_SIZES, duplicates='drop')
            else:
                groups = pd.Categorical(df[size.value])
            sz = [SIZES[xx] for xx in groups.codes]
        c = "#31AADE"
        if color.value != 'None':
            if len(set(df[color.value])) > N_COLORS:
                groups = pd.qcut(df[color.value].values, N_COLORS, duplicates='drop')
            else:
                groups = pd.Categorical(df[color.value])
            c = [COLORS[xx] for xx in groups.codes]
        p.circle(x=xs, y=ys, color=c, size=sz, line_color="white", alpha=0.6, hover_color='white', hover_alpha=0.5)
        return p

    def update(attr, old, new):
        layout.children[1] = create_figure()

    print("COLUMNS: ", columns[0], columns[1])
    print("DISCRETE: ", discrete, " CONTINUOUS: ", continuous)
    x = Select(title='X-Axis', value=columns[0], options=columns)
    x.on_change('value', update)
    y = Select(title='Y-Axis', value=columns[1], options=columns)
    y.on_change('value', update)
    size = Select(title='Size', value='None', options=['None'] + continuous)
    size.on_change('value', update)
    color = Select(title='Color', value='None', options=['None'] + continuous)
    color.on_change('value', update)
    controls = column(x, y, color, size, width=200)
    layout = row(controls, create_figure())
    curdoc().add_root(layout)
    curdoc().title = "Crossfilter"
Thanks in advance for any suggestions.
By changing:
bokeh_app = Application(FunctionHandler(partial(visual,dataset)))
to:
bokeh_app = Application(FunctionHandler(lambda dataset: visual))
I now only get this in the console:
Opening Bokeh application on http://localhost:5006/
WARNING:tornado.access:404 GET /favicon.ico (::1) 1.00ms
And no error in the browser. But the browser renders a blank page containing the following source (there's more but it doesn't tell me much):
    root.Bokeh.embed.embed_items(docs_json, render_items);
}
if (root.Bokeh !== undefined) {
    embed_document(root);
} else {
    var attempts = 0;
    var timer = setInterval(function(root) {
        if (root.Bokeh !== undefined) {
            clearInterval(timer);
            embed_document(root);
        } else {
            attempts++;
            if (attempts > 100) {
                clearInterval(timer);
                console.log("Bokeh: ERROR: Unable to run BokehJS code because BokehJS library is missing");
            }
        }
    }, 10, root)
}
The console log doesn't actually display the error message mentioned in the page source...
Pretty sure you need to be explicit with partial about which arguments you want bound to what:
partial(visual, df=dataset)
As it is, I am nearly certain you are binding dataset to the doc argument, so when Bokeh calls your app function the Document gets passed in as df instead (the only free argument left).
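Concretely, a sketch of the corrected wiring, using the invokeh and bk_crossfilter definitions from the question:

# Bind the dataframe to df by keyword, so the Document that Bokeh passes
# positionally lands in doc, the only remaining free parameter.
bokeh_app = Application(FunctionHandler(partial(bk_crossfilter, df=dataset)))

That would also explain the blank page from the lambda version: lambda dataset: visual just returns the function object without ever calling it, so nothing is added to the Document and the served page is empty.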
I am trying to follow the steps listed here to update a feature on AGOL from a local feature class. I keep getting a circular-reference error within the for loop and I'm not sure why it's happening.
Please see the code I'm using below.
import arcgis, arcpy, csv, os, time, copy, pandas as pd
from arcgis.gis import GIS
from pandas import DataFrame
from copy import deepcopy
gis = GIS("url", "username","pass")
fc = gis.content.get('ItemID')
flayer = fc.layers[0]
fset=flayer.query()
fields = ('GPS_Time','Visibility','EngineeringSection','Condition')
UpdateLayer = "C:\\Users\\USer\\Documents\\ArcGIS\\Default.gdb\\Data"
UpdateTable=DataFrame(arcpy.da.FeatureClassToNumPyArray(UpdateLayer , fields, skip_nulls=True))
overlap_rows = pd.merge(left=fset.sdf, right = UpdateTable, how='inner', on='EngineeringSection')
features_for_update = []
all_features = fset.features
for EngSec in overlap_rows['EngineeringSection']:
    original_feature = [f for f in all_features if f.attributes['EngineeringSection'] == EngSec][0]
    feature_to_be_updated = deepcopy(original_feature)
    matching_row = UpdateTable.where(UpdateTable['EngineeringSection'] == EngSec).dropna()
    original_feature.attributes['GPS_Time'] = (matching_row['GPS_Time'])
    original_feature.attributes['Visibility'] = int(matching_row['Visibility'])
    original_feature.attributes['Condition'] = str(matching_row['Condition'])
    update_result = flayer.edit_features(updates=[original_feature])

flayer.edit_features(updates=features_for_update)
Here is the error I receive:
Traceback (most recent call last):
File "<stdin>", line 9, in <module>
File "C:\Program Files\ArcGIS\Pro\bin\Python\envs\arcgispro-py3\lib\site-packages\arcgis\features\layer.py", line 1249, in edit_features
default=_date_handler)
File "C:\Program Files\ArcGIS\Pro\bin\Python\envs\arcgispro-py3\lib\json\__init__.py", line 238, in dumps
**kw).encode(obj)
File "C:\Program Files\ArcGIS\Pro\bin\Python\envs\arcgispro-py3\lib\json\encoder.py", line 199, in encode
chunks = self.iterencode(o, _one_shot=True)
File "C:\Program Files\ArcGIS\Pro\bin\Python\envs\arcgispro-py3\lib\json\encoder.py", line 257, in iterencode
return _iterencode(o, 0)
ValueError: Circular reference detected
The line below assigns a whole pandas Series as the attribute value (the parentheses are redundant; they do not make a tuple), and a Series cannot be serialised to JSON. Is that what you wanted?
original_feature.attributes['GPS_Time'] = (matching_row['GPS_Time'])
If you want to assign just the value, pull the scalar out of the Series:
original_feature.attributes['GPS_Time'] = matching_row['GPS_Time'].iloc[0]
Also, I think this line:
flayer.edit_features(updates= features_for_update)
Should be:
flayer.edit_features(updates=[feature_to_be_updated])
Thanks for your help, I was able to get it all running with the script below. I also added in some timing to see how long it was taking:
import arcpy, csv, os, time
import pandas as pd
from arcgis.gis import GIS
from pandas import DataFrame
from copy import deepcopy
start_time = time.time()
gis = GIS("url", "user","pass")
fc = gis.content.get('ContentID')
flayer = fc.layers[0]
fset=flayer.query()
fields = ('GPS_Time','Visibility','EngineeringSection','Condition')
UpdateLayer = "C:\\Users\\user\\Documents\\ArcGIS\\Default.gdb\\data"
UpdateTable=DataFrame(arcpy.da.FeatureClassToNumPyArray(UpdateLayer , fields, skip_nulls=True))
overlap_rows = pd.merge(left=fset.sdf, right = UpdateTable, how='inner', on='EngineeringSection')
features_for_update = []
all_features = fset.features
for EngSec in overlap_rows['EngineeringSection']:
    original_feature = [f for f in all_features if f.attributes['EngineeringSection'] == EngSec][0]
    feature_to_be_updated = deepcopy(original_feature)
    matching_row = UpdateTable.where(UpdateTable['EngineeringSection'] == EngSec).dropna()
    feature_to_be_updated.attributes['GPS_Time'] = matching_row['GPS_Time'].iloc[0]
    feature_to_be_updated.attributes['Visibility'] = int(matching_row['Visibility'])
    feature_to_be_updated.attributes['Condition'] = str(matching_row['Condition'].iloc[0])
    update_result = flayer.edit_features(updates=[feature_to_be_updated])

update_result
elapsed_time = time.time() - start_time
totaltime = time.strftime("%H:%M:%S", time.gmtime(elapsed_time))
print("Total processing time: "+ totaltime)
Previously, I used a Morningstar API to get stock data; however, now that I am away from the USA for a week, I am not able to access the data.
This is the code snippet:
import datetime as dt
from dateutil.relativedelta import relativedelta
import matplotlib.pyplot as plt
from matplotlib import style
import pandas as pd
pd.core.common.is_list_like = pd.api.types.is_list_like
import pandas_datareader.data as web
import csv
from mpl_finance import candlestick_ohlc
import matplotlib.dates as mdates
from matplotlib.dates import DateFormatter, MonthLocator, YearLocator, DayLocator, WeekdayLocator
style.use( 'ggplot' )
end = dt.date.today()
start_48 = end - relativedelta( years=4 )
start_120 = end - relativedelta( years=10 )
ticker = input( 'Ticker: ' ) #should be in Uppercase
ticker = ticker.upper()
df_w = web.DataReader( ticker, 'morningstar', start_48, end )
df_m = web.DataReader( ticker, 'morningstar', start_120, end )
print()
file_name_w = ticker + 'weekly.csv'
file_name_m = ticker + 'monthly.csv'
df_w.to_csv( file_name_w )
df_m.to_csv( file_name_m )
df_w = pd.read_csv( file_name_w, parse_dates=True, index_col=0 )
df_m = pd.read_csv( file_name_m, parse_dates=True, index_col=0 )
This is the error message:
Ticker: spy
Traceback (most recent call last):
File "/Users/zubairjohal/Documents/OHLC.py", line 24, in <module>
df_w = web.DataReader( ticker, 'morningstar', start_48, end )
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/pandas_datareader/data.py", line 391, in DataReader
session=session, interval="d").read()
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/pandas_datareader/mstar/daily.py", line 219, in read
df = self._dl_mult_symbols(symbols=symbols)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/pandas_datareader/mstar/daily.py", line 130, in _dl_mult_symbols
resp.status_code, resp.reason))
Exception: Request Error!: 404 : Not Found
Is it an IP issue, and is there a way to fix this? I know that this code is fine because it worked perfectly well two days ago.
I had the same problem too, here in the USA. The datareader service (morningstar) worked 3 days ago and stopped working the day before yesterday. I believe that Morningstar changed their REST interface, so there is not much we can do except wait for the developers to fix it.
404 means "not found". Assuming you didn't make any changes and it suddenly stopped working, I would say either the API URL is not accessible in that country (or is blocked on that specific network), or their API changed (or is under maintenance). If you know the API URL, try it directly in a browser over different Internet connections.
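A quick way to do that check from Python rather than a browser; the URL below is a placeholder, not the real Morningstar endpoint, so substitute whatever address the datareader is hitting:

import requests

# Placeholder address -- swap in the actual endpoint you want to test.
url = "https://example.com/morningstar-endpoint"
resp = requests.get(url)
print(resp.status_code, resp.reason)  # a 404 here confirms the endpoint itself is gone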