quandl FXH1 Download Data - python

I have a subscription to FXCM 1-hour data. I can see the data when I go to https://www.quandl.com/tables/FXH1/FXCM-H1, but the code below keeps giving me:
File "quandl1.py", line 17, in <module>
data = quandl.get('FXH1/FXCM-H1')
File "/usr/lib64/python3.5/site-packages/quandl/get.py", line 48, in get
data = Dataset(dataset_args['code']).data(params=kwargs, handle_column_not_found=True)
If I go to https://www.quandl.com/tables/FXH1/FXCM-H1 I can see limited data. Wouldn't the Quandl code therefore be FXH1/FXCM-H1?
import quandl
quandl.ApiConfig.api_key = "My Key"
data = quandl.get('FXH1/FXCM-H1')
data.tail()
[me@localhost fxcm]$ python3 quandl1.py
Traceback (most recent call last):
File "quandl1.py", line 17, in <module>
data = quandl.get('FXH1/FXCM-H1')
File "/usr/lib64/python3.5/site-packages/quandl/get.py", line 48, in get
data = Dataset(dataset_args['code']).data(params=kwargs, handle_column_not_found=True)
File "/usr/lib64/python3.5/site-packages/quandl/model/dataset.py", line 47, in data
return Data.all(**updated_options)
File "/usr/lib64/python3.5/site-packages/quandl/operations/list.py", line 14, in all
r = Connection.request('get', path, **options)
File "/usr/lib64/python3.5/site-packages/quandl/connection.py", line 36, in request
return cls.execute_request(http_verb, abs_url, **options)
File "/usr/lib64/python3.5/site-packages/quandl/connection.py", line 44, in execute_request
cls.handle_api_error(response)
File "/usr/lib64/python3.5/site-packages/quandl/connection.py", line 85, in handle_api_error
raise klass(message, resp.status_code, resp.text, resp.headers, code)
quandl.errors.quandl_error.NotFoundError: (Status 404) (Quandl Error QECx02) You have submitted an incorrect Quandl code. Please check your Quandl codes and try again.

import quandl
import pandas as pd

quandl.ApiConfig.api_key = "My Key"

# Columns returned by the FXCM/H1 datatable
columns = ["None", "symbol", "date", "hour", "openbid", "highbid", "lowbid", "closebid",
           "openask", "highask", "lowask", "closeask", "totalticks"]

# Datatables are queried with get_table(), not get()
data = quandl.get_table('FXCM/H1',
                        date="2002-02-01,2002-02-02,2002-02-03,2002-02-04,2002-02-05,2002-02-06,2002-02-07,2002-02-08,2002-02-09",
                        symbol='EUR/USD')
for index, row in data.iterrows():
    print(row.to_frame().T)
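For what it's worth, the 404 from quandl.get() is consistent with this product being a datatable rather than a time-series dataset, so it has to be queried through quandl.get_table() with the table code (FXCM/H1, judging by the working call above) rather than FXH1/FXCM-H1. A minimal sketch of such a call, assuming the standard get_table options paginate and qopts and the column names listed above:

import quandl

quandl.ApiConfig.api_key = "My Key"

# Datatables are addressed as PUBLISHER/TABLE; filters go in as keyword arguments.
data = quandl.get_table(
    'FXCM/H1',
    symbol='EUR/USD',
    date='2002-02-01',
    qopts={'columns': ['date', 'hour', 'openbid', 'closebid']},  # optional column filter
    paginate=True,  # fetch every page instead of only the first batch of rows
)
print(data.tail())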

Related

dom.getElementsByTagNameNS("urn:oasis:names:tc:SAML:1.0:assertion", 'Assertion')[0].toxml() IndexError: list index out of range

We have been using the "Office365-REST-Python-Client 2.3.11" library to upload files to SharePoint.
Suddenly the code stopped working and started giving the issue below.
Code:
import json
from office365.runtime.auth.user_credential import UserCredential
from office365.runtime.http.request_options import RequestOptions
from office365.sharepoint.client_context import ClientContext

site_url = "https://{your-tenant-prefix}.sharepoint.com"
ctx = ClientContext(site_url).with_credentials(UserCredential("{username}", "{password}"))
request = RequestOptions("{0}/_api/web/".format(site_url))
response = ctx.execute_request_direct(request)
payload = json.loads(response.content)  # use a separate name so the json module is not shadowed
web_title = payload['d']['Title']
print("Web title: {0}".format(web_title))
Error:
Traceback (most recent call last):
File "test_upload.py", line 81, in <module>
response = ctx.execute_request_direct(request)
File "/home/ubuntu/atlas/national_kpi_table/venv/lib/python3.8/site-packages/office365/runtime/client_runtime_context.py", line 131, in execute_request_direct
return self.pending_request().execute_request_direct(self._normalize_request(request))
File "/home/ubuntu/atlas/national_kpi_table/venv/lib/python3.8/site-packages/office365/runtime/odata/odata_request.py", line 36, in execute_request_direct
return super(ODataRequest, self).execute_request_direct(request)
File "/home/ubuntu/atlas/national_kpi_table/venv/lib/python3.8/site-packages/office365/runtime/client_request.py", line 91, in execute_request_direct
self.context.authenticate_request(request)
File "/home/ubuntu/atlas/national_kpi_table/venv/lib/python3.8/site-packages/office365/sharepoint/client_context.py", line 230, in authenticate_request
self._auth_context.authenticate_request(request)
File "/home/ubuntu/atlas/national_kpi_table/venv/lib/python3.8/site-packages/office365/runtime/auth/authentication_context.py", line 89, in authenticate_request
self._provider.authenticate_request(request)
File "/home/ubuntu/atlas/national_kpi_table/venv/lib/python3.8/site-packages/office365/runtime/auth/providers/saml_token_provider.py", line 77, in authenticate_request
self.ensure_authentication_cookie()
File "/home/ubuntu/atlas/national_kpi_table/venv/lib/python3.8/site-packages/office365/runtime/auth/providers/saml_token_provider.py", line 84, in ensure_authentication_cookie
self._cached_auth_cookies = self.get_authentication_cookie()
File "/home/ubuntu/atlas/national_kpi_table/venv/lib/python3.8/site-packages/office365/runtime/auth/providers/saml_token_provider.py", line 97, in get_authentication_cookie
token = self._acquire_service_token_from_adfs(user_realm.STSAuthUrl)
File "/home/ubuntu/atlas/national_kpi_table/venv/lib/python3.8/site-packages/office365/runtime/auth/providers/saml_token_provider.py", line 140, in _acquire_service_token_from_adfs
assertion_node = dom.getElementsByTagNameNS("urn:oasis:names:tc:SAML:1.0:assertion", 'Assertion')[0].toxml()
IndexError: list index out of range
Any help appreciated.
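The IndexError happens because the SAML token provider cannot find an Assertion node in the ADFS response, which suggests the user-credential sign-in itself is being rejected rather than anything in the upload code. If your tenant allows it, one thing worth trying is app-only authentication with a client id and secret instead of UserCredential; a rough sketch using the library's ClientCredential class follows (the site URL, client_id and client_secret are placeholders, and the app principal must be registered and granted permissions first):

from office365.runtime.auth.client_credential import ClientCredential
from office365.sharepoint.client_context import ClientContext

site_url = "https://{your-tenant-prefix}.sharepoint.com"

# Placeholder app credentials; register an app principal for the site and grant it
# the permissions your upload needs before using this.
credentials = ClientCredential("{client_id}", "{client_secret}")
ctx = ClientContext(site_url).with_credentials(credentials)

web = ctx.web.get().execute_query()
print("Web title: {0}".format(web.properties.get("Title")))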

Scraping list from webpage

I am trying to extract the items in the 'Symbol' column of this webpage:
https://chartink.com/screener/2-short-trend
My code is like this:
from requests_html import HTMLSession

def stockList(url):
    session = HTMLSession()
    r = session.get(url)
    r.html.render(sleep=1)
    stock = [{item.text} for item in r.html.xpath('//*[#class="//*[#id="DataTables_Table_0"]/tbody/tr[1]/td[3]/a"]')]
    return stock

listStock = stockList('https://chartink.com/screener/2-short-trend')
print(listStock)
Error:
Traceback (most recent call last):
File "C:\Users\kashk\PycharmProjects\test\venv\Scripts\CIscreen2pcShort.py", line 12, in <module>
listStock = stockList('https://chartink.com/screener/2-short-trend')
File "C:\Users\kashk\PycharmProjects\test\venv\Scripts\CIscreen2pcShort.py", line 10, in stockList
stock = [{item.text} for item in r.html.xpath('//*[#class="//*[#id="DataTables_Table_0"]/tbody/tr[1]/td[3]/a"]')]
File "C:\Users\kashk\PycharmProjects\test\venv\lib\site-packages\requests_html.py", line 255, in xpath
selected = self.lxml.xpath(selector)
File "src\lxml\etree.pyx", line 1597, in lxml.etree._Element.xpath
File "src\lxml\xpath.pxi", line 305, in lxml.etree.XPathElementEvaluator.__call__
File "src\lxml\xpath.pxi", line 225, in lxml.etree._XPathEvaluatorBase._handle_result
lxml.etree.XPathEvalError: Invalid predicate
Can you please suggest where I am going wrong?
Also, is there a way to send this output to a DataFrame?
File "src\lxml\etree.pyx", line 1597, in lxml.etree._Element.xpath
File "src\lxml\xpath.pxi", line 305, in lxml.etree.XPathElementEvaluator.call
File "src\lxml\xpath.pxi", line 225, in lxml.etree._XPathEvaluatorBase._handle_result
lxml.etree.XPathEvalError: Invalid expression
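The XPath itself is the problem: # is not valid XPath syntax (attributes are tested with @), and the selector nests one expression inside another, which is what lxml reports as an invalid predicate/expression. A sketch of a working version, assuming the symbols sit in the third cell of each row of the table with id DataTables_Table_0 (taken from the original selector), collected straight into a pandas DataFrame:

import pandas as pd
from requests_html import HTMLSession

def stock_list(url):
    session = HTMLSession()
    r = session.get(url)
    r.html.render(sleep=1)  # the table is filled in by JavaScript, so render the page first
    # '@' introduces an attribute test in XPath; grab the anchor text from the
    # third cell of every row of the DataTables table.
    return [a.text for a in r.html.xpath('//table[@id="DataTables_Table_0"]/tbody/tr/td[3]/a')]

symbols = stock_list('https://chartink.com/screener/2-short-trend')
df = pd.DataFrame({'Symbol': symbols})
print(df)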

webuntis.errors.RemoteError: Request ID was not the same one as returned

Can anyone help me with the webuntis API?
I used the standard code:
import webuntis

s = webuntis.Session(
    username='Name',
    password='Password',
    server='klio.webuntis.com',
    school='Shool',
    useragent='WebUntis Test'
).login()

for klasse in s.klassen():
    print(klasse.name)
But it gives me the following error:
File "main.py", line 18, in <module>
s = webuntis.Session(
File "/home/bwlok/.local/lib/python3.8/site-packages/webuntis/session.py", line 91, in login
res = self._request('authenticate', {
File "/home/bwlok/.local/lib/python3.8/site-packages/webuntis/session.py", line 118, in _request
data = rpc_request(self.config, method, params or {})
File "/home/bwlok/.local/lib/python3.8/site-packages/webuntis/utils/remote.py", line 90, in rpc_request
return _parse_result(request_body, result_body)
File "/home/bwlok/.local/lib/python3.8/site-packages/webuntis/utils/remote.py", line 118, in _parse_result
raise errors.RemoteError(
webuntis.errors.RemoteError: Request ID was not the same one as returned. 2021-09-22 18:43:23.684083 -- error
Does anyone know what I need to change?
You need to use your actual Untis username and password. You also need the correct school name.
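In other words, the placeholders in the snippet have to be replaced with real values; a minimal sketch, assuming the server and school strings come from your WebUntis login URL and that the Session exposes logout() as in the library's examples:

import webuntis

# Replace every placeholder with your real data; 'server' and 'school' are taken
# from the WebUntis login URL for your school.
s = webuntis.Session(
    username='my-untis-username',
    password='my-untis-password',
    server='klio.webuntis.com',
    school='my-real-school-name',
    useragent='WebUntis Test'
).login()

try:
    for klasse in s.klassen():
        print(klasse.name)
finally:
    s.logout()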

Timeout error while batch geocoding with google maps API in python

I'm new to the Google Maps API and I'm not sure why this code isn't working. I have a list of 80 landmarks in a CSV file for which I'm trying to retrieve the longitude and latitude coordinates.
I believe something may be wrong with how I'm connecting to the API. From my understanding, I should have 2,500 free requests per day, but I'm receiving a timeout error that makes me think I've already reached my limit.
Here is a snapshot of my dashboard.
Code:
import pandas as pd
import googlemaps

# IMPORT DATASET
df = pd.read_csv('landmarks.csv')

# GOOGLE MAPS API KEY
gmaps_key = googlemaps.Client(key='MY KEY')

df['LAT'] = None
df['LON'] = None

for i in range(0, len(df), 1):
    geocode_result = gmaps_key.geocode(df.iat[i, 0])
    try:
        lat = geocode_result[0]['geometry']['location']['lat']
        lon = geocode_result[0]['geometry']['location']['lng']  # the geocode result uses 'lng', not 'lon'
        df.iat[i, df.columns.get_loc('LAT')] = lat  # 'columns', not 'comlumns'
        df.iat[i, df.columns.get_loc('LON')] = lon
    except:
        lat = None
        lon = None

print(df)
Error Message:
Traceback (most recent call last):
  File "C:\Users\JGrov\Anaconda3\lib\site-packages\googlemaps\client.py", line 253, in _request
    result = self._get_body(response)
  File "C:\Users\JGrov\Anaconda3\lib\site-packages\googlemaps\client.py", line 276, in _get_body
    raise googlemaps.exceptions._RetriableRequest()
googlemaps.exceptions._RetriableRequest

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "c:/Users/JGrov/Google Drive/pythonProjects/Megalith Map/googleMapsAPI_Batch_Megaliths.py", line 16, in <module>
    geocode_result = gmaps_key.geocode(df.iat[i,0])
  File "C:\Users\JGrov\Anaconda3\lib\site-packages\googlemaps\client.py", line 356, in wrapper
    result = func(*args, **kwargs)
  File "C:\Users\JGrov\Anaconda3\lib\site-packages\googlemaps\geocoding.py", line 68, in geocode
    return client._request("/maps/api/geocode/json", params)["results"]
  File "C:\Users\JGrov\Anaconda3\lib\site-packages\googlemaps\client.py", line 260, in _request
    extract_body, requests_kwargs, post_json)
  File "C:\Users\JGrov\Anaconda3\lib\site-packages\googlemaps\client.py", line 260, in _request
    extract_body, requests_kwargs, post_json)
  File "C:\Users\JGrov\Anaconda3\lib\site-packages\googlemaps\client.py", line 260, in _request
    extract_body, requests_kwargs, post_json)
  [Previous line repeated 9 more times]
  File "C:\Users\JGrov\Anaconda3\lib\site-packages\googlemaps\client.py", line 203, in _request
    raise googlemaps.exceptions.Timeout()
googlemaps.exceptions.Timeout
Any help on this matter would be appreciated. Thank you.
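The traceback shows the client retrying a retriable error until its retry window runs out, so this looks more like a rate or connection problem than an exhausted daily quota. A sketch of one way to make the client more tolerant, assuming the timeout, retry_timeout and queries_per_second arguments of googlemaps.Client (they are constructor options of the Python client; tune the values to your own needs):

import time

import googlemaps
import pandas as pd

# 'MY KEY' is a placeholder. An explicit per-request timeout, a longer retry window
# and a lower request rate give each geocode call more room before the client
# gives up with googlemaps.exceptions.Timeout.
gmaps_key = googlemaps.Client(
    key='MY KEY',
    timeout=10,             # seconds per HTTP request
    retry_timeout=120,      # total seconds to keep retrying retriable errors
    queries_per_second=10,  # stay well under the rate limit
)

df = pd.read_csv('landmarks.csv')
df['LAT'] = None
df['LON'] = None

for i in range(len(df)):
    try:
        location = gmaps_key.geocode(df.iat[i, 0])[0]['geometry']['location']
        df.iat[i, df.columns.get_loc('LAT')] = location['lat']
        df.iat[i, df.columns.get_loc('LON')] = location['lng']
    except (IndexError, googlemaps.exceptions.Timeout):
        time.sleep(2)  # back off briefly and leave this row empty instead of aborting

print(df)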

How to get the rows of specific worksheet on public spreadsheet using gdata in python

I am using gdata in Python to read the rows of a specific worksheet from a public spreadsheet. When I tried the following code:
import gdata.spreadsheet.service

client = gdata.spreadsheet.service.SpreadsheetsService()
key = 'xxxxxxxxxxxxxxxxxxxxxxxxxx'
worksheets_feed = client.GetWorksheetsFeed(key, visibility='public', projection='values')
# print worksheets_feed
for entry in worksheets_feed.entry:
    print entry.title.text
    worksheet_id = entry.id.text.rsplit('/', 1)[1]
    rows = client.GetListFeed(key, worksheet_id).entry
I am getting this error:
Traceback (most recent call last):
File "lib/scrapper.py", line 89, in <module>
start_it()
File "lib/scrapper.py", line 56, in start_it
rows = client.GetListFeed(key, worksheet_id).entry
File "/Library/Python/2.7/site-packages/gdata/spreadsheet/service.py", line 252, in GetListFeed
converter=gdata.spreadsheet.SpreadsheetsListFeedFromString)
File "/Library/Python/2.7/site-packages/gdata/service.py", line 1074, in Get
return converter(result_body)
File "/Library/Python/2.7/site-packages/gdata/spreadsheet/__init__.py", line 474, in SpreadsheetsListFeedFromString
xml_string)
File "/Library/Python/2.7/site-packages/atom/__init__.py", line 93, in optional_warn_function
return f(*args, **kwargs)
File "/Library/Python/2.7/site-packages/atom/__init__.py", line 127, in CreateClassFromXMLString
tree = ElementTree.fromstring(xml_string)
File "<string>", line 125, in XML
cElementTree.ParseError: no element found: line 1, column 0
Can somebody tell me where I am going wrong?
Try:
worksheet_feed = spreadsheet.GetWorksheetsFeed(spreadsheetId)
worksheetfeed = []
for worksheet in worksheet_feed.entry:
    # the worksheet id is the last path segment of the entry id
    worksheetfeed.append(worksheet.id.text.rsplit('/', 1)[1])

list_feed = spreadsheet.GetListFeed(spreadsheetId, worksheetfeed[0])  # get first worksheet
entryList = []
for entry in list_feed.entry:
    tempDict = {}
    for key in entry.custom:
        tempDict[str(key)] = str(entry.custom[key].text)
    entryList.append(tempDict)
where spreadsheetId has been defined and you have been previously authenticated.
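Since the spreadsheet in the question is public, the same approach should also work without authentication, provided the public/values parameters from the question are passed to both calls; a sketch of that, assuming GetListFeed accepts the same visibility and projection keywords as GetWorksheetsFeed (the key is a placeholder):

import gdata.spreadsheet.service

client = gdata.spreadsheet.service.SpreadsheetsService()
key = 'xxxxxxxxxxxxxxxxxxxxxxxxxx'  # placeholder spreadsheet key

worksheets_feed = client.GetWorksheetsFeed(key, visibility='public', projection='values')
rows = []
for worksheet in worksheets_feed.entry:
    worksheet_id = worksheet.id.text.rsplit('/', 1)[1]
    list_feed = client.GetListFeed(key, worksheet_id,
                                   visibility='public', projection='values')
    for entry in list_feed.entry:
        rows.append(dict((k, entry.custom[k].text) for k in entry.custom))

print rows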
