This is my first time using the Robinhood API and I am following its documentation: https://readthedocs.org/projects/robin-stocks/downloads/pdf/latest/,
but one of its key functions is not working. When I call robin_stocks.get_current_positions() I get the error:
AttributeError: module 'robin_stocks' has no attribute 'get_current_positions'
Here is my code:
import robin_stocks, json
from robin_stocks import *
import robin_stocks as r
import sys
import time
import requests
content = open('config.json').read()
config = json.loads(content)
login = r.login(config['username'],config['password'], store_session=True)
my_stocks = robin_stocks.build_holdings()
for key, value in my_stocks.items():
    mystocks = key, value
    print(mystocks)
WEIbalance = mystocks[1]['equity']
WEI = mystocks[0]
print('YY', WEI)
positions_data = robin_stocks.get_current_positions()
print('my equity', WEIbalance)
print(positions_data)
Is this an error on my part?
As of this commit, get_current_positions() was renamed to get_open_stock_positions(); the corresponding issue can be found here.
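A minimal sketch of the updated call, assuming a robin_stocks version after the rename (in the latest releases these functions may live under robin_stocks.robinhood instead):
import robin_stocks as r

login = r.login(config['username'], config['password'], store_session=True)  # config as in the question
positions_data = r.get_open_stock_positions()  # renamed from get_current_positions()
print(positions_data)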
I have been trying to follow an easy tutorial on how to get Sentinel-2 images for a series of polygons I have. For some reason, no matter what I do, I keep running into the same error (detailed below).
from sentinelsat import SentinelAPI, read_geojson, geojson_to_wkt
import geopandas as gpd
import folium
import rasterio as rio
from rasterio.plot import show
from rasterio.mask import mask
import matplotlib.pyplot as plt
from pyproj import Proj, transform
import pandas as pd
import os
from datetime import date
import sentinelhub
user = 'xxxxx'
password = 'xxxxx'
url = 'https://scihub.copernicus.eu/dhus'
api = SentinelAPI(user, password, url)
validation = gpd.read_file('EarthData/tutakoke_permafrost_validation/Tutakoke_permafrost_validation.shp')
plateau_transects = gpd.read_file('EarthData/tutakoke_permafrost_plateau_transects/Tutakoke_Permafrost_Plateau_Transects.shp')
validation = validation.set_crs(epsg=32604, inplace=True, allow_override=True)
validation['imdate']='01-01-2019'
validation['imdate'] = pd.to_datetime(validation['imdate'])
validation['geometry2'] = validation.geometry.buffer(2, cap_style=3)
footprint=validation['geometry2'][1]
products = api.query(footprint,
                     date=('20200109', '20200510'),
                     platformname='Sentinel-2',
                     processinglevel='Level-2A',
                     cloudcoverpercentage=(0, 20))
The error I keep getting is:
SentinelAPIError: HTTP status 200 OK: API response not valid. JSON decoding failed.
Ah, it was that my footprint was not in the correct lat/lon format!
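For reference, a minimal sketch of the fix, assuming the shapefile really is in EPSG:32604 as set above: SciHub expects the footprint as WKT in geographic (lat/lon, EPSG:4326) coordinates, so reproject the buffered geometry before querying.
# Reproject the buffered UTM geometry to lat/lon and take its WKT.
footprint = gpd.GeoSeries([validation['geometry2'][1]], crs='EPSG:32604').to_crs(epsg=4326)[0].wkt
products = api.query(footprint,
                     date=('20200109', '20200510'),
                     platformname='Sentinel-2',
                     processinglevel='Level-2A',
                     cloudcoverpercentage=(0, 20))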
I need to get cloudformation stacks according to the environment. Below is the code I am using for this:
#!/usr/bin/env python
import boto3
import datetime
from datetime import date
import subprocess
import re, itertools
from collections import defaultdict
regions = ['us-west-2']
env_names = ["dev", "test", "stage"]
stack_names_found = defaultdict(list)
for region in regions:
    session = boto3.session.Session(region_name=region)
    cf_client = session.resource('cloudformation')
    for i in cf_client.stacks.all():
        StackStatus = i.stack_status
        Createdtime = i.creation_time
        StackName1 = i.stack_name
        for env_name in env_names:
            if ('-' + env_name + '-') in StackName1:
                stack_names_found[env_name].append(StackName1)

output = {'StackName': stack_names_found,
          'Createdtime': Createdtime,
          'Status': StackStatus
          }
print(output)
StackName in output looks like this:
{'StackName': defaultdict(<class 'list'>, {'test': ['customer1-test-server1', 'customer2-test-server1', 'customer3-test-server1', 'customer3-test-server1', 'customer1-test-server2']})}
Instead of:
['customer1-test-server1']
['customer2-test-server1']
['customer3-test-server1']
['customer3-test-server1']
You can have a look at the following version, which uses defaultdict and builds a dictionary of stack names for each env_name:
#!/usr/bin/env python
import boto3
import csv
import datetime
from datetime import date
import subprocess
import re, itertools
from collections import defaultdict
regions = ['us-west-2']
env_names = ["dev", "test", "stage"]
stack_names_found = defaultdict(list)
for region in regions:
    session = boto3.session.Session(region_name=region)
    cf_client = session.resource('cloudformation')
    for i in cf_client.stacks.all():
        StackStatus = i.stack_status
        Createdtime = i.creation_time
        StackName1 = i.stack_name
        for env_name in env_names:
            if ('-' + env_name + '-') in StackName1:
                stack_names_found[env_name].append(StackName1)

print(stack_names_found)
Please note that I haven't run the code, so some adjustments may be needed to make it fully work.
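Since the expected output in the question shows each stack name separately, a small untested follow-up sketch (same names as above) that prints one entry per line instead of the whole defaultdict:
for env_name, names in stack_names_found.items():
    for name in names:
        print([name])  # one single-element list per matching stack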
I am trying to extract Neptune database vertices into a CSV file, which fails when extracting the id column. Below is the script I am trying to run in the AWS Glue console.
import boto3
import os
import sys
import site
import json
import pandas as pd
from setuptools.command import easy_install
from importlib import reload
s3 = boto3.client('s3')
dir_path = os.path.dirname(os.path.realpath(__file__))
#os.path.dirname(sys.modules['__main__'].__file__)
install_path = os.environ['GLUE_INSTALLATION']
easy_install.main( ["--install-dir", install_path, "gremlinpython"] )
reload(site)
from gremlin_python import statics
from gremlin_python.structure.graph import Graph
from gremlin_python.process.graph_traversal import __
from gremlin_python.process.strategies import *
from gremlin_python.process.traversal import T
from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection
graph = Graph()
remoteConn = DriverRemoteConnection('wss://neptune-test-new-reader-1.c3nqs7vjaggx.eu-west-1.neptune.amazonaws.com:8182/gremlin','g')
g = graph.traversal().withRemote(remoteConn)
vertices_columns = ['id', 'label', 'region','country']
vertices = g.V().hasLabel('airport').limit(2).project('id','label','region','country').by('T.id').by('T.label').by('region').by('country').select(values).fold()
for v in vertices:
    print(v)
Error:
NameError: name 'values' is not defined
I tried the script below with a for loop:
import boto3
import os
import sys
import site
import json
import pandas as pd
from setuptools.command import easy_install
from importlib import reload
s3 = boto3.client('s3')
dir_path = os.path.dirname(os.path.realpath(__file__))
#os.path.dirname(sys.modules['__main__'].__file__)
install_path = os.environ['GLUE_INSTALLATION']
easy_install.main( ["--install-dir", install_path, "gremlinpython"] )
reload(site)
from gremlin_python import statics
from gremlin_python.structure.graph import Graph
from gremlin_python.process.graph_traversal import __
from gremlin_python.process.strategies import *
from gremlin_python.process.traversal import T
from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection
graph = Graph()
remoteConn = DriverRemoteConnection('wss://neptune-test-new-reader-1.c3nqs7vjaggx.eu-west-1.neptune.amazonaws.com:8182/gremlin','g')
g = graph.traversal().withRemote(remoteConn)
vertices_columns = ['id', 'label', 'region','country']
"""
vertices = g.V().hasLabel('airport').limit(2).project('id','label','region','country').by('T.id').by('T.label').by('region').by('country').select(values).fold()
for v in vertices:
    print(v)
"""
#vertices = []
vertices = g.V().limit(1).valueMap(True).toList()
for v in vertices:
    print(v)
    for col in vertices_columns:
        print(v[col])
#print(vertices)
Error:
Output of print(v):
{<T.id: 1>: '1', <T.label: 1>: 'airport', 'country': 'US', 'region': 'US-AL'}
KeyError: 'id'
The values keyword used in select(values) is a reference to an enum defined as part of Column. In your code you can use select(Column.values), and you can include the definition using:
from gremlin_python.process.traversal import Column
Here is a Python example:
>>> g.V('3').project('id','label','code').by(T.id).by(T.label).by('code').toList()
[{'id': '3', 'label': 'airport', 'code': 'AUS'}]
>>> g.V('3').project('id','label','code').by(T.id).by(T.label).by('code').select(Column.values).toList()
[['3', 'airport', 'AUS']]
Note that T.id and T.label are not inside quotes.
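Applying that to the query from the question, a hedged sketch (not run against your cluster) that also drops the quotes around T.id and T.label:
from gremlin_python.process.traversal import T, Column

vertices = (g.V().hasLabel('airport').limit(2)
            .project('id', 'label', 'region', 'country')
            .by(T.id).by(T.label).by('region').by('country')
            .select(Column.values)  # list of values per projected map
            .toList())
for v in vertices:
    print(v)
If you stay with valueMap(True) instead, the id and label keys are the T.id and T.label enum members, so v[T.id] works where v['id'] raises the KeyError shown above.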
I am trying to, for the first time, deploy an NLP ML model. To do this it was suggested that I use FastAPI and uvicorn. I have had some success in getting FastAPI to respond; however, I have not been able to successfully pass the DataFrame and have it processed. I've tried using dictionaries and even attempted to convert the passed JSON to a DataFrame.
With data_dict = data.dict() I get:
ValueError: Iterable over raw text documents expected, string object received.
With data_dict = pd.DataFrame(data.dict()) I get:
ValueError: If using all scalar values, you must pass an index
I believe I understand the problem: my Data class is expecting a string, which this is not; however, I have not been able to determine how to set and/or pass the expected data so that fit_transform() will work. Ultimately I want a prediction returned based on the submitted messages value. Bonus if I can pass a DataFrame of one or more rows and have predictions made and returned for each of the rows. The response will include the id, project, and the prediction so that we are able in future to leverage this response to post the prediction back to the original (requesting) system.
test_connection.py
#%%
import requests
import pandas as pd
import json
import os
from pprint import pprint
url = 'http://127.0.0.1:8000/predict'
print(os.getcwd())
#%%
df = pd.DataFrame(
{
'id': ['ab410483801c38', 'cd34148639180'],
'project': ['project1', 'project2'],
'messages': ['This is message 1', 'This is message 2']
}
)
to_predict_dict = df.iloc[0].to_dict()
#%%
r = requests.post(url, json=to_predict_dict)
main.py
#!/usr/bin/env python
# coding: utf-8
import pickle
import pandas as pd
import numpy as np
from pydantic import BaseModel
from sklearn.feature_extraction.text import TfidfVectorizer
# Server
import uvicorn
from fastapi import FastAPI
# Model
import xgboost as xgb
app = FastAPI()
clf = pickle.load(open('data/xgbmodel.pickle', 'rb'))
class Data(BaseModel):
    # id: str
    project: str
    messages: str

@app.get("/ping")
async def test():
    return {"ping": "pong"}

@app.post("/predict")
async def predict(data: Data):
    # data_dict = data.dict()
    data_dict = pd.DataFrame(data.dict())
    tfidf_vect = TfidfVectorizer(stop_words="english", analyzer='word', token_pattern=r'\w{1,}')
    tfidf_vect.fit_transform(data_dict['messages'])
    # to_predict = tfidf_vect.transform(data_dict['messages'])
    # prediction = clf.predict(to_predict)
    return {"response": "Success"}
Probably not the most elegant solution but I've made progress using the following:
def predict(data: Data):
    data_dict = pd.DataFrame(
        {
            'id': [data.id],
            'project': [data.project],
            'messages': [data.messages]
        }
    )
First, encode your DataFrame df as record-oriented JSON:
r = requests.post(url, json=df.to_json(orient='records'))
Then, decode your data inside the /predict/ endpoint with:
df = pd.DataFrame(jsonable_encoder(data))
Remember the import: from fastapi.encoders import jsonable_encoder.
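Putting the two snippets together, a hedged sketch of the endpoint; note that for FastAPI to parse a list of Data models, the client can send a real JSON array with json=df.to_dict(orient='records') (df.to_json produces a pre-encoded string, which may need an extra json.loads on the server side):
from typing import List
from fastapi.encoders import jsonable_encoder

@app.post("/predict")
async def predict(data: List[Data]):
    # jsonable_encoder turns the parsed Pydantic models back into plain dicts,
    # one per DataFrame row sent by the client.
    df = pd.DataFrame(jsonable_encoder(data))
    return {"rows_received": len(df)}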
A new library called pandera now supports direct passage of DataFrames via FastAPI without conversion. The docs are a bit basic as of posting this, but may be worth reading: https://pandera.readthedocs.io/en/latest/fastapi.html#fastapi-integration.
I was able to address the issue by simply converting data.messages into a list. I also had to make some unrelated changes; I had failed to pickle my vectorizer (string tokenizer).
import pickle
import pandas as pd
import numpy as np
import json
import time
from pydantic import BaseModel
from sklearn.feature_extraction.text import TfidfVectorizer
# Server / endpoint
import uvicorn
from fastapi import FastAPI
# Model
import xgboost as xgb
app = FastAPI(debug=True)
clf = pickle.load(open('data/xgbmodel.pickle', 'rb'))
vect = pickle.load(open('data/tfidfvect.pickle', 'rb'))
class Data(BaseModel):
    id: str = None
    project: str
    messages: str

@app.get("/ping")
async def ping():
    return {"ping": "pong"}

@app.post("/predict/")
def predict(data: Data):
    start = time.time()
    data_l = [data.messages]  # make messages iterable
    to_predict = vect.transform(data_l)
    prediction = clf.predict(to_predict)
    exec_time = round((time.time() - start), 3)
    return {
        "id": data.id,
        "project": data.project,
        "prediction": prediction[0],
        "execution_time": exec_time
    }

if __name__ == "__main__":
    uvicorn.run(app, host="127.0.0.1", port=8000)
I have three modules: GetInput, Main, and Converter. The GetInput file contains all the input values and the Excel data in the form of lists. In the Converter file I use those input values from the GetInput file, and in the Main file I connect both of these files. I am doing this so that my code looks more organized.
GetInput.py:
import pandas as pd
import numpy as np
import time
def getInputs():
    df = pd.read_excel('input.xlsx')
    actual = df['actual'].values.tolist()
    schedule = df['schedule'].values.tolist()
    freq = df['frequency'].values.tolist()
    ACP = df['acp'].values.tolist()
    modelInput = {
        'actual': actual, 'schedule': schedule, 'freq': freq, 'ACP': ACP, 'df': df
    }
    return modelInput
Converter.py
import pandas as pd
def fun(modelInput):
    underdraw = []
    overdraw = []
    for i, j in zip(schedule, actual):
        dev = j - i
        if dev < 0:
            underdraw.append(dev)
        else:
            underdraw.append(0)
        if dev > 0:
            overdraw.append(dev)
        else:
            overdraw.append(0)
    df['underdraw'] = pd.Series(underdraw)
    df['overdraw'] = pd.Series(overdraw)
    df.to_excel('mainfile.xlsx')
Main.py
import pandas as pd
import numpy as np
from convert import *
from GetInputs import *
def fun1():
    inpu = getInputs()
    con = fun(inpu)

fun1()
This whole program works when I run it in a single module, but it throws errors when I try to divide my code into separate modules. Basically it throws errors in GetInput.py and in Converter.py (df is not defined). I know it's a very basic thing, but I don't know how to make it work. There is no desired output for this program; I already get the correct output when I run it in a single file. I just want to divide my code into the format I mentioned above: a GetInput file, a Converter file, and a Main file.
Keep all the files in the same directory, or else mention the file paths at the top of the main code using the os module.
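For the second option, a minimal sketch (the folder path is a hypothetical placeholder):
import os, sys
# Make the directory that holds GetInput.py and Converter.py importable.
sys.path.append(os.path.abspath('/path/to/your/modules'))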
You have misspelled the following in the main code:
from convert import *
from GetInputs import *
It should be:
from Converter import *
from GetInput import *
I have tested this using the following:
MainModule.py
from Converter import *
from GetInputs import *
def fun1():
    inpu = getInputs()
    con = fun(inpu)

fun1()
Converter.py
import pandas as pd
def fun(modelInput):
    print("HIE" + modelInput)
GetInputs.py
def getInputs():
    return "modelInput"