Not able to get real-time data with Alpaca API - Python

I'm trying to make a real-time graph for plotting stock data, but I can't get .get_position to work. The errors I get are APIError: position does not exist and 404 Client Error: Not Found for url: https://paper-api.alpaca.markets/v2/positions/AAPL. If I follow the link directly in a browser, I get a forbidden message.
import matplotlib.pyplot as plt
from itertools import count
from matplotlib.animation import FuncAnimation
import threading
import time

import alpaca_trade_api as tradeapi  # Alpaca SDK (pip install alpaca-trade-api)

api_key = '<API_KEY>'
api_secret = '<SECRET_KEY>'
base_url = 'https://paper-api.alpaca.markets'

api = tradeapi.REST(api_key, api_secret, base_url, api_version='v2')
account = api.get_account()

def stock_grabber():
    global position
    SYMBOL = 'AAPL'
    try:
        position = api.get_position(symbol=SYMBOL)
    except Exception as exception:
        # get_position raises APIError when there is no open position
        if exception.__str__() == 'position does not exist':
            position = 0
    print(position)

def threadz():
    while True:
        x = threading.Thread(target=stock_grabber)
        x.start()
        time.sleep(0.4)

t = threading.Thread(target=threadz)
t.start()

stock_grabber()

x = []
y = []
index = count()

def animate(i):
    x.append(next(index))
    y.append(position)
    plt.cla()
    plt.plot(x, y)

ani = FuncAnimation(plt.gcf(), animate, interval=400)
plt.tight_layout()
plt.show()

I recently discovered my mistake: what I was trying to get was the real-time price, not the position.
Instead of
position = api.get_position(symbol=SYMBOL)
use
position = api.get_position(SYMBOL)
realprice = position.current_price
Also, the forbidden message was because I was on school Wi-Fi.
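Note that get_position only succeeds once you actually hold shares of the symbol; with no open position, the 404 above is the expected response. If all you need is the live price, the market-data endpoints work regardless of your positions. A minimal sketch, assuming a recent alpaca-trade-api release that provides get_latest_trade (older releases named it get_last_trade):

import alpaca_trade_api as tradeapi

api = tradeapi.REST('<API_KEY>', '<SECRET_KEY>',
                    'https://paper-api.alpaca.markets', api_version='v2')

def latest_price(symbol):
    # Fetch the most recent trade; no open position is required.
    trade = api.get_latest_trade(symbol)
    return trade.price  # newer SDK versions map the raw 'p' field to .price

print(latest_price('AAPL'))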

Related

Connection to Yahoo Finance with requests.get() not working anymore

Until recently I was able to connect to Yahoo Finance, which allowed me to fill my stock screener with JSON data. However, for a couple of weeks now I have not been able to connect to Yahoo Finance. Apparently Yahoo Finance has blocked the traditional way of accessing its data, and I am wondering if there is a workaround to re-establish such a connection.
Here are a few technical details of how I was connecting to Yahoo Finance (which worked without any problems during the last 2 years):
r = requests.get(url_root_yahoo + ticker + key_stats_yahoo + ticker)
data = json.loads(p.findall(r.text)[0])
quote_store = data['context']['dispatcher']['stores']['QuoteSummaryStore']
statistics = quote_store['defaultKeyStatistics']
profit_margin = statistics['profitMargins']['raw']
But now I am getting the following error message:
File "stock_screener_4.py", line 80, in <module>
quote_store = data['context']['dispatcher']['stores']['QuoteSummaryStore']
NameError: name 'data' is not defined
Any hints on how to re-establish the connection?
Thanks a lot.
P.S. Minimum working example:
import os
import urllib.request
import requests, re, json
p = re.compile(r'root\.App\.main = (.*);')
url_root_yahoo = 'https://finance.yahoo.com/quote/'
key_stats_yahoo = '/key-statistics?p='
ticker = 'AAPL'
execute_next_block = 1
r = requests.get(url_root_yahoo + ticker + key_stats_yahoo + ticker)
try:
data = json.loads(p.findall(r.text)[0])
except (KeyError, IndexError, TypeError):
execute_next_block = 0
try:
quote_store = data['context']['dispatcher']['stores']['QuoteSummaryStore']
statistics = quote_store['defaultKeyStatistics']
except (KeyError, IndexError, TypeError):
execute_next_block = 0
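One likely culprit, though this is an assumption rather than something stated in the question: Yahoo Finance began rejecting requests that carry the default python-requests User-Agent. When that happens, p.findall(r.text) returns no match, the first try block fails silently, and data is never assigned, which is exactly the NameError shown. Sending a browser-like User-Agent header often restores the old response:

import re, json, requests

p = re.compile(r'root\.App\.main = (.*);')
# A browser-like User-Agent; the exact string is an example, not a requirement.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'}

url = 'https://finance.yahoo.com/quote/AAPL/key-statistics?p=AAPL'
r = requests.get(url, headers=headers)

matches = p.findall(r.text)
if matches:  # only parse when the embedded JSON blob is actually present
    data = json.loads(matches[0])
    quote_store = data['context']['dispatcher']['stores']['QuoteSummaryStore']
    print(quote_store['defaultKeyStatistics']['profitMargins']['raw'])
else:
    print('Yahoo returned a page without the expected JSON payload.')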

lst.append(float(n.text[-3:])) ValueError: could not convert string to float: ''

I was trying to write a Prime Video scraper, but I'm getting this error and I'm unable to solve it (could not convert string to float):
[14500:11368:0328/150755.021:ERROR:device_event_log_impl.cc(214)]
[15:07:55.019] USB: usb_device_handle_win.cc:1056 Failed to read descriptor
from node connection: A device attached to the system is not functioning.
(0x1F)
c:/Users/SAM/Amazon Prime Video Selenium Scraper/main.py:53:
GuessedAtParserWarning: No parser was explicitly specified
so I'm using the best available HTML parser for this system ("html.parser"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently
Thanks in Advance
from selenium import webdriver
from bs4 import BeautifulSoup
import pandas as pd
import matplotlib.pyplot as plt
from wordcloud import WordCloud

# CSS variables
titleClass = "h1"
titleName = "_2IIDsE _3I-nQy"
ratingClass = "span"
ratingName = "Gpyvwj _1pG1w4 _1g4OLh _1tadIP _3YQFvK"
synopsisClass = "div"
synopsisName = "_1W5VSv"
storeFrontURL = "https://www.amazon.com/gp/video/storefront"
vidDownloadURL = "/gp/video/detail/"

videoLinks = []
titles = []
ratings = []
synopsis = []

def scrapeText(lst, classType, className):
    findClass = soup.find_all(classType, class_=className)
    if len(findClass) == 0:
        lst.append(None)
    else:
        for n in findClass:
            if className == ratingName:
                # This is the line that raises the ValueError when
                # n.text[-3:] is empty or not numeric.
                lst.append(float(n.text[-3:]))
            else:
                lst.append(n.text)

# Initialize the browser to be controlled by Python
driver = webdriver.Chrome(
    executable_path="C:/Users/SAM/Downloads/chromedriver_win32/chromedriver.exe")
driver.get(storeFrontURL)

elems = driver.find_elements_by_xpath("//a[@href]")
for elem in elems:
    if vidDownloadURL in elem.get_attribute("href"):
        videoLinks.append(elem.get_attribute("href"))
videoLinks = list(dict.fromkeys(videoLinks))  # drop duplicate links, keep order

for i in range(0, len(videoLinks)):
    driver.get(videoLinks[i])
    content = driver.page_source
    # Naming the parser explicitly silences the GuessedAtParserWarning
    soup = BeautifulSoup(content, "html.parser")
    scrapeText(titles, titleClass, titleName)
    scrapeText(ratings, ratingClass, ratingName)
    scrapeText(synopsis, synopsisClass, synopsisName)

data = {'Titles': titles, 'Rating': ratings, 'Synopsis': synopsis}
df = pd.DataFrame(data)
df.to_csv('PrimeVid.csv', index=False, encoding='utf-8')

def wordcloud(dataframe, filename):
    if len(dataframe) > 1:
        text = ' '.join(dataframe.Synopsis)
        wordcloud = WordCloud().generate(text)
        plt.imshow(wordcloud, interpolation='bilinear')
        plt.axis("off")
        plt.savefig(filename + ".png")

# Split the ratings into three bands and build one word cloud per band
dfBelow6 = df.loc[(df['Rating'] < 6)]
df6to7 = df.loc[(df['Rating'] >= 6) & (df['Rating'] < 8)]
dfAbove8 = df.loc[(df['Rating'] >= 8)]
wordcloud(dfBelow6, "below6")
wordcloud(df6to7, "6to7")
wordcloud(dfAbove8, "above8")
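The ValueError itself happens because n.text[-3:] is sometimes an empty string or not a number (a span that matches the rating class but carries no numeric text). A guarded parse avoids the crash; the helper below is a sketch of one way to do it, not code from the question:

import re

def parse_rating(text):
    # Return the trailing rating as a float, or None when no number is present.
    match = re.search(r'(\d+(?:\.\d+)?)\s*$', text)
    return float(match.group(1)) if match else None

# Inside scrapeText, replace lst.append(float(n.text[-3:])) with:
#     lst.append(parse_rating(n.text))
print(parse_rating('IMDb 7.5'))  # 7.5
print(parse_rating(''))          # None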

How to filter JSON data with Python

I am trying to create a function that filters JSON data pulled from the Google Places API. I want it to return the name of a business and its types values if the name contains the string "Body Shop" and the types are ['car_repair', 'point_of_interest', 'establishment']; otherwise I want it to reject the result. Here is my code so far. I have tried and tried and can't figure out a way to store the criteria to make the search easier.
import googlemaps
import pprint
import time
import urllib.request

API_KEY = 'YOUR_API_KEY'
lat, lng = 40.35003, -111.95206

# Define our client
gmaps = googlemaps.Client(key=API_KEY)

# Define our search
places_result = gmaps.places_nearby(location="40.35003,-111.95206", radius=40000,
                                    open_now=False,
                                    type=['car_repair', 'point_of_interest', 'establishment'])
# pprint.pprint(places_result['results'])
time.sleep(3)
places_result_2 = gmaps.places_nearby(page_token=places_result['next_page_token'])
pprint.pprint(places_result_2['results'])
places_result_2 = gmaps.places_nearby(page_token=places_result['next_page_token'])
types = place_details['result']['types']
name = place_details['result']['name']

def match(types, name):
    for val in types:
        'car_repair', 'point_of_interest', 'establishment' in val and "Body Shop" in name
    print(name, types)
Try this:
import googlemaps
import pprint
import time
import urllib.request

API_KEY = 'YOUR_API_KEY'
lat, lng = 40.35003, -111.95206

# Define our client
gmaps = googlemaps.Client(key=API_KEY)

# Define our search
places_result = gmaps.places_nearby(location="40.35003,-111.95206", radius=40000,
                                    open_now=False,
                                    type=['car_repair', 'point_of_interest', 'establishment'])

# Here's how to retrieve the name of the first result
example_of_name = places_result['results'][0]['name']
print(example_of_name)

# Get the place name and types for all the results
for place in places_result['results']:
    print("Name of Place:")
    print(place['name'])
    print("Type of the place:")
    print(place['types'], "\n")
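The loop above prints every result; to apply the filter the question actually asks for (name contains "Body Shop" and the expected types are present), something like the sketch below should work. Treating the type check as a subset test is my assumption about the intended matching rule:

required_types = {'car_repair', 'point_of_interest', 'establishment'}

def matches(place):
    # True when the name contains 'Body Shop' and all required types are present.
    return ('Body Shop' in place.get('name', '')
            and required_types.issubset(place.get('types', [])))

for place in places_result['results']:
    if matches(place):
        print(place['name'], place['types'])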

Python animate live plotting, fetching data from a MySQL table

I am trying to read the latest row from a table and plot the graph using animate and matplotlib in python. The table gets updated every 1 second with a new value. I need to simulate the graph by live plotting the values.
However, when I use the animate function with an interval of 1 second, I get the same value on every fetch.
I am adding the code for your reference. Please let me know what I am missing. The same code works fine when I use a flat file instead of a MySQL table.
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import style
import mysql.connector
import pandas as pd

style.use('fivethirtyeight')

mydb = mysql.connector.connect(
    host="xxxxxxxxxx",
    user="xxxxx",
    passwd="xxxxxxx",
    database="sakila"
)

fig = plt.figure(figsize=(8, 5))
ax = plt.subplot2grid((1, 1), (0, 0))
plt.ion()

cursor = mydb.cursor()

def animate(i):
    df = pd.read_sql("SELECT * FROM DATA_TEST ORDER BY ID DESC LIMIT 1", mydb)
    y = df["VALUE"]
    x = df["ID"]
    xs = []
    ys = []
    xs.append(x)
    ys.append(float(y) * 100)
    ax.clear()
    ax.plot(xs, ys)

ani = animation.FuncAnimation(fig, animate, interval=1000)
plt.show()
## TESTING CURSOR IN FOR LOOP
import mysql.connector
import pandas as pd
import time

mydb = mysql.connector.connect(
    host="xxxxxxx",
    user="xxxx",
    passwd="xxxxxx",
    database="xxxxx"
)

for i in range(10):
    cursor = mydb.cursor()
    cursor.execute("SELECT * FROM DATA_TEST ORDER BY ID DESC LIMIT 1")
    result = cursor.fetchall()
    print("Total rows are: ", len(result))
    for j in result:
        print(j[0])
        print(j[1])
    time.sleep(10)
    cursor.close()
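A likely explanation, based on how MySQL transactions work rather than anything stated above: with the default REPEATABLE READ isolation level, all queries on one connection run inside a single transaction and keep seeing the snapshot taken at the first read, so the animate loop fetches the same row forever. Committing before each read (or turning on autocommit) starts a fresh transaction so newly inserted rows become visible:

import mysql.connector
import pandas as pd

mydb = mysql.connector.connect(
    host="xxxxxxxxxx",
    user="xxxxx",
    passwd="xxxxxxx",
    database="sakila",
    autocommit=True,  # option 1: every SELECT sees the latest committed rows
)

def latest_row():
    # Option 2: if autocommit stays off, end the previous transaction first
    # so the next SELECT takes a fresh snapshot.
    mydb.commit()
    return pd.read_sql("SELECT * FROM DATA_TEST ORDER BY ID DESC LIMIT 1", mydb)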

How to specify a range without limiting to numbers

This is the code that collects restaurant reviews. The page range is hard-coded, but there is a problem: each store has a different number of reviews, so a store with only a few reviews should move on to the next store sooner.
The range I am using is far too large, yet I cannot shrink it, because some shops really do have reviews that far in.
How can I make this work efficiently?
I came across find_all(element) while researching this, but I do not know whether I have applied it to my code incorrectly.
# python3
import sys
from bs4 import BeautifulSoup
import urllib.request
import requests
from urllib.parse import quote
import time
import os
import xlwt
import random
import re

FISRT_URL = "https://www.yelp.com/search?find_desc=Korean+Food&find_loc=Seattle,+WA&start="
LAST_URL = "&cflt=korean"

def get_link(URL, doc_name):
    global jisu_i
    global num
    global page
    for jisu_i in range(1):
        current_page_num = 20 + jisu_i * 10
        position = URL.index('t=')
        URL_with_page_num = URL[: position + 2] + str(current_page_num) \
            + URL[position + 2:]
        print(URL_with_page_num)
        importurl = URL_with_page_num
        r = requests.get(importurl)
        soup = BeautifulSoup(r.content.decode('euc-kr', 'replace'), "lxml")
        time.sleep(random.randint(10, 15))
        for title in soup.find_all('h3')[page + 2:21]:
            page = page + 1
            title_link = title.select('a')
            for jisu_m in range(130):
                print(page)
                last_URL = title_link[0]['href']
                print(last_URL)
                first_URL = "https://www.yelp.com"
                global article_URL
                article_URL = first_URL + last_URL
                time.sleep(random.randint(15, 30))
                jisuurl = article_URL
                for k in range(99):  # hard-coded upper bound on review pages
                    jisu_page_num = 0 + k * 20
                    position = jisuurl.index('?')
                    URL_with_page_num = jisuurl[: position + 1] + str("start=") + str(jisu_page_num)
                    jisu_with_page_num = URL_with_page_num
                    print(jisu_with_page_num)
                    jisu_importurl = jisu_with_page_num
                    get_text(URL, jisu_importurl, doc_name)
                    time.sleep(random.randint(40, 180))
Yelp has a very well documented API here: https://www.yelp.com/developers/documentation/v3
This is the only reliable way of interacting with the site programmatically.
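A minimal sketch of using it for this task; the /v3/businesses/search and /v3/businesses/{id}/reviews endpoints are documented there, but the parameters and field handling below are my assumptions about what this scraper needs, and you need your own key from the Yelp developer portal:

import requests

API_KEY = '<YELP_API_KEY>'
headers = {'Authorization': 'Bearer ' + API_KEY}

# Search for Korean restaurants in Seattle.
search = requests.get(
    'https://api.yelp.com/v3/businesses/search',
    headers=headers,
    params={'term': 'Korean Food', 'location': 'Seattle, WA', 'limit': 20},
).json()

for business in search.get('businesses', []):
    # The reviews endpoint returns up to three review excerpts per business,
    # so there is no page range to guess at.
    reviews = requests.get(
        'https://api.yelp.com/v3/businesses/' + business['id'] + '/reviews',
        headers=headers,
    ).json()
    for review in reviews.get('reviews', []):
        print(business['name'], '-', review['text'])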
