How to extract daily close from WSJ using Python?

I am using Python 3 and pandas to parse the daily close prices from WSJ into Excel. However, the daily close shown on screen on the web page cannot be extracted. Here is the link: "https://quotes.wsj.com/index/COMP/historical-prices"
How can I download the close data shown on screen into Excel?
And how can I download the file behind the "DOWNLOAD A SPREADSHEET" button into Excel under another name, like comp.xlsx?
Here is the code:
import requests
import pandas as pd

url = 'https://quotes.wsj.com/index/COMP/historical-prices'
jsonData = requests.get(url).json()

final_df = pd.DataFrame()
for row in jsonData['data']:
    #row = jsonData['data'][1]
    data_row = []
    for idx, colspan in enumerate(row['colspan']):
        colspan_int = int(colspan[0])
        data_row.append(row['td'][idx] * colspan_int)
    flat_list = [item for sublist in data_row for item in sublist]
    temp_row = pd.DataFrame([flat_list])
    final_df = final_df.append(temp_row, sort=True).reset_index(drop=True)

wait2 = input("PRESS ENTER TO CONTINUE.")
Follow-up: here is the working code I ended up with for three indexes:
# HSI
url = 'https://quotes.wsj.com/index/HK/XHKG/HSI/historical-prices/download?num_rows=15&range_days=15&endDate=12/06/2019'
response = requests.get(url)
open('HSI.csv', 'wb').write(response.content)
read_file = pd.read_csv(r'C:\A-CEO\REPORTS\STOCKS\PROFILE\Python\HSI.csv')
read_file.to_excel(r'C:\A-CEO\REPORTS\STOCKS\PROFILE\Python\HSI.xlsx', index=None, header=True)

# SPX
url = 'https://quotes.wsj.com/index/SPX/historical-prices/download?num_rows=15&range_days=15&endDate=12/06/2019'
response = requests.get(url)
open('SPX.csv', 'wb').write(response.content)
read_file = pd.read_csv(r'C:\A-CEO\REPORTS\STOCKS\PROFILE\Python\SPX.csv')
read_file.to_excel(r'C:\A-CEO\REPORTS\STOCKS\PROFILE\Python\SPX.xlsx', index=None, header=True)

# COMP
url = 'https://quotes.wsj.com/index/COMP/historical-prices/download?num_rows=15&range_days=15&endDate=12/06/2019'
response = requests.get(url)
open('COMP.csv', 'wb').write(response.content)
read_file = pd.read_csv(r'C:\A-CEO\REPORTS\STOCKS\PROFILE\Python\COMP.csv')
read_file.to_excel(r'C:\A-CEO\REPORTS\STOCKS\PROFILE\Python\COMP.xlsx', index=None, header=True)

The URL is wrong. Download the spreadsheet manually once; then, on a Mac, do "Get Info" on the file and check "Where From:". You will see the real download URL is of the form below.
import requests
import pandas as pd
import io

# the original URL had a bunch of other parameters I omitted; only these seem to matter, but YMMV
url = 'https://quotes.wsj.com/index/COMP/historical-prices/download?num_rows=360&range_days=360&endDate=11/06/2019'
response = requests.get(url)

# do this if you want the CSV written to your machine
open('test_file.csv', 'wb').write(response.content)

# this decodes the content of the downloaded response and presents it to pandas
df_test = pd.read_csv(io.StringIO(response.content.decode('utf-8')))
To answer your additional question: you can simply loop across a list of tickers or symbols, something like:
base_url = 'https://quotes.wsj.com/index/{ticker_name}/historical-prices/download?num_rows=360&range_days=360&endDate=11/06/2019'
ticker_list = ['COMP', 'SPX', 'HK/XHKG/HSI']
for ticker in ticker_list:
    response = requests.get(base_url.format(ticker_name=ticker))
    # do this if you want the CSV written to your machine
    open('prices_' + ticker.replace('/', '-') + '.csv', 'wb').write(response.content)
Note that for HK/XHKG/HSI we need to replace the slashes with hyphens, or it's not a valid filename. You can also use this pattern to build dataframes, as in the sketch below.
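For instance, here is a minimal sketch that collects each download into a dict of dataframes instead of writing files; the frames name is mine, the rest follows the pattern above:

import io
import requests
import pandas as pd

base_url = 'https://quotes.wsj.com/index/{ticker_name}/historical-prices/download?num_rows=360&range_days=360&endDate=11/06/2019'
ticker_list = ['COMP', 'SPX', 'HK/XHKG/HSI']

frames = {}
for ticker in ticker_list:
    response = requests.get(base_url.format(ticker_name=ticker))
    # decode the CSV payload in memory and parse it straight into pandas
    frames[ticker] = pd.read_csv(io.StringIO(response.content.decode('utf-8')))

print(frames['COMP'].head())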

Related

Parsing JSON output file

Hi, I'm a Python learner. I have written code that extracts a JSON file from a sports website.
The code is:
from bs4 import BeautifulSoup
import requests
import json
url = "https://s3-ap-southeast-2.amazonaws.com/racevic.static/2022-08-01/sportsbet-pakenham-synthetic/sectionaltimes/race-2.json?"
payload={}
headers = {}
response = requests.request("GET", url, headers=headers, data=payload)
print(response.text)
and the output looks like this (a small portion):
sectionaltimes_callback({"Horses":[{"Comment":"Slow Out 1 Lengths , got back 2nd last off tardy start 8 Lengths 800m, still mile off them getting widest from the corner, charged home last 200m for eye catching second spot # powered home widest","FinalPosition":2,"FinalPositionAbbreviation":"2nd","FullName":"Ameerati","SaddleNumber":12,"HorseUrl":"/horses/ameerati","SilkUrl":"//cdn.silks.racing.com/bb/114031.png","Trainer":"Robbie Griffiths & Mathew de Kock","TrainerUrl":"/trainers/robbie-griffiths","Jockey":"P.M.Moloney","JockeyUrl":"/jockeys/patrick-moloney","SectionalTimes":[{"Distance":"1200m","Position":11,"Time":"1:11.43","AvgSpeed":0.0},{"Distance":"1000m","Position":11,"Time":"59.29","AvgSpeed":0.0},{"Distance":"800m","Position":11,"Time":"46.95","AvgSpeed":0.0},{"Distance":"600m","Position":11,"Time":"34.77","AvgSpeed":0.0},{"Distance":"400m","Position":11,"Time":"22.71","AvgSpeed":0.0},{"Distance":"200m","Position":4,"Time":"11.45","AvgSpeed":0.0},{"Distance":"Finish","Position":2,"Time":"","AvgSpeed":0.0}],"SplitTimes":[{"Distance":"1200m-1000m","Position":11,"Time":"12.14","AvgSpeed":0.0},{"Distance":"1000m-800m","Position":11,"Time":"12.34","AvgSpeed":0.0},{"Distance":"800m-600m","Position":11,"Time":"12.18","AvgSpeed":0.0},{"Distance":"600m-400m","Position":11,"Time":"12.06","AvgSpeed":0.0},{"Distance":"400m-200m","Position":11,"Time":"11.26","AvgSpeed":0.0},{"Distance":"200m-Finish","Position":4,"Time":"11.45","AvgSpeed":0.0}],"StartPosition":0,"BarrierNumber":12,"RaceTime":"","TimeVarToWinner":0.0,"BeatenMargin":0.0,"DistanceRun":0,"DistanceVarToWinner":"","SixHundredMetresTime":"34.77","TwoHundredMetresTime":"11.45","Early":0.0,"Mid":0.0,"Late":0.0,"OverallPeakSpeed":0.0,"PeakSpeedLocation":null,"OverallAvgSpeed":0.0,"DistanceFromRail":0.0},
What I would like help with now is how to put this into a format that I can open in Excel.
import json
import pandas as pd
import requests
from openpyxl import Workbook
from openpyxl.utils.dataframe import dataframe_to_rows

def race_data_to_xslxs(url, fname):
    # get data, stripping the JSONP wrapper ("sectionaltimes_callback(" is 24 chars) before parsing
    data = json.loads(requests.get(url).text[24:-1])
    # create dataframes: one per list-valued key, plus one for the scalar fields
    dfs = {}
    singulars = pd.DataFrame()
    for k, v in data.items():
        if isinstance(v, list):
            dfs[k] = pd.DataFrame(v)
        else:
            singulars[k] = [v]
    dfs = {'summary': singulars, **dfs}
    # create workbook with one sheet per dataframe
    wb = Workbook()
    for k, df in dfs.items():
        wsx = wb.create_sheet(title=k)
        rows = dataframe_to_rows(df)
        for r_idx, row in enumerate(rows, 1):
            for c_idx, value in enumerate(row, 1):
                wsx.cell(row=r_idx, column=c_idx, value=str(value))
    del wb['Sheet']  # drop the default empty sheet
    # write excel file
    wb.save(filename=fname)

url = "https://s3-ap-southeast-2.amazonaws.com/racevic.static/2022-08-01/sportsbet-pakenham-synthetic/sectionaltimes/race-2.json?"
path = 'fname.xlsx'
race_data_to_xslxs(url=url, fname=path)
The API is returning JSONP, not JSON: JSON wrapped in a call to a callback function, which lets browsers consume it without violating the same-origin policy. You need to remove that function call before parsing it as JSON.
import re
import json

response = requests.request("GET", url, headers=headers, data=payload)
# strip the sectionaltimes_callback( ... ) wrapper, then parse the JSON inside
json_string = re.sub(r'^sectionaltimes_callback\((.*)\)$', r'\1', response.text)
data = json.loads(json_string)
You can try this:

import requests
import json

url = "https://s3-ap-southeast-2.amazonaws.com/racevic.static/2022-08-01/sportsbet-pakenham-synthetic/sectionaltimes/race-2.json?"
response = requests.get(url)
# split off the callback name and strip the trailing parenthesis, then parse
data = json.loads(response.text.split("(", 1)[1].strip(")"))
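Once parsed, the list under the "Horses" key seen in the sample output can go straight into pandas; a small sketch (the output filename is arbitrary, and to_excel needs openpyxl installed):

import pandas as pd

# one row per horse; nested lists such as SectionalTimes stay as object columns
df = pd.DataFrame(data['Horses'])
df.to_excel('race-2.xlsx', index=False)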

How can we read JSON data from a URL, convert it to a dataframe, and save it as CSV?

I'm playing around with some code to read JSON-encoded data from a URL, push it into a dataframe, and save the results to a CSV. The code that I attempted to run is shown below. I think this is pretty close, but something is wrong, because nothing gets downloaded.
import urllib
from urllib.request import urlopen
import json
import pandas as pd
from pandas.io.json import json_normalize

all_links = ['https://www.baptisthealthsystem.com/docs/global/standard-charges/474131755_abrazomaranahospital_standardcharges.json?sfvrsn=9a27928_2',
             'https://www.baptisthealthsystem.com/docs/global/standard-charges/621861138_abrazocavecreekhospital_standardcharges.json?sfvrsn=674fd6f_2',
             'https://www.baptisthealthsystem.com/docs/global/standard-charges/621809851_abrazomesahospital_standardcharges.json?sfvrsn=13953222_2',
             'https://www.baptisthealthsystem.com/docs/global/standard-charges/621811285_abrazosurprisehospital_standardcharges.json?sfvrsn=c8113dcf_2']

for item in all_links:
    #print(item)
    try:
        length = len(item)
        first_under = item.find('_') + 1
        last_under = item.rfind('?') - 21
        file_name = item[first_under:last_under]
        print(file_name)
        # store the response of URL
        response = urlopen(item)
        data = json.loads(response.read())
        #print(type(data))
        data = json.loads(item.read().decode())
        df = pd.DataFrame(json_normalize(data, 'metrics'), encoding='mac_roman')
        DOWNLOAD_PATH = 'C:\\Users\\ryans\\Desktop\\hospital_data\\' + file_name + '.csv'
        urllib.request.urlretrieve(df, DOWNLOAD_PATH)
    except Exception as e:
        print(e)
Any thoughts on what could be wrong here?
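A few things stand out. item is a string, so item.read() raises an AttributeError, which your bare except then prints; urllib.request.urlretrieve downloads from a URL and cannot save a dataframe, so df.to_csv should write the file instead; and json_normalize is available as the top-level pd.json_normalize in recent pandas. A sketch of a fix under those assumptions (it keeps the 'metrics' record path from your original call and assumes the output directory already exists):

import json
from urllib.request import urlopen

import pandas as pd

all_links = ['https://www.baptisthealthsystem.com/docs/global/standard-charges/474131755_abrazomaranahospital_standardcharges.json?sfvrsn=9a27928_2']

for item in all_links:
    file_name = item[item.find('_') + 1:item.rfind('?') - 21]
    # read the response once and parse the JSON payload
    data = json.loads(urlopen(item).read().decode())
    # flatten the 'metrics' records into a table
    df = pd.json_normalize(data, 'metrics')
    # to_csv writes the file itself; no urlretrieve needed
    df.to_csv('C:\\Users\\ryans\\Desktop\\hospital_data\\' + file_name + '.csv', index=False)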

df.to_csv is not generating a CSV file even with a fully specified filepath

Still trying to learn Python. As a learning exercise, I tried to write a script to pull the last 5 years of historical bitcoin data from coinmarketcap.com. It now runs without generating any errors, but no CSV file is generated at the end. I read elsewhere that it may save to another directory, but I can see that the working directory is where I'm looking, and I used df.to_csv in another script in this directory and it saved correctly.
I am using Spyder through Anaconda. Here is my script:
from bs4 import BeautifulSoup
import requests
import pandas as pd
import json

coins = {}
for i in coins:
    page = requests.get('https://coinmarketcap.com/currencies/bitcoin/historical-data/?start=20150920&end=20200918')
    soup = BeautifulSoup(page.content, 'html.parser')
    data = soup.find('script', id="__NEXT_DATA__", type="application/json")
    historical_data = json.loads(data.contents[0])
    quotes = historical_data['props']['initialState']['cryptocurrency']['ohlcvHistorical']['i']['quotes']
    market_cap = []
    volume = []
    timestamp = []
    name = []
    symbol = []
    slug = []
    df = pd.DataFrame(columns=['marketcap', 'volume', 'timestamp', 'name', 'symbol', 'slug'])
    for j in quotes:
        market_cap.append(j['quote']['USD']['market_cap'])
        volume.append(j['quote']['USD']['volume'])
        timestamp.append(j['quote']['USD']['timestamp'])
        name.append(info['name'])
        symbol.append(info['symbol'])
        slug.append(coins[i])
    df['marketcap'] = market_cap
    df['volume'] = volume
    df['timestamp'] = timestamp
    df['name'] = name
    df['symbol'] = symbol
    df['slug'] = slug
    df.to_csv('C:\\Python34\\Projects\\Trends\\btcvalue.csv', index=False)
Your first for loop will never run, because coins is always empty. I would also suggest moving df.to_csv out of the for loop:
[...]
coins = {}
for i in coins:
    [...]
    df['slug'] = slug

df.to_csv('C:\\Python34\\Projects\\Trends\\btcvalue.csv', index=False)
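To see the first point in isolation, here is a tiny sketch; the 'bitcoin' entry is a hypothetical example of whatever the dict was meant to hold:

coins = {}
for i in coins:
    print('this never prints')  # the loop body runs zero times for an empty dict

coins = {'bitcoin': 'bitcoin'}  # hypothetical entry
for i in coins:
    print(i)  # now prints 'bitcoin'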

Putting scraped data from BeautifulSoup into a CSV file

I want to put the data I'm scraping from the website into a CSV file. My first attempt was with Scrapy, but I couldn't get the syntax right. I managed to do the scraping with BeautifulSoup; I just don't know how to put the results into a CSV file.
import requests
from bs4 import BeautifulSoup

URL = "https://www.practo.com/tests/glycosylated-haemoglobin-blood/p?city=delhi"
page = requests.get(URL)
#print(page)
soup = BeautifulSoup(page.content, 'html.parser')
results = soup.find(id='root-app')
#print(results.prettify())
job_elems = results.find_all('div', class_='u-padrl--std')

#<h1 class="o-font-size--24 u-font-bold u-marginb--std">HbA1c Test</h1>
for job_elem in job_elems:
    title_elem = job_elem.find('h1', class_='o-font-size--24 u-font-bold u-marginb--std')
    also_known = job_elem.find('span', class_="u-font-bold")
    cost = job_elem.find('div', class_="o-font-size--22 u-font-bold o-f-color--primary")
    what_test = job_elem.find('div', class_="c-markdown--unstyled")
    #test_prep = job_elem.find('div', class_="c-tabsection__content c-pp__accordion-item__content active")
    #temp = job_elem.find('p')
    print(title_elem.text)
    print(also_known.text)
    print(cost.text)
    print(what_test.text)
    #print(temp.text)
    #print(test_prep.text)
    print()

text_content = results.find_all('div', class_='c-markdown--unstyled')
# c-tabsection__content c-pp__accordion-item__content active
# c-tabsection c-pp__accordion-item u-br-rule u-padtb--std--half active
for item in text_content:
    prep = item.find('p')
    #, class_="c-tabsection__content c-pp__accordion-item__content active")
    print(prep.text)
print('xxo')
import requests
from bs4 import BeautifulSoup
# import the csv module
import csv

URL = "https://www.practo.com/tests/glycosylated-haemoglobin-blood/p?city=delhi"
page = requests.get(URL)
#print(page)
soup = BeautifulSoup(page.content, 'html.parser')
results = soup.find(id='root-app')
#print(results.prettify())
job_elems = results.find_all('div', class_='u-padrl--std')

#<h1 class="o-font-size--24 u-font-bold u-marginb--std">HbA1c Test</h1>
rows = []
fields = ['title_elem', 'also_known', 'cost', 'what_test']
filename = "myfile.csv"
for job_elem in job_elems:
    # keep the text as str; encoding to bytes would write b'...' literals into the CSV
    title_elem = job_elem.find('h1', class_='o-font-size--24 u-font-bold u-marginb--std').text
    also_known = job_elem.find('span', class_="u-font-bold").text
    cost = job_elem.find('div', class_="o-font-size--22 u-font-bold o-f-color--primary").text
    what_test = job_elem.find('div', class_="c-markdown--unstyled").text
    row = [title_elem, also_known, cost, what_test]
    rows.append(row)

with open(filename, 'w', newline='', encoding='utf-8') as csvfile:
    # creating a csv writer object
    csvwriter = csv.writer(csvfile)
    # writing the fields
    csvwriter.writerow(fields)
    # writing the data rows
    csvwriter.writerows(rows)

text_content = results.find_all('div', class_='c-markdown--unstyled')
# c-tabsection__content c-pp__accordion-item__content active
# c-tabsection c-pp__accordion-item u-br-rule u-padtb--std--half active
for item in text_content:
    prep = item.find('p')
    #, class_="c-tabsection__content c-pp__accordion-item__content active")
    print(prep.text)
print('xxo')
You can use the xlsxwriter library.

import xlsxwriter

workbook = xlsxwriter.Workbook("file.xlsx")
worksheet = workbook.add_worksheet()
# write(row, column, value) uses zero-indexed cell coordinates
worksheet.write(0, 0, "element")
workbook.close()
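For example, to write the header fields and scraped rows from the first answer with xlsxwriter; the single data row here is a made-up placeholder standing in for whatever the scrape collected:

import xlsxwriter

# header fields from the first answer; the data row is an example only
fields = ['title_elem', 'also_known', 'cost', 'what_test']
rows = [['HbA1c Test', 'Glycosylated Haemoglobin Test', 'example cost', 'example description']]

workbook = xlsxwriter.Workbook("myfile.xlsx")
worksheet = workbook.add_worksheet()

# header row first, then one worksheet row per scraped record
for col, field in enumerate(fields):
    worksheet.write(0, col, field)
for r, row in enumerate(rows, start=1):
    for c, value in enumerate(row):
        worksheet.write(r, c, value)

workbook.close()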

How can I check a parameter in a while loop for each item in an array?

I'm trying to get titles from an old website.
The problem is that in some cases I'm getting a null value.
Therefore, I have tried to use a while loop that changes the URL and retries.
Is my while loop in the right place?
The procedure is like this:
open file
get URL
check URL
get title
print title
while title == null:
    replace part of the URL and check the URL again
from urllib.request import urlopen
from bs4 import BeautifulSoup
from openpyxl import Workbook
import os
import xlrd
import lxml

# set file location
os.chdir("/excel_files")
# set the name of the file
file_name = "old.xlsx"
# open workbook
workbook = xlrd.open_workbook(file_name)
# set existing worksheet
sheet = workbook.sheet_by_index(0)

temp_list = [20131022212405, 20090127003537, 2009012702352]

for i in range(sheet.nrows):
    try:
        u = sheet.cell_value(i, 1)
        html = urlopen(u)
        bsObj = BeautifulSoup(html.read(), features='lxml')
        # get title
        title = str(bsObj.title)
        print('row no. ', i, 'title is :', title)
    except:
        title = 'null'
    while (title == 'null'):
        try:
            u = u.replace(temp_list[i], temp_list[i + 1])
            html = urlopen(u)
            bsObj = BeautifulSoup(html.read(), features='lxml')
            title = str(bsObj.title)
        except:
            print('title is :', title)
I'm getting null all the time, instead of only for the rows that actually are null.
It looks like the try/except indentation in your first for loop (for i in range(sheet.nrows):) is wrong; try and except should be at the same level.
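A sketch of the corrected loop; the fetch_title helper, the bounds check on temp_list, and the str() conversions (temp_list holds integers, and str.replace needs strings) are my additions, so treat them as assumptions about the intent:

from urllib.request import urlopen
from bs4 import BeautifulSoup

temp_list = [20131022212405, 20090127003537, 2009012702352]

def fetch_title(u):
    # return the page title, or 'null' if the URL cannot be fetched or parsed
    try:
        return str(BeautifulSoup(urlopen(u).read(), features='lxml').title)
    except Exception:
        return 'null'

for i in range(sheet.nrows):  # sheet is the xlrd worksheet opened above
    u = sheet.cell_value(i, 1)
    title = fetch_title(u)
    # retry with the next timestamp only while this row's title is still null
    j = 0
    while title == 'null' and j + 1 < len(temp_list):
        u = u.replace(str(temp_list[j]), str(temp_list[j + 1]))
        j += 1
        title = fetch_title(u)
    print('row no.', i, 'title is:', title)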
