Getting a hyperlink URL from an Excel document - Python

I am reading an Excel file using xlrd. In one column I have a company name which is formatted as a hyperlink (meaning there is a URL behind it). When I get the cell value I only get the company name. How can I also get the URL behind it?
Below is the code for reading an Excel file using the xlrd module (assume the required modules are imported).
mainData_book = xlrd.open_workbook("IEsummary.xls", formatting_info=True)
mainData_sheet = mainData_book.sheet_by_index(0) # get the first sheet (index 0)
start = 1
end = 101
for counter in range(start, end):
    rowValues = mainData_sheet.row_values(counter, start_colx=0, end_colx=8)
    company_name = rowValues[0]  # how can I also get the link here?

In xlrd 0.7.2 or newer, you can use hyperlink_map:
import xlrd
mainData_book = xlrd.open_workbook("IEsummary.xls", formatting_info=True)
mainData_sheet = mainData_book.sheet_by_index(0)
for row in range(1, 101):
    rowValues = mainData_sheet.row_values(row, start_colx=0, end_colx=8)
    company_name = rowValues[0]
    link = mainData_sheet.hyperlink_map.get((row, 0))
    url = '(No URL)' if link is None else link.url_or_path
    print(company_name.ljust(20) + ': ' + url)
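If you have a newer .xlsx file instead, xlrd's formatting_info path does not apply; a rough equivalent with openpyxl (the filename and layout below are assumptions based on the question) would be:

# Sketch for .xlsx files, assuming openpyxl is installed and the same layout
# (linked company names in column A, data starting at row 2).
from openpyxl import load_workbook

wb = load_workbook("IEsummary.xlsx")
ws = wb.worksheets[0]

for row in ws.iter_rows(min_row=2, max_row=101, min_col=1, max_col=1):
    cell = row[0]
    url = cell.hyperlink.target if cell.hyperlink else '(No URL)'
    print(str(cell.value).ljust(20) + ': ' + url)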

Related

df.to_csv is not generating a CSV file even with fully specified filepath

Still trying to learn Python. As a learning exercise, I tried to write a script to pull the last 5 years of historical bitcoin data from coinmarketcap.com. It runs without generating any errors, but no CSV file is generated at the end. I read elsewhere that it may save to another directory, but I can see that the working directory is where I'm looking, and I used df.to_csv in another script in this directory and it saved correctly.
I am using Spyder through Anaconda Notebooks. Here is my script:
from bs4 import BeautifulSoup
import requests
import pandas as pd
import json
coins = {}
for i in coins:
    page = requests.get('https://coinmarketcap.com/currencies/bitcoin/historical-data/?start=20150920&end=20200918')
    soup = BeautifulSoup(page.content, 'html.parser')
    data = soup.find('script', id="__NEXT_DATA__", type="application/json")
    historical_data = json.loads(data.contents[0])
    quotes = historical_data['props']['initialState']['cryptocurrency']['ohlcvHistorical']['i']['quotes']
    market_cap = []
    volume = []
    timestamp = []
    name = []
    symbol = []
    slug = []
    df = pd.DataFrame(columns=['marketcap', 'volume', 'timestamp', 'name', 'symbol', 'slug'])
    for j in quotes:
        market_cap.append(j['quote']['USD']['market_cap'])
        volume.append(j['quote']['USD']['volume'])
        timestamp.append(j['quote']['USD']['timestamp'])
        name.append(info['name'])
        symbol.append(info['symbol'])
        slug.append(coins[i])
    df['marketcap'] = market_cap
    df['volume'] = volume
    df['timestamp'] = timestamp
    df['name'] = name
    df['symbol'] = symbol
    df['slug'] = slug
    df.to_csv('C:\\Python34\\Projects\\Trends\\btcvalue.csv', index=False)
Your first for loop will never run, because coins is always empty. I would also suggest moving df.to_csv out of the for loop:
[...]
coins = {}
for i in coins:
    [...]
    df['slug'] = slug
df.to_csv('C:\\Python34\\Projects\\Trends\\btcvalue.csv', index=False)
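For illustration, a minimal sketch of the fixed structure (the dummy quotes entry below is a stand-in for whatever your parsing actually returns; the points are the non-empty coins dict and the single to_csv call after the loop):

import pandas as pd

coins = {'bitcoin': 'bitcoin'}  # was {}, so the original loop never ran
frames = []
for i in coins:
    quotes = [  # placeholder for the parsed ohlcvHistorical quotes
        {'quote': {'USD': {'market_cap': 1.0, 'volume': 2.0, 'timestamp': '2020-09-18'}}},
    ]
    df = pd.DataFrame({
        'marketcap': [q['quote']['USD']['market_cap'] for q in quotes],
        'volume': [q['quote']['USD']['volume'] for q in quotes],
        'timestamp': [q['quote']['USD']['timestamp'] for q in quotes],
        'slug': coins[i],
    })
    frames.append(df)
pd.concat(frames).to_csv('btcvalue.csv', index=False)  # write once, after the loop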

How to extract daily close from WSJ using Python?

I used Python 3 and pandas to parse the daily close from WSJ into Excel. However, the daily close shown on the web page cannot be extracted. Here is the link: "https://quotes.wsj.com/index/COMP/historical-prices"
How do I download the close data on screen into Excel?
And how do I download the "DOWNLOAD A SPREADSHEET" button file into Excel under another name, like comp.xlsx?
Here is the code:
import requests
import pandas as pd
url = 'https://quotes.wsj.com/index/COMP/historical-prices'
jsonData = requests.get(url).json()
final_df = pd.DataFrame()
for row in jsonData['data']:
    # row = jsonData['data'][1]
    data_row = []
    for idx, colspan in enumerate(row['colspan']):
        colspan_int = int(colspan[0])
        data_row.append(row['td'][idx] * colspan_int)
    flat_list = [item for sublist in data_row for item in sublist]
    temp_row = pd.DataFrame([flat_list])
    final_df = final_df.append(temp_row, sort=True).reset_index(drop=True)
wait2 = input("PRESS ENTER TO CONTINUE.")
Follow-up question code:
#
url = 'https://quotes.wsj.com/index/HK/XHKG/HSI/historical-prices/download?num_rows=15&range_days=15&endDate=12/06/2019'
response = requests.get(url)
open('HSI.csv', 'wb').write(response.content)
read_file = pd.read_csv(r'C:\A-CEO\REPORTS\STOCKS\PROFILE\Python\HSI.csv')
read_file.to_excel(r'C:\A-CEO\REPORTS\STOCKS\PROFILE\Python\HSI.xlsx', index=None, header=True)
#
url = 'https://quotes.wsj.com/index/SPX/historical-prices/download?num_rows=15&range_days=15&endDate=12/06/2019'
response = requests.get(url)
open('SPX.csv', 'wb').write(response.content)
read_file = pd.read_csv(r'C:\A-CEO\REPORTS\STOCKS\PROFILE\Python\SPX.csv')
read_file.to_excel(r'C:\A-CEO\REPORTS\STOCKS\PROFILE\Python\SPX.xlsx', index=None, header=True)
#
url = 'https://quotes.wsj.com/index/COMP/historical-prices/download?num_rows=15&range_days=15&endDate=12/06/2019'
response = requests.get(url)
open('COMP.csv', 'wb').write(response.content)
read_file = pd.read_csv(r'C:\A-CEO\REPORTS\STOCKS\PROFILE\Python\COMP.csv')
read_file.to_excel(r'C:\A-CEO\REPORTS\STOCKS\PROFILE\Python\COMP.xlsx', index=None, header=True)
The URL you are using is wrong. Once you download the file, you can do "Get Info" on a Mac and look at "Where From:"; you'll see the real download URL is of the form below.
import requests
import pandas as pd
import io
#original URL had a bunch of other parameters I omitted, only these seem to matter but YMMV
url = 'https://quotes.wsj.com/index/COMP/historical-prices/download?num_rows=360&range_days=360&endDate=11/06/2019'
response = requests.get(url)
#do this if you want the CSV written to your machine
open('test_file.csv', 'wb').write(response.content)
# this decodes the content of the downloaded response and presents it to pandas
df_test = pd.read_csv(io.StringIO(response.content.decode('utf-8')))
To answer your additional question -- you can simply loop across a list of tickers or symbols, something like:
base_url = 'https://quotes.wsj.com/index/{ticker_name}/historical-prices/download?num_rows=360&range_days=360&endDate=11/06/2019'
ticker_list = ['COMP','SPX','HK/XHKG/HSI']
for ticker in ticker_list:
    response = requests.get(base_url.format(ticker_name=ticker))
    # do this if you want the CSV written to your machine
    open('prices_' + ticker.replace('/', '-') + '.csv', 'wb').write(response.content)
Note that for HK/XHKG/HSI we need to replace the slashes with hyphens, or the result is not a valid filename. You can also use this pattern to build dataframes, for example:
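A minimal sketch of the dataframe variant (same endpoint assumptions as above; the CSV is parsed in memory instead of written to disk):

import io
import requests
import pandas as pd

base_url = 'https://quotes.wsj.com/index/{ticker_name}/historical-prices/download?num_rows=360&range_days=360&endDate=11/06/2019'
ticker_list = ['COMP', 'SPX', 'HK/XHKG/HSI']

frames = {}
for ticker in ticker_list:
    response = requests.get(base_url.format(ticker_name=ticker))
    # decode the CSV payload and hand it to pandas without touching disk
    frames[ticker] = pd.read_csv(io.StringIO(response.content.decode('utf-8')))

print(frames['COMP'].head())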

How can I check a parameter in a while loop for each item in an array?

I'm trying to get page titles from an old website.
The problem is that in some cases I get a null value.
Therefore, I tried to add a while loop that changes the URL and retries.
Is my while loop in the right place?
The procedure is like this:
open file
get url
check url
get title
print title
while title == null:
    replace part of the url and check url again
from urllib.request import urlopen
from bs4 import BeautifulSoup
from openpyxl import Workbook
import os
import xlrd
import lxml
# set file location
os.chdir("/excel_files")
# set the name of the file
file_name = "old.xlsx"
# open workbook
workbook = xlrd.open_workbook(file_name)
# set existing worksheet
sheet = workbook.sheet_by_index(0)
temp_list = [20131022212405, 20090127003537, 2009012702352]
for i in range(sheet.nrows):
    try:
        u = sheet.cell_value(i, 1)
        html = urlopen(u)
        bsObj = BeautifulSoup(html.read(), features='lxml')
        # get title
        title = str(bsObj.title)
        print('row no. ', i, 'title is :', title)
    except:
        title = 'null'
        while (title == 'null'):
            try:
                u = u.replace(temp_list[i], temp_list[i + 1])
                html = urlopen(u)
                bsObj = BeautifulSoup(html.read(), features='lxml')
                title = str(bsObj.title)
            except:
                print('title is :', title)
I'm getting null all the time, instead of only for the rows that are actually null.
It looks like the try/except indentation in your first for loop (for i in range(sheet.nrows):) is wrong; try and except should be at the same level.
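A hedged sketch of the corrected structure, assuming the intent is: fetch the title from the URL in the sheet, and while the title is still null, swap in the next timestamp from temp_list and retry. The sheet variable comes from the xlrd workbook opened above; the timestamps are kept as strings so str.replace works:

from urllib.request import urlopen
from bs4 import BeautifulSoup

temp_list = ['20131022212405', '20090127003537', '2009012702352']

def fetch_title(url):
    try:
        html = urlopen(url)
        return str(BeautifulSoup(html.read(), features='lxml').title)
    except Exception:
        return 'null'

for i in range(sheet.nrows):  # sheet opened with xlrd as in the question
    u = sheet.cell_value(i, 1)
    title = fetch_title(u)
    k = 0
    while title == 'null' and k < len(temp_list) - 1:
        u = u.replace(temp_list[k], temp_list[k + 1])  # try the next timestamp
        title = fetch_title(u)
        k += 1
    print('row no.', i, 'title is:', title)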

Importing Multiple HTML Files Into Excel as Separate Worksheets

I have a number of HTML files that I need to open up or import into a single Excel Workbook and simply save the Workbook. Each HTML file should be on its own Worksheet inside the Workbook.
My existing code does not work; it crashes on the workbook.Open(html) line and probably would on the following lines as well. I can't find anything on the web specific to this topic.
import win32com.client as win32
import pathlib as path
def save_html_files_to_worksheets(read_directory):
    read_path = path.Path(read_directory)
    save_path = read_path.joinpath('Single_Workbook_Containing_HTML_Files.xlsx')
    excel_app = win32.gencache.EnsureDispatch('Excel.Application')
    workbook = excel_app.Workbooks.Add()  # create a new excel workbook
    indx = 1  # used to add new worksheets dependent on number of html files
    for html in read_path.glob('*.html'):  # loop through directory getting html files
        workbook.Open(html)  # open the html in the newly created workbook - this doesn't work though
        worksheet = workbook.Worksheets(indx)  # each iteration in loop add new worksheet
        worksheet.Name = 'Test' + str(indx)  # name added worksheets
        indx += 1
    workbook.SaveAs(str(save_path), 51)  # win32com requires a string-like path, 51 is the xlsx format
    excel_app.Application.Quit()

save_html_files_to_worksheets(r'C:\Users\<UserName>\Desktop\HTML_FOLDER')
The following code does half of what I want, if this helps. It converts each HTML file into a separate Excel file; I need each HTML file in one Excel file with multiple worksheets.
import win32com.client as win32
import pathlib as path
def save_as_xlsx(read_directory):
    read_path = path.Path(read_directory)
    excel_app = win32.gencache.EnsureDispatch('Excel.Application')
    for html in read_path.glob('*.html'):
        save_path = read_path.joinpath(html.stem + '.xlsx')
        wb = excel_app.Workbooks.Open(html)
        wb.SaveAs(str(save_path), 51)
    excel_app.Application.Quit()

save_as_xlsx(r'C:\Users\<UserName>\Desktop\HTML_FOLDER')
Here is a link to a sample HTML file you can use, the data in the file is not real: HTML Download Link
One solution would be to open the HTML file into a temporary workbook, and copy the sheet from there into the workbook containing all of them:
workbook = excel_app.Application.Workbooks.Add()
sheet = workbook.Sheets(1)
for path in read_path.glob('*.html'):
    workbook_tmp = excel_app.Application.Workbooks.Open(str(path))  # COM wants a string path
    workbook_tmp.Sheets(1).Copy(Before=sheet)
    workbook_tmp.Close()
# Remove the redundant 'Sheet1'
excel_app.Application.DisplayAlerts = False
sheet.Delete()
excel_app.Application.DisplayAlerts = True
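Wrapped up as a complete function (a sketch under the same assumptions as above; adjust the folder path to your machine):

import pathlib
import win32com.client as win32

def combine_html_files(read_directory):
    read_path = pathlib.Path(read_directory)
    save_path = read_path.joinpath('Single_Workbook_Containing_HTML_Files.xlsx')
    excel_app = win32.gencache.EnsureDispatch('Excel.Application')
    workbook = excel_app.Workbooks.Add()
    sheet = workbook.Sheets(1)
    for html in read_path.glob('*.html'):
        workbook_tmp = excel_app.Workbooks.Open(str(html))  # open each HTML in its own temp workbook
        workbook_tmp.Sheets(1).Copy(Before=sheet)           # copy its sheet into the combined workbook
        workbook_tmp.Close()
    excel_app.Application.DisplayAlerts = False
    sheet.Delete()  # drop the leftover blank 'Sheet1'
    excel_app.Application.DisplayAlerts = True
    workbook.SaveAs(str(save_path), 51)  # 51 = xlOpenXMLWorkbook (.xlsx)
    excel_app.Application.Quit()

combine_html_files(r'C:\Users\<UserName>\Desktop\HTML_FOLDER')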
I believe pandas will make your job much easier.
pip install pandas
Here's an example of how to get multiple tables from a Wikipedia page into pandas DataFrames and save them to disk.
import pandas as pd
url = "https://en.wikipedia.org/wiki/List_of_American_films_of_2017"
wikitables = pd.read_html(url, header=0, attrs={"class":"wikitable"})
for idx, df in enumerate(wikitables):
    df.to_csv('{}.csv'.format(idx), index=False)
For your use case, something like this should work:
import pathlib as path
import pandas as pd
def save_as_xlsx(read_directory):
    read_path = path.Path(read_directory)
    for html in read_path.glob('*.html'):
        save_path = read_path.joinpath(html.stem + '.xlsx')
        dfs_from_html = pd.read_html(html, header=0)
        for idx, df in enumerate(dfs_from_html):
            df.to_excel('{}.xlsx'.format(idx), index=False)
Make sure to pass the correct HTML attributes (the attrs argument) to the pd.read_html function.
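If the goal is still one workbook with one worksheet per HTML file, a pandas-only sketch (assuming each file contains at least one table that pd.read_html can parse, and that openpyxl is installed as the writer engine) could use pd.ExcelWriter:

import pathlib
import pandas as pd

def save_html_files_to_one_workbook(read_directory):
    read_path = pathlib.Path(read_directory)
    save_path = read_path.joinpath('Single_Workbook_Containing_HTML_Files.xlsx')
    with pd.ExcelWriter(save_path) as writer:
        for html in sorted(read_path.glob('*.html')):
            df = pd.read_html(html, header=0)[0]  # first table in the file
            # Excel caps sheet names at 31 characters
            df.to_excel(writer, sheet_name=html.stem[:31], index=False)

save_html_files_to_one_workbook(r'C:\Users\<UserName>\Desktop\HTML_FOLDER')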
How about this?
Sub From_XML_To_XL()
'UpdatebyKutoolsforExcel20151214
    Dim xWb As Workbook
    Dim xSWb As Workbook
    Dim xStrPath As String
    Dim xFileDialog As FileDialog
    Dim xFile As String
    Dim xCount As Long
    On Error GoTo ErrHandler
    Set xFileDialog = Application.FileDialog(msoFileDialogFolderPicker)
    xFileDialog.AllowMultiSelect = False
    xFileDialog.Title = "Select a folder [Kutools for Excel]"
    If xFileDialog.Show = -1 Then
        xStrPath = xFileDialog.SelectedItems(1)
    End If
    If xStrPath = "" Then Exit Sub
    Application.ScreenUpdating = False
    Set xSWb = ThisWorkbook
    xCount = 1
    xFile = Dir(xStrPath & "\*.xml")
    Do While xFile <> ""
        Set xWb = Workbooks.OpenXML(xStrPath & "\" & xFile)
        xWb.Sheets(1).UsedRange.Copy xSWb.Sheets(1).Cells(xCount, 1)
        xWb.Close False
        xCount = xSWb.Sheets(1).UsedRange.Rows.Count + 2
        xFile = Dir()
    Loop
    Application.ScreenUpdating = True
    xSWb.Save
    Exit Sub
ErrHandler:
    MsgBox "no files xml", , "Kutools for Excel"
End Sub

Update Excel worksheet using python xlwt module

I am looking to update my already created Excel worksheet with JSON data coming from a browser. The current code generates a new worksheet every time I run the program.
import requests
import json
import urllib
import xlwt
url = raw_input("Enter url:-")
res = urllib.urlopen(url)
data = res.read()
data1 = json.loads(data)
book = xlwt.Workbook(encoding="utf-8")
sheet1 = book.add_sheet("AssetsReport0")
column_count = 0
for title, value in data1.iteritems():
    sheet1.write(0, column_count, title)
    sheet1.write(1, column_count, value)
    column_count += 1
file_name = "test1.xls"
book.save(file_name)
What is the best way I could do this?
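One possible direction (a sketch, not from the original thread): xlwt alone always writes a brand-new file, but the xlutils package can copy an existing xlrd workbook into a writable xlwt one, so the same test1.xls can be rewritten in place:

# Sketch assuming xlutils is installed (pip install xlutils) and that
# test1.xls already exists from a previous run.
import xlrd
from xlutils.copy import copy

data1 = {"asset": "value"}  # stand-in for the JSON parsed from the browser

book = xlrd.open_workbook("test1.xls", formatting_info=True)
wb = copy(book)            # writable xlwt copy of the existing workbook
sheet1 = wb.get_sheet(0)   # the existing "AssetsReport0" sheet

column_count = 0
for title, value in data1.items():
    sheet1.write(0, column_count, title)
    sheet1.write(1, column_count, value)
    column_count += 1

wb.save("test1.xls")       # overwrites the file, keeping the one worksheet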
