Scraping data from an HTML table using XPath and lxml or Selenium - Python

I need to extract data from an HTML table on this website:
https://1x2.lucksport.com/result_en.shtml?dt=2019-04-12&cid=156
I use Python, Selenium, and lxml with XPath.
I want to extract each match's odds.
The problem is that each match spans two rows: there are two tr elements with class "dtd2", then two with class "dtd1", and so on.
I need an XPath that extracts the first row of each pair together with the row that follows it.
driver.get(u)
t = html.fromstring(driver.page_source)
for i in t.xpath('//*[@id="odds_tb"]/table/tbody/tr[@class="dtd2"]/td[1]/text()'):
    print(i)

A more verbose method
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from bs4 import BeautifulSoup as bs
import pandas as pd
import copy
d = webdriver.Chrome()
d.get('https://1x2.lucksport.com/result_en.shtml?dt=2019-04-12&cid=156')
WebDriverWait(d, 20).until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, "#odds_tb tr[class]")))
soup = bs(d.page_source, 'lxml')
rows = soup.select('#odds_tb tr[class]')
results = []
i = 1
headers = ['Competition', 'Date', 'Match' ,'OddsType', 'Home Win', 'Draw', 'Away Win', 'Result']
for row in rows[1:]:
    cols = [td.text for td in row.select('td')]
    if i % 2 == 1:
        record = {'Competition': cols[0],
                  'Date': cols[1],
                  'Match': ' v '.join([cols[2], cols[6]]),
                  'OddsType': 'average early odds',
                  'Home Win': cols[3],
                  'Draw': cols[4],
                  'Away Win': cols[5],
                  'Result': cols[7]}
    else:
        record['OddsType'] = 'average live odds'
        record['Home Win'] = cols[0]
        record['Draw'] = cols[1]
        record['Away Win'] = cols[2]
    results.append(copy.deepcopy(record))
    i += 1
df = pd.DataFrame(results, columns = headers)
df.to_csv(r'C:\Users\User\Desktop\data.csv', sep=',', encoding='utf-8-sig',index = False )
d.quit()

You can use both selenium and pandas to get the table info.
from selenium import webdriver
import time
import pandas as pd
driver = webdriver.Chrome()
driver.get("https://1x2.lucksport.com/result_en.shtml?dt=2019-04-12&cid=156")
time.sleep(3)
htmlcontent=driver.page_source
tables=pd.read_html(htmlcontent)
print(tables[14])
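tables[14] is presumably the odds table on this particular page. Since pd.read_html returns one DataFrame per <table> element, a quick way to find the right index if the layout changes (a small sketch) is to print each table's shape and eyeball the result:
for idx, tbl in enumerate(tables):
    # the odds table will stand out by its row/column count
    print(idx, tbl.shape)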

It looks like you want to iterate the odd trs and then include the "next" tr.
In CSS that looks like:
.dtd1:nth-child(odd), .dtd2:nth-child(odd)
You can get the odd rows with XPath too; just add:
[position() mod 2 = 1]
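A minimal lxml sketch of that idea against the question's odds_tb table (untested against the live page): take every other classed row and pull its following sibling for the second set of odds.
from lxml import html

tree = html.fromstring(driver.page_source)
# every other classed row inside the odds table (1st, 3rd, 5th, ...)
for row in tree.xpath('//*[@id="odds_tb"]//tr[@class][position() mod 2 = 1]'):
    early = row.xpath('./td/text()')                           # first row of the pair
    live = row.xpath('./following-sibling::tr[1]/td/text()')   # the row right after it
    print(early, live)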


Appending table values from a webpage to csv

I want to get the table from a webpage.
import os
from webdriver_manager.chrome import ChromeDriverManager
import time
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
options = Options()
options.add_argument('--ignore-certificate-errors')
options.add_argument('--start-maximized')
options.page_load_strategy = 'eager'
options.add_argument("--headless");
driver = webdriver.Chrome(options=options)
wait = WebDriverWait(driver, 20)
driver.get("https://munafasutra.com/nse/dividends")
file_object = open('divident.csv', 'a')
(screenshot of the desired output table omitted)
How do I get the first table and its values?
You have to look at the HTML and locate the WebElement that holds that first table (right-clicking and choosing "Inspect" will do the job).
You can save that WebElement using the following line of code:
first_table = driver.find_element_by_xpath("//div[@id = 'co']//table[1]")  # the [1] is not strictly necessary, since find_element_by_xpath already returns only the first match
Then, if you look at how the data is organized inside that table, you can see that each row is wrapped in a tr element. So, to write it to a CSV file, I would suggest writing row by row with the following code:
rows = first_table.find_elements_by_xpath("./tbody/tr")
for row in rows:
    entries_of_the_row = row.find_elements_by_xpath("./td")
    row_to_csv = []
    for entry in entries_of_the_row:
        row_to_csv.append(entry.text)
    file_object.write(f"{row_to_csv[0]}, {row_to_csv[1]}, {row_to_csv[2]}, {row_to_csv[3]}, {row_to_csv[4]}\n")
file_object.close()
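A small variant of the same loop, assuming the first_table element from above: the csv module handles quoting, so cells that contain commas do not corrupt the file.
import csv

with open('divident.csv', 'a', newline='', encoding='utf-8') as f:
    writer = csv.writer(f)
    for row in first_table.find_elements_by_xpath("./tbody/tr"):
        # one CSV row per table row, however many cells it has
        writer.writerow([td.text for td in row.find_elements_by_xpath("./td")])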
You can use the XPath below to retrieve the first table's values:
//h3[text()=' Earlier dividends announced by companies ']/preceding-sibling::table/descendant::td
Something like this:
driver.get("https://munafasutra.com/nse/dividends")
first_table = driver.find_elements(By.XPATH, "//h3[text()=' Earlier dividends announced by companies ']/preceding-sibling::table/descendant::td")
for first in first_table:
    print(first.text)
You can use BeautifulSoup to get the table data; Selenium is not required if you just want to extract the page content.
You need to import the packages below:
import csv
from urllib.request import urlopen
from bs4 import BeautifulSoup
You can extract the HTML of the table using the code below (the soup variable will contain the parsed HTML of the entire page):
url_munafasutra = "https://munafasutra.com/nse/dividends"
html_munafasutra = urlopen(url_munafasutra)
soup = BeautifulSoup(html_munafasutra, 'html')
Below is the code to extract the first table ('table' is the tag name, and the index in [] selects which table on the page we want):
first_table = soup.find_all('table')[0]
You can also pass attributes along with the tag name to identify the table unambiguously.
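For example, if the table carried a distinguishing class or id (the class name below is hypothetical), you could match it directly instead of indexing by position:
first_table = soup.find('table', attrs={'class': 'dividend-table'})  # hypothetical class name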
Below is the code to extract all the rows of the selected table:
all_rows = first_table.findAll("tr")
Use the code below to write the data to a csv file:
with open("C:\\Users\\abhay\\.spyder-py3\\table_extract.csv", "wt+", newline="") as f:
table_to_csv = csv.writer(f)
for row in all_rows:
row_data = []
for cell in row.findAll(["td", "th"]):
row_data.append(cell.get_text())
table_to_csv.writerow(row_data)
Below is the complete code to extract the first table's data to csv:
import csv
from urllib.request import urlopen
from bs4 import BeautifulSoup
url_munafasutra = "https://munafasutra.com/nse/dividends"
html_munafasutra = urlopen(url_munafasutra)
soup = BeautifulSoup(html_munafasutra, 'html')
first_table = soup.find_all('table')[0]
all_rows = first_table.findAll("tr")
with open("C:\\Users\\abhay\\.spyder-py3\\table_extract.csv", "wt+", newline="") as f:
table_to_csv = csv.writer(f)
for row in all_rows:
row_data = []
for cell in row.findAll(["td", "th"]):
row_data.append(cell.get_text())
table_to_csv.writerow(row_data)
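For what it's worth, pandas can do the same extraction in a couple of lines (a sketch, assuming the first <table> on the page is the one wanted and an HTML parser such as lxml is installed):
import pandas as pd

df = pd.read_html("https://munafasutra.com/nse/dividends")[0]  # first table on the page
df.to_csv("table_extract.csv", index=False)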

How to find the nth child heading and print the text using beautifulsoup in python

With my current code I am able to get the first project heading, but I also want the subheading ("FSI Details") to be printed. I am not able to get that second heading using BeautifulSoup. I tried the reference for nth-child.
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.select import Select
from selenium.webdriver.common.keys import Keys
import urllib.request
from bs4 import BeautifulSoup
import time
import pandas as pd
import os

url = 'https://maharerait.mahaonline.gov.in'
chrome_path = r'C:/Users/User/AppData/Local/Programs/Python/Python36/Scripts/chromedriver.exe'
driver = webdriver.Chrome(executable_path=chrome_path)
driver.get(url)
WebDriverWait(driver, 20).until(EC.element_to_be_clickable(
    (By.XPATH, "//div[@class='search-pro-details']//a[contains(.,'Search Project Details')]"))).click()
Registered_Project_radio = WebDriverWait(driver, 10).until(
    EC.element_to_be_clickable((By.ID, "Promoter")))
driver.execute_script("arguments[0].click();", Registered_Project_radio)
Application = driver.find_element_by_id("CertiNo")
Application.send_keys("P50500000005")
Search = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.ID, "btnSearch")))
driver.execute_script("arguments[0].click();", Search)
View = [item.get_attribute('href') for item in driver.find_elements_by_tag_name("a")
        if item.get_attribute('href') is not None]
View = View[0]
driver.get(View)
request = urllib.request.Request(View)
html = urllib.request.urlopen(request).read()
soup = BeautifulSoup(html, 'html.parser')
divPInfo2 = soup.find("div", {"id": "DivProject"})
Project_title = divPInfo2.find("div", {'class': 'x_panel'}, recursive=False).find(
    "div", {'class': 'x_title'}).find("h2").text.strip()
print(Project_title)
Project_title1 = divPInfo2.find("div", {'class': 'x_panel'}, recursive=False).find(
    "div", {'class': 'x_title'}).find_all("h2")[1].text.strip()
print(Project_title1)  # the (FSI Details) heading should be printed here
You can try the CSS selector :contains("FSI Details"), which selects the element containing the string "FSI Details". This code prints the labels and values of the "FSI Details" section:
import requests
from bs4 import BeautifulSoup
url = 'https://maharerait.mahaonline.gov.in/PrintPreview/PrintPreview?q=BPUvrrjIzYs%2f2hwYj1YIOfflh9NisZW6zTns2KLjHBZn6cbQ008s91nzlFrDxVvLwR1vAeLID0%2bo%2bD0H0Z6o2t%2b5P%2b%2fbBOcHCbMQHU8gkwdNZJnbbfu6N7mWSpgKXt4AiQyzuEpoDE7FX6HZypqsGXz4ObYD4KpyRzCsFJaWTgA%3d'
soup = BeautifulSoup(requests.get(url).text, 'lxml')
fsi_content = soup.select_one('.x_title:contains("FSI Details") + .x_content')
print('{: <160}{: <8}'.format('Label', 'Value'))
print('-' * 168)
for label, text in zip(fsi_content.select('label'), fsi_content.select('div:has(> label) + div')):
    print('{: <160}{: <8}'.format(label.get_text(strip=True), text.get_text(strip=True)))
Prints:
Label Value
------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Built-up-Area as per Proposed FSI (In sqmts) ( Proposed but not sanctioned) ( As soon as approved, should be immediately updated in Approved FSI) 0
Built-up-Area as per Approved FSI (In sqmts) 11566.50
TotalFSI 11566.50
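Note that :contains() is a non-standard pseudo-class supported by bs4's Soup Sieve engine; if your bs4 version does not accept it, a rough equivalent (a sketch, assuming the same x_title/x_content structure) is to match on the heading text directly:
fsi_title = soup.find(lambda tag: 'x_title' in (tag.get('class') or []) and 'FSI Details' in tag.get_text())
fsi_content = fsi_title.find_next_sibling(class_='x_content') if fsi_title else None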
Further reading:
CSS Selectors Reference

Getting the table columns, where the first column contains a link that must be clicked to get its data

I have the link below:
http://www.igrmaharashtra.gov.in/eASR/eASRCommon.aspx?hDistName=Pune
I want to scrape this data into Excel in a proper format. The SurveyNo link only shows its data when clicked, so I want the row-wise data together with the data revealed by clicking each survey number.
I also want the format shown in the attached image (the desired output in Excel).
import urllib.request
from bs4 import BeautifulSoup
import csv
import os
from selenium import webdriver
from selenium.webdriver.support.select import Select
from selenium.webdriver.common.keys import Keys
import time
url = 'http://www.igrmaharashtra.gov.in/eASR/eASRCommon.aspx?hDistName=Pune'
chrome_path =r'C:/Users/User/AppData/Local/Programs/Python/Python36/Scripts/chromedriver.exe'
driver = webdriver.Chrome(executable_path=chrome_path)
driver.implicitly_wait(10)
driver.get(url)
Select(driver.find_element_by_name('ctl00$ContentPlaceHolder5$ddlTaluka')).select_by_value('5')
Select(driver.find_element_by_name('ctl00$ContentPlaceHolder5$ddlVillage')).select_by_value('1872')
soup=BeautifulSoup(driver.page_source, 'lxml')
table = soup.find("table" , attrs = {'id':'ctl00_ContentPlaceHolder5_grdUrbanSubZoneWiseRate' })
with open('Baner.csv', 'w', encoding='utf-16', newline='') as csvfile:
    f = csv.writer(csvfile, dialect='excel')
    f.writerow(['SurveyNo', 'Subdivision', 'Open ground', 'Resident house', 'Offices', 'Shops', 'Industrial', 'Unit (Rs./)'])  # headers
    rows = table.find_all('tr')[1:]
    data = []
    for tr in rows:
        cols = tr.find_all('td')
        for td in cols:
            links = driver.find_elements_by_link_text('SurveyNo')
            l = len(links)
            data12 = []
            for i in range(l):
                newlinks = driver.find_elements_by_link_text('SurveyNo')
                newlinks[i].click()
                soup = BeautifulSoup(driver.page_source, 'lxml')
                td1 = soup.find("textarea", attrs={'class': 'textbox'})
                data12.append(td1.text)
            data.append(td.text)
            data.append(data12)
    print(data)
Please see the attached image; that is the format in which I need the scraped output.
You could do the following and simply re-arrange and rename the columns at the end as desired. There is an assumption that SurveyNo exists for all wanted rows. I extract the hrefs from the SurveyNo cells, which are actually executable javascript: strings you can pass to execute_script to reveal the survey numbers without worrying about stale elements, etc.
from selenium import webdriver
import pandas as pd
url = 'http://www.igrmaharashtra.gov.in/eASR/eASRCommon.aspx?hDistName=Pune'
d = webdriver.Chrome()
d.get(url)
d.find_element_by_css_selector('[value="5"]').click()
d.find_element_by_css_selector('[value="1872"]').click()
tableElement = d.find_element_by_id('ctl00_ContentPlaceHolder5_grdUrbanSubZoneWiseRate')
table = pd.read_html(tableElement.get_attribute('outerHTML'))[0]
table.columns = table.iloc[0]
table = table.iloc[1:]
table = table[table.Select == 'SurveyNo'] #assumption SurveyNo exists for all wanted rows
surveyNo_scripts = [item.get_attribute('href') for item in d.find_elements_by_css_selector("#ctl00_ContentPlaceHolder5_grdUrbanSubZoneWiseRate [href*='Select$']")]
i = 0
for script in surveyNo_scripts:
    d.execute_script(script)
    surveys = d.find_element_by_css_selector('textarea').text
    table.iloc[i]['Select'] = surveys
    i += 1
print(table)
#rename and re-order columns as required
table.to_csv(r"C:\Users\User\Desktop\Data.csv", sep=',', encoding='utf-8-sig',index = False )
Output before rename and re-order (screenshot omitted):
In a loop you can concat all the dfs and then write them out in one go (my preference, shown here), or append later as shown here.
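A rough sketch of that concat approach (the loop here is illustrative; in practice you would build one DataFrame like table per taluka/village selection):
import pandas as pd

dfs = []
for df_part in (table,):  # placeholder: one DataFrame per selection/page
    dfs.append(df_part)
final = pd.concat(dfs, ignore_index=True)
final.to_csv(r"C:\Users\User\Desktop\Data.csv", sep=',', encoding='utf-8-sig', index=False)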

Cannot extract the html table

I want to harvest information from a table on a given website using BeautifulSoup and Python 3.
I have also tried the XPath approach but still cannot find a way to obtain the data.
from urllib.request import urlopen
from bs4 import BeautifulSoup

coaches = 'https://www.badmintonengland.co.uk/coach/find-a-coach'
coachespage = urlopen(coaches)
soup = BeautifulSoup(coachespage, features="html.parser")
data = soup.find_all("tbody", {"id": "JGrid-az-com-1031-tbody"})

def crawler(table):
    for mytable in table:
        try:
            rows = mytable.find_all('tr')
            for tr in rows:
                cols = tr.find_all('td')
                for td in cols:
                    return td.text
        except:
            raise ValueError("no data")

print(crawler(data))
If you use selenium to make the selections and then pass the page_source to pd.read_html to get the table, the JavaScript is allowed to run and populate the values.
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import pandas as pd
import time
url = 'https://www.badmintonengland.co.uk/coach/find-a-coach'
driver = webdriver.Chrome()
driver.get(url)
ele = driver.find_element_by_css_selector('.az-triggers-panel a') #distance dropdown
driver.execute_script("arguments[0].scrollIntoView();", ele)
ele.click()
option = WebDriverWait(driver,10).until(EC.presence_of_element_located((By.ID, "comboOption-az-com-1015-8"))) # any distance
option.click()
driver.find_element_by_css_selector('.az-btn-text').click()
time.sleep(5) #seek better wait condition for page update
tables = pd.read_html(driver.page_source)
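pd.read_html returns a DataFrame for every <table> on the page, so you still need to pick out the coach listing. A quick, heuristic sketch (the "largest table" assumption may need adjusting):
# inspect the candidates first
for idx, t in enumerate(tables):
    print(idx, t.shape)
coach_df = max(tables, key=len)  # assumption: the coach listing is the longest table
print(coach_df.head())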

How do I get the stock symbol by Selenium?

I am trying to grab the stock symbol from this page.
This is my code:
from selenium import webdriver
import pandas as pd
url = 'https://stock360.hkej.com/StockScreener/profession/tab/profile'
browser = webdriver.Chrome('C:/chromedriver_win32/chromedriver.exe')
browser.get(url)
dfs = pd.read_html(browser.page_source)
print(dfs)
browser.close()
This is the output:
dfs
[ 0
0 加入至心水組合:請先登入或註冊成為會員, Empty DataFrame
Columns: [沒有符合以上篩選條件的股票。]
Index: [], 0
0 加入至心水組合:請先登入或註冊成為會員]
(The Chinese text reads "Add to watchlist: please log in or register as a member first" and "No stocks match the above filter criteria.")
I know the page uses JavaScript and I already used Selenium. Why can't I get the table? And how do I get the stock symbols shown on the page? Thanks.
Additional info: after clicking the link, choose the second option from the GREEN drop-down list, and the table above will be shown.
One way is as follows
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import pandas as pd
url = 'https://stock360.hkej.com/StockScreener/profession/tab/profile'
driver = webdriver.Chrome()
driver.get(url)
WebDriverWait(driver,10).until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, 'option')))
# select the second dropdown option by its value attribute whose value is mb
driver.find_element_by_css_selector('[value=mb]').click()
#wait for blue button to be clickable and click
WebDriverWait(driver, 5).until(EC.element_to_be_clickable((By.CSS_SELECTOR, '[href*=submit]'))).click()
#select table
table = driver.find_element_by_css_selector('.dt960')
#transfer html of table to pandas read_html which handles tables
df = pd.read_html(table.get_attribute('outerHTML'))[0] #grab the table
df2 = df.drop(df.columns[0], axis=1).dropna(how='all') #lose the nan column and rows
df2.rename(columns=df.iloc[0], inplace = True) #set headers same as row 1
df2.drop(df.index[0], inplace = True) #lose row 1
df2 = df2.reset_index(drop=True) #re-index
print(df2)
driver.quit()
