I am trying to scrape multiple pages with Selenium, but it scrapes only 1 page. What mistake am I making, and is there a solution? This is the page link: https://zoekeenadvocaat.advocatenorde.nl/zoeken?q=&type=advocaten&limiet=10&sortering=afstand&filters%5Brechtsgebieden%5D=%5B%5D&filters%5Bspecialisatie%5D=0&filters%5Btoevoegingen%5D=0&locatie%5Badres%5D=Holland&locatie%5Bgeo%5D%5Blat%5D=52.132633&locatie%5Bgeo%5D%5Blng%5D=5.291266&locatie%5Bstraal%5D=56&locatie%5Bhash%5D=67eb2b8d0aab60ec69666532ff9527c9&weergave=lijst&pagina=1
import time
from selenium import webdriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from webdriver_manager.chrome import ChromeDriverManager
import pandas as pd
options = webdriver.ChromeOptions()
options.add_argument("--no-sandbox")
options.add_argument("--disable-gpu")
options.add_argument("--window-size=1920x1080")
options.add_argument("--disable-extensions")
chrome_driver = webdriver.Chrome(
service=Service(ChromeDriverManager().install()),
options=options
)
def supplyvan_scraper():
    with chrome_driver as driver:
        driver.implicitly_wait(15)
        URL = 'https://zoekeenadvocaat.advocatenorde.nl/zoeken?qvrtqca=&filters%5Brechtsgebieden%5D=%5B%5D&ypb=&locatie%5Badres%5D=Holland&locatie%5Bgeo%5D=%7B%22lat%22%3A%2252.132633%22%2C%22lng%22%3A%225.291266%22%7D&locatie%5Bstraal%5D=56&filters%5Bspecialisatie%5D=0&filters%5Btoevoegingen%5D=0&locatie%5Bhash%5D='
        driver.get(URL)
        time.sleep(3)
        page = 1
        page_links = [element.get_attribute('href') for element in
                      driver.find_elements(By.XPATH, "//span[@class='h4 no-margin-bottom']//a")]
        data = []
        for link in page_links:
            wev = {}
            driver.get(link)
            time.sleep(2)
            try:
                title = driver.find_element(By.CSS_SELECTOR, '.title h3').text
            except:
                pass
            wev['title'] = title
            try:
                advocaten = driver.find_element(By.CSS_SELECTOR, ".secondary").text
            except:
                pass
            wev['advocaten'] = advocaten
            details = driver.find_elements(By.XPATH, "//section[@class='lawyer-info']")
            for detail in details:
                try:
                    address = detail.find_element(By.XPATH, "//div[@class='column medium-6']").text.strip()
                except:
                    pass
                wev['address'] = address
                try:
                    email = detail.find_element(By.XPATH, "//div[@class='row'][3]//div[@class='column small-9']//a").get_attribute('href')
                except:
                    pass
                wev['email'] = email
                try:
                    website = detail.find_element(By.XPATH, "//div[@class='row'][4]//div[@class='column small-9']//a").get_attribute('href')
                except:
                    pass
                wev['website'] = website
                data.append(wev)
            if len(driver.find_elements(By.XPATH, "//a[@class='button next']")) > 0:
                url = "https://zoekeenadvocaat.advocatenorde.nl/zoeken?q=&type=advocaten&limiet=10&sortering=afstand&filters%5Brechtsgebieden%5D=%5B%5D&filters%5Bspecialisatie%5D=0&filters%5Btoevoegingen%5D=0&locatie%5Badres%5D=Holland&locatie%5Bgeo%5D%5Blat%5D=52.132633&locatie%5Bgeo%5D%5Blng%5D=5.291266&locatie%5Bstraal%5D=56&locatie%5Bhash%5D=67eb2b8d0aab60ec69666532ff9527c9&weergave=lijst&pagina={}".format(page)
                driver.get(url)
                page += 1
                if int(page) > 5:
                    break
            else:
                break
        df = pd.DataFrame(data)
        print(df)
You can handle the pagination in the starting URL with a for loop, as follows:
import time
from selenium import webdriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from webdriver_manager.chrome import ChromeDriverManager
import pandas as pd
options = webdriver.ChromeOptions()
options.add_argument("--no-sandbox")
options.add_argument("--disable-gpu")
options.add_argument("--window-size=1920x1080")
options.add_argument("--disable-extensions")
chrome_driver = webdriver.Chrome(
service=Service(ChromeDriverManager().install()),
options=options)
data=[]
def supplyvan_scraper():
    with chrome_driver as driver:
        driver.implicitly_wait(15)
        URL = 'https://zoekeenadvocaat.advocatenorde.nl/zoeken?q=&type=advocaten&limiet=10&sortering=afstand&filters%5Brechtsgebieden%5D=%5B%5D&filters%5Bspecialisatie%5D=0&filters%5Btoevoegingen%5D=0&locatie%5Badres%5D=Holland&locatie%5Bgeo%5D%5Blat%5D=52.132633&locatie%5Bgeo%5D%5Blng%5D=5.291266&locatie%5Bstraal%5D=56&locatie%5Bhash%5D=67eb2b8d0aab60ec69666532ff9527c9&weergave=lijst&pagina={page}'
        # loop over listing pages 1-10 by filling in the "pagina" query parameter
        for page in range(1, 11):
            driver.get(URL.format(page=page))
            time.sleep(3)
            page_links = [element.get_attribute('href') for element in driver.find_elements(By.XPATH, "//span[@class='h4 no-margin-bottom']//a")]
            for link in page_links:
                wev = {}
                driver.get(link)
                time.sleep(2)
                try:
                    title = driver.find_element(By.CSS_SELECTOR, '.title h3').text
                except:
                    pass
                wev['title'] = title
                try:
                    advocaten = driver.find_element(By.CSS_SELECTOR, ".secondary").text
                except:
                    pass
                wev['advocaten'] = advocaten
                details = driver.find_elements(By.XPATH, "//section[@class='lawyer-info']")
                for detail in details:
                    try:
                        address = detail.find_element(By.XPATH, "//div[@class='column medium-6']").text.strip()
                    except:
                        pass
                    wev['address'] = address
                    try:
                        email = detail.find_element(By.XPATH, "//div[@class='row'][3]//div[@class='column small-9']//a").get_attribute('href')
                    except:
                        pass
                    wev['email'] = email
                    try:
                        website = detail.find_element(By.XPATH, "//div[@class='row'][4]//div[@class='column small-9']//a").get_attribute('href')
                    except:
                        pass
                    wev['website'] = website
                    data.append(wev)
        df = pd.DataFrame(data)
        print(df)
You can also try:
URL = 'https://zoekeenadvocaat.advocatenorde.nl/zoeken?q=&type=advocaten&limiet=10&sortering=afstand&filters%5Brechtsgebieden%5D=%5B%5D&filters%5Bspecialisatie%5D=0&filters%5Btoevoegingen%5D=0&locatie%5Badres%5D=Holland&locatie%5Bgeo%5D%5Blat%5D=52.132633&locatie%5Bgeo%5D%5Blng%5D=5.291266&locatie%5Bstraal%5D=56&locatie%5Bhash%5D=67eb2b8d0aab60ec69666532ff9527c9&weergave=lijst&pagina={page}'

for page in range(1, 11):
    url = URL.format(page=page)
    driver.get(url)
I need to get data from this website: https://www.eex.com/en/market-data/natural-gas/spot
However, changing the date in my script is not working.
I need to get data for every available date, so I need to change the date with Selenium.
Please help, I'm new to Python.
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from bs4 import BeautifulSoup as bs
import pandas as pd
from selenium_stealth import stealth
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
url ="https://www.eex.com/en/market-data/natural-gas/spot"
chrome_options = Options()
chrome_options.add_argument("--no-sandbox")
chrome_options.add_argument("--headless")
chrome_options.add_argument("start-maximized")
browser = webdriver.Chrome(executable_path="chromedriver1/chromedriver", options=chrome_options)
browser.get("https://www.eex.com/en/market-data/natural-gas/spot")
time.sleep(10)
date_picker = WebDriverWait(browser, 20).until(EC.visibility_of_element_located((By.XPATH, '//*[@id="symbolheader_ngs"]/div/div/div/input')))
date_picker.send_keys("2023-01-23")
time.sleep(20)
page_source = browser.page_source
s = bs(page_source)
table = s.select('table')[1]
final_list = []
for row in table.select('tr'):
    final_list.append([x.text for x in row.find_all(['td', 'th'])])
final_df = pd.DataFrame(final_list[2:], columns = final_list[:1])
final_df.columns = ['Spot', 'Last Price', 'Last Volume', 'End of Day Index', 'Volume Exchange','del']
df=final_df.drop('del',axis=1)
browser.quit()
df.to_excel('final_df.xlsx', index = False)
You need to clear the input -> enter the date -> press Enter. You also want to wait for the element to be clickable, not merely visible. Lastly, you need to pick a date that actually has data.
from selenium.webdriver.common.keys import Keys
date_picker = WebDriverWait(browser, 30).until(
    EC.element_to_be_clickable((By.XPATH, """//*[@id="symbolheader_ngs"]//*/input"""))
)
date_picker.clear()
date_picker.send_keys("2023-01-20")
date_picker.send_keys(Keys.ENTER)
If you comment out chrome_options.add_argument("--headless"), you will see it happen in the browser.
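Since the question asks for data from every available date, here is a minimal sketch of how the picker interaction above could be looped over several dates, re-parsing the table after each change. It reuses the browser, WebDriverWait, EC, and By objects already set up above; the list of dates, the sleep length, and the table index are assumptions, not verified against the site:

from selenium.webdriver.common.keys import Keys
from bs4 import BeautifulSoup as bs
import pandas as pd
import time

dates = ["2023-01-20", "2023-01-23"]  # hypothetical trading days to collect
frames = []
for day in dates:
    date_picker = WebDriverWait(browser, 30).until(
        EC.element_to_be_clickable((By.XPATH, '//*[@id="symbolheader_ngs"]//*/input'))
    )
    date_picker.clear()
    date_picker.send_keys(day)
    date_picker.send_keys(Keys.ENTER)
    time.sleep(10)  # crude wait for the table to refresh
    soup = bs(browser.page_source, 'html.parser')
    table = soup.select('table')[1]  # same table index as in the question
    rows = [[cell.text for cell in row.find_all(['td', 'th'])] for row in table.select('tr')]
    day_df = pd.DataFrame(rows[2:])
    day_df['date'] = day  # remember which date each row came from
    frames.append(day_df)

all_days = pd.concat(frames, ignore_index=True)
all_days.to_excel('all_days.xlsx', index=False)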
I am trying to scrape the data, but the data gets overwritten and only the data of page 2 ends up in the CSV file. Can you recommend a solution? How can I fix this? I think the data is overwritten because of the for loop. Thank you. This is the page link: https://www.askgamblers.com/online-casinos/countries/uk/
from selenium import webdriver
import time
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from webdriver_manager.chrome import ChromeDriverManager
from bs4 import BeautifulSoup
import pandas as pd
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from csv import writer
options = webdriver.ChromeOptions()
options.add_argument("--no-sandbox")
options.add_argument("--disable-gpu")
options.add_argument("--window-size=1920x1080")
options.add_argument("--disable-extensions")
driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()))
wait = WebDriverWait(driver, 20)
for page in range(1, 3):
    URL = 'https://www.askgamblers.com/online-casinos/countries/uk/{page}'.format(page=page)
    driver.get(URL)
    time.sleep(2)
    urls = []
    data = []
    page_links = driver.find_elements(By.XPATH, "//div[@class='card__desc']//a[starts-with(@href, '/online')]")
    for link in page_links:
        href = link.get_attribute("href")
        urls.append(href)
    product = []
    for url in urls:
        wev = {}
        driver.get(url)
        time.sleep(1)
        try:
            title = driver.find_element(By.CSS_SELECTOR, "h1.review-intro__title").text
        except:
            pass
        wev['Title'] = title
        soup = BeautifulSoup(driver.page_source, "lxml")
        pays = soup.select("div#tabPayments")
        for pay in pays:
            try:
                t1 = pay.select_one(".review-details-wrapper:nth-child(1) .review-details__item:nth-child(1) .review-details__text").get_text(' ', strip=True)
            except:
                pass
            wev['deposit_method'] = t1
            try:
                t2 = pay.select_one(".review-details-wrapper:nth-child(1) .review-details__item+ .review-details__item .review-details__text").get_text(' ', strip=True)
            except:
                pass
            wev['curriences'] = t2
            try:
                t3 = pay.select_one(".review-details-wrapper+ .review-details-wrapper .review-details__item:nth-child(1) .review-details__text").get_text(' ', strip=True)
            except:
                pass
            wev['with_drawl method'] = t3
            try:
                t4 = pay.select_one(".review-details-wrapper+ .review-details-wrapper .review-details__item:nth-child(2) .review-details__text")
                t4 = [i.replace("\n", "") for i in t4 if i.text]
            except:
                pass
            wev['with_drawl_time'] = t4
            product.append(wev)
    df = pd.DataFrame(product)
    df.to_csv('casino.csv')
All results in one file:
from selenium import webdriver
import time
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from webdriver_manager.chrome import ChromeDriverManager
from bs4 import BeautifulSoup
import pandas as pd
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from csv import writer
options = webdriver.ChromeOptions()
options.add_argument("--no-sandbox")
options.add_argument("--disable-gpu")
options.add_argument("--window-size=1920x1080")
options.add_argument("--disable-extensions")
driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()))
wait = WebDriverWait(driver, 20)
# product is created once, before the page loop, so every page appends to the same list
product = []
for page in range(1, 4):
    URL = 'https://www.askgamblers.com/online-casinos/countries/uk/{page}'.format(page=page)
    driver.get(URL)
    time.sleep(2)
    urls = []
    data = []
    page_links = driver.find_elements(By.XPATH, "//div[@class='card__desc']//a[starts-with(@href, '/online')]")
    for link in page_links:
        href = link.get_attribute("href")
        urls.append(href)
    for url in urls:
        wev = {}
        driver.get(url)
        time.sleep(1)
        try:
            title = driver.find_element(By.CSS_SELECTOR, "h1.review-intro__title").text
        except:
            pass
        wev['Title'] = title
        soup = BeautifulSoup(driver.page_source, "lxml")
        pays = soup.select("div#tabPayments")
        for pay in pays:
            try:
                t1 = pay.select_one(".review-details-wrapper:nth-child(1) .review-details__item:nth-child(1) .review-details__text").get_text(' ', strip=True)
            except:
                pass
            wev['deposit_method'] = t1
            try:
                t2 = pay.select_one(".review-details-wrapper:nth-child(1) .review-details__item+ .review-details__item .review-details__text").get_text(' ', strip=True)
            except:
                pass
            wev['curriences'] = t2
            try:
                t3 = pay.select_one(".review-details-wrapper+ .review-details-wrapper .review-details__item:nth-child(1) .review-details__text").get_text(' ', strip=True)
            except:
                pass
            wev['with_drawl method'] = t3
            try:
                t4 = pay.select_one(".review-details-wrapper+ .review-details-wrapper .review-details__item:nth-child(2) .review-details__text")
                t4 = [i.replace("\n", "") for i in t4 if i.text]
            except:
                pass
            wev['with_drawl_time'] = t4
            product.append(wev)

# build the DataFrame once, after all pages have been scraped
df = pd.DataFrame(product)
df.to_csv('casino.csv')
In the first loop it runs only 2 times.
Change it to (1, 4) as below; then it will give you pages [1, 2, 3]:
for page in range(1,4):
Then the data gets overwritten because the output file name is the same.
Change the file name as below:
df.to_csv(f'casino_{page}.csv')
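If you still want everything in a single file afterwards, a minimal sketch (assuming the per-page files casino_1.csv, casino_2.csv, ... were written as above) is to concatenate them with pandas:

import glob
import pandas as pd

# read every per-page CSV written above and stack them into a single file
parts = sorted(glob.glob('casino_*.csv'))
combined = pd.concat((pd.read_csv(p, index_col=0) for p in parts), ignore_index=True)
combined.to_csv('casino_all.csv', index=False)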
Problem: I don't know why I am getting errors on this line: wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, "#typeahead-input-control-35 .up-menu-item-text"))).click(). I do not get any error if I run this code in a separate file, but when I merge it into my main code, this line does not work.
Problem line: wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, "#typeahead-input-control-35 .up-menu-item-text"))).click()
Code that works fine when run in a separate file:
import time
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
options = Options()
options.add_argument("start-maximized")
webdriver_service = Service('F:\\work\\chromedriver_win32\\chromedriver.exe')
driver = webdriver.Chrome(options=options, service=webdriver_service)
wait = WebDriverWait(driver, 10)
url = "https://www.upwork.com/nx/jobs/search/?sort=recency"
driver.get(url)
keys = ["Web Scraping", "Selenium Webdriver", "Data Scraping", "Selenium", "Beautiful Soup", "Scrapy", "Data Extraction", "Automation"]
wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, 'button#onetrust-accept-btn-handler')))
time.sleep(5)
wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, 'button#onetrust-accept-btn-handler'))).click()
wait.until(EC.element_to_be_clickable((By.XPATH, '//button[contains(@title,"Advanced Search")]'))).click()
wait.until(EC.element_to_be_clickable((By.XPATH, '//input[contains(@aria-labelledby,"tokenizer-label")]'))).clear()
wait.until(EC.element_to_be_clickable((By.XPATH, '//input[contains(@aria-labelledby,"tokenizer-label")]'))).click()
time.sleep(3)
for i in range(len(keys)):
    search_field = wait.until(EC.element_to_be_clickable((By.XPATH, '//input[contains(@aria-labelledby,"tokenizer-label")]')))
    search_field.click()
    for character in keys[i]:
        search_field.send_keys(character)
        time.sleep(0.05)
    time.sleep(2)
    wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, "#typeahead-input-control-35 .up-menu-item-text"))).click()
    time.sleep(2)
    wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, '[data-test="modal-advanced-search-search-btn"]'))).click()
The same line does not work in this code:
import time
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.common.proxy import Proxy, ProxyType
from fake_useragent import UserAgent
import pyttsx3
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
def main():
    options = Options()
    service = Service('F:\\work\\chromedriver_win32\\chromedriver.exe')
    options.add_argument("start-maximized")
    options.add_argument('--disable-blink-features=AutomationControlled')  # hide the automation flag
    options.add_experimental_option("excludeSwitches", ["enable-automation"])  # disable the "Chrome is being controlled" message
    options.add_experimental_option('useAutomationExtension', False)  # turn off useAutomationExtension
    prefs = {"credentials_enable_service": False,
             "profile.password_manager_enabled": False}
    options.add_experimental_option("prefs", prefs)
    ua = UserAgent()
    userAgent = ua.random
    options.add_argument(f'user-agent={userAgent}')
    driver = webdriver.Chrome(service=service, options=options)
    wait = WebDriverWait(driver, 10)

    url = 'https://www.upwork.com/nx/jobs/search/?sort=recency'
    driver.get(url)
    time.sleep(7)
    logbtn = driver.find_element(By.XPATH, '//a[contains(@class,"nav-item login-link d-none d-lg-block px-20")]')
    logbtn.click()
    time.sleep(7)
    pop_one = driver.find_element(By.XPATH, '//*[@id="onetrust-accept-btn-handler"]')
    pop_one.click()
    time.sleep(7)
    search_box = driver.find_element(By.NAME, "login[username]")
    search_box.send_keys('my_mailaccountid@gmail.com')
    time.sleep(7)
    Login_button = driver.find_element(By.ID, "login_password_continue")
    Login_button.submit()
    time.sleep(7)
    pass_box = driver.find_element(By.ID, "login_password")
    pass_box.send_keys('myupworkpassword000')
    Login_btn = driver.find_element(By.ID, "login_control_continue")
    Login_btn.submit()
    time.sleep(7)
    closebtn = driver.find_element(By.XPATH, '//*[@id="main"]/div/div/aside/div/div[1]/div[1]/section/div[2]/div[2]/div/div/div/div[3]/div/div[2]/div[2]/div[2]/div[3]/div/button')
    closebtn.click()

    url = 'https://www.upwork.com/nx/jobs/search/?sort=recency'
    driver.get(url)
    time.sleep(7)
    for i in range(0, 20):
        box1 = driver.find_element(By.XPATH, '//span[contains(text(),"Intermediate")]')
        box1.click()
        time.sleep(3)
        box2 = driver.find_element(By.XPATH, '//span[contains(text(),"Expert")]')
        box2.click()
        time.sleep(3)
        box3 = driver.find_element(By.XPATH, '//span[contains(text(),"Less than 5")]')
        box3.click()
        time.sleep(3)
        box4 = driver.find_element(By.XPATH, '//span[contains(text(),"5 to 10")]')
        box4.click()
        time.sleep(3)
        box5 = driver.find_element(By.XPATH, '//span[contains(text(),"Payment verified")]')
        box5.click()
        time.sleep(7)
        cat1 = driver.find_element(By.XPATH, '//strong[contains(text(),"Job type")]')
        cat1.click()
        time.sleep(7)
        box6 = driver.find_element(By.XPATH, '//span[contains(text(),"Hourly")]')
        box6.click()
        time.sleep(3)
        box7 = driver.find_element(By.XPATH, '//span[contains(text(),"Less than $100")]')
        box7.click()
        time.sleep(3)
        box8 = driver.find_element(By.XPATH, '//span[contains(text(),"$100 to $500")]')
        box8.click()
        time.sleep(3)

    keys = ["Web Scraping", "Selenium Webdriver", "Data Scraping", "Selenium", "Beautiful Soup", "Scrapy", "Data Extraction", "Automation"]
    wait.until(EC.element_to_be_clickable((By.XPATH, '//button[contains(@title,"Advanced Search")]'))).click()
    wait.until(EC.element_to_be_clickable((By.XPATH, '//input[contains(@aria-labelledby,"tokenizer-label")]'))).clear()
    wait.until(EC.element_to_be_clickable((By.XPATH, '//input[contains(@aria-labelledby,"tokenizer-label")]'))).click()
    time.sleep(3)
    for i in range(len(keys)):
        search_field = wait.until(EC.element_to_be_clickable((By.XPATH, '//input[contains(@aria-labelledby,"tokenizer-label")]')))
        search_field.click()
        for character in keys[i]:
            search_field.send_keys(character)
            time.sleep(0.05)
        time.sleep(7)
        wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, "#typeahead-input-control-35 .up-menu-item-text"))).click()
        time.sleep(7)
        wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, '[data-test="modal-advanced-search-search-btn"]'))).click()


main()
I need to improve this script to extract daily data from this site. However, I am not getting any data except for the "Spot" column!
Thanks for the help!
UPD: Now I can't change the date.
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from bs4 import BeautifulSoup as bs
import pandas as pd
from selenium_stealth import stealth
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
url ="https://www.eex.com/en/market-data/natural-gas/spot"
chrome_options = Options()
chrome_options.add_argument("--no-sandbox")
chrome_options.add_argument("--headless")
chrome_options.add_argument("start-maximized")
browser = webdriver.Chrome(executable_path="chromedriver1/chromedriver", options=chrome_options)
browser.get("https://www.eex.com/en/market-data/natural-gas/spot")
time.sleep(10)
date_picker = WebDriverWait(browser, 20).until(EC.visibility_of_element_located((By.XPATH, '//*[@id="symbolheader_ngs"]/div/div/div/input')))
date_picker.send_keys("2023-01-23")
time.sleep(20)
page_source = browser.page_source
s = bs(page_source)
table = s.select('table')[1]
final_list = []
for row in table.select('tr'):
    final_list.append([x.text for x in row.find_all(['td', 'th'])])
final_df = pd.DataFrame(final_list[2:], columns = final_list[:1])
final_df.columns = ['Spot', 'Last Price', 'Last Volume', 'End of Day Index', 'Volume Exchange','del']
df=final_df.drop('del',axis=1)
browser.quit()
df.to_excel('final_df.xlsx', index = False)
A few small tweaks so that all columns can be extracted. The main idea is that the extraction logic needs to be checked against how the HTML DOM is structured.
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from bs4 import BeautifulSoup as bs
import pandas as pd
def get_df(page_source):
    soup = bs(page_source, 'html.parser')
    table = soup.select('table')[1]
    table_header = table.find("tr", {"class": "mv-quote-header-row"})
    table_body = table.select('tbody')
    result = {}
    for e_header in table_header.find_all('th'):
        if e_header.text:
            result[e_header.text] = []
    for e_r in table_body[0].find_all('tr'):
        r1 = [e.text for e in e_r.find_all('td', {'class': not ['mv-quote-button']})]
        result['Spot'].append(r1[0])
        result['Last Price'].append(r1[1])
        result['Last Volume'].append(r1[2])
        result['End of Day Index'].append(r1[3])
        result['Volume Exchange'].append(r1[4])
    df = pd.DataFrame(result)
    return df


chrome_options = Options()
chrome_options.add_argument("--no-sandbox")
#chrome_options.add_argument("--headless")
chrome_options.add_argument("start-maximized")
webdriver_service = Service("chromedriver/chromedriver")  # path to where you saved the chromedriver binary
#webdriver_service = Service()
browser = webdriver.Chrome(service=webdriver_service, options=chrome_options)
browser.get("https://www.eex.com/en/market-data/natural-gas/spot")
page_source = browser.page_source
final_df = get_df(browser.page_source)
browser.quit()
final_df.to_excel('final_df.xlsx', index=False)
categories = driver.find_elements(By.XPATH, '//div[starts-with(@class, "item-1EEezFCx")]')
for category in categories:
    try:
        text = driver.find_element(By.XPATH, '//div[@text()="{category.text}"').click()
        print(text)
        time.sleep(2)
    except ElementNotInteractableException:
        pass
Here categories holds the WebElements whose class names all start with item-1EEezFCx. On each iteration I would like to access the text of the WebElement, print it, and click it. How can I access the text element, please?
Full code (edited):
import os
import time
import selenium.webdriver as webdriver
from selenium.webdriver.support.ui import WebDriverWait, Select
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver import Firefox
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import TimeoutException, NoSuchElementException, ElementNotInteractableException
from bs4 import BeautifulSoup
import pandas as pd
user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:100.0) Gecko/20100101 Firefox/100.0'
path = "C:\\"
FireFoxDriverPath = os.path.join(path, 'Python39', 'geckodriver.exe')
FireFoxProfile = r'C:\Users\username\AppData\Roaming\Mozilla\Firefox\Profiles\ltk7fdt2.default'
options = Options()
options.set_preference('profile', FireFoxProfile)
service = Service(r'C:\Python39\geckodriver.exe')
driver = Firefox(service=service, options=options)
url = "https://www.tradingview.com/markets/cryptocurrencies/prices-all/"
driver.get(url)
# Step 1. Toggle the active currency
currency = 'USD'
active_currency = driver.find_element(By.XPATH, '//span[contains(@class, "modeTitleActive-bJ0BPoV3")]')
if active_currency.text == currency:
    pass
else:
    driver.find_element(By.XPATH, '//input[@type="checkbox"]').click()

# Step 2. Import tables
xlwriter = pd.ExcelWriter('TradingView Crypto Prices.xlsx')
categories = driver.find_elements(By.XPATH, '//div[starts-with(@class, "item-1EEezFCx")]')

# Load columns one by one
for category in categories:
    try:
        driver.find_element(By.XPATH, category).text()
        time.sleep(2)
    except ElementNotInteractableException:
        pass
    load_more = True
    while load_more:
        try:
            driver.find_element(By.CLASS_NAME, 'tv-load-more__btn').click()
            time.sleep(1)
        except ElementNotInteractableException:
            load_more = False
    df = pd.read_html(driver.page_source)[0]
    df.to_excel(xlwriter, sheet_name=category.text, index=False)

xlwriter.save()
driver.quit()
for category in categories:
    print(category.text)
    category.click()
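Note that if clicking a category re-renders the tab list, the elements found earlier can go stale. A sketch of a more defensive loop (assuming the same item-1EEezFCx class) re-locates the tabs on every pass:

num_categories = len(driver.find_elements(By.XPATH, '//div[starts-with(@class, "item-1EEezFCx")]'))
for i in range(num_categories):
    # re-find the tabs each time so a stale reference after the click doesn't break the loop
    category = driver.find_elements(By.XPATH, '//div[starts-with(@class, "item-1EEezFCx")]')[i]
    print(category.text)
    category.click()
    time.sleep(2)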