StaleElementReferenceException in Python Selenium

I am trying to count how many times the "Load More Reviews" option is clicked on this site, but I am getting the following error:
selenium.common.exceptions.StaleElementReferenceException: Message: stale element reference: element is not attached to the page document
Here is my Python code:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.options import Options
options = Options()
options.add_argument("--disable-notifications")
driver = webdriver.Chrome(ChromeDriverManager().install(), chrome_options=options)

url = "https://www.justdial.com/Delhi/S-K-Premium-Par-Hari-Nagar/011PXX11-XX11-131128122154-B8G6_BZDET"
driver.get(url)

pop_up = WebDriverWait(driver, 30).until(
    EC.element_to_be_clickable((By.XPATH, '//*[@id="best_deal_detail_div"]/section/span')))
pop_up.click()  # to dismiss the pop-up

count = 0
while True:
    element = WebDriverWait(driver, 20).until(
        EC.element_to_be_clickable((By.XPATH, "//span[text()='Load More Reviews..']")))
    element.click()
    count = count + 1
    print(count)

Try the code below:
from selenium.common.exceptions import StaleElementReferenceException, TimeoutException

count = 0
while True:
    try:
        WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, "//span[text()='Load More Reviews..']"))).click()
        count = count + 1
    except StaleElementReferenceException:
        pass
    except TimeoutException:
        break
print(count)
Issue: As per your code, you are waiting for the Load More Reviews button to be clickable. Once it is clicked, and even before the page has finished loading, the wait detects that the button is present and clickable; but by the time the click is attempted, the page is still in the process of refreshing / loading more reviews. As a result the HTML DOM is disrupted/refreshed and the stale element exception is raised.
Also, as there is no break condition in your code, I have added one: if there is no Load More Reviews button on the page, it will break out of the loop.
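An alternative sketch, if you'd rather synchronize on the page refresh than swallow the exception (this assumes the button node really is replaced on each load, which is what the stale element error suggests):
from selenium.common.exceptions import TimeoutException

count = 0
while True:
    try:
        button = WebDriverWait(driver, 20).until(
            EC.element_to_be_clickable((By.XPATH, "//span[text()='Load More Reviews..']")))
        button.click()
        # Block until the clicked node is detached from the DOM, i.e. the
        # page has actually re-rendered, before locating the button again.
        WebDriverWait(driver, 20).until(EC.staleness_of(button))
        count += 1
        print(count)
    except TimeoutException:
        break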

Related

ElementClickInterceptedException Error Selenium

I keep getting ElementClickInterceptedException on this script I'm writing. I'm supposed to click a link that will open a new window, scrape from the new window, close it, and move to the next link to scrape, but it just won't work; it gives the error after at most 3 link clicks. I saw a similar question here and tried using wait.until(EC.element_to_be_clickable()) and also maximized my screen, but it still did not work for me. Here is the site I am scraping from (trying to scrape all the games for each day) and here is a chunk of the code I'm using:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.firefox.options import Options as FirefoxOptions
from selenium.common.exceptions import TimeoutException, NoSuchElementException, ElementNotInteractableException, StaleElementReferenceException
from time import sleep
l = "https://www.flashscore.com/"
options = FirefoxOptions()
#options.add_argument("--headless")
driver = webdriver.Firefox(executable_path="geckodriver.exe",
                           firefox_options=options)
driver.install_addon('C:\\Windows\\adblock_plus-3.10.1-an+fx.xpi')
driver.maximize_window()
driver.get(l)
driver.implicitly_wait(5)
cnt = 0
sleep(5)
wait = WebDriverWait(driver, 20)
a = driver.window_handles[0]
b = driver.window_handles[1]
driver.switch_to.window(a)

# Close Adblock tab
if 'Adblock' in driver.title:
    driver.close()
    driver.switch_to.window(a)
else:
    driver.switch_to.window(b)
    driver.close()
    driver.switch_to.window(a)

var1 = driver.find_elements_by_xpath("//div[@class='leagues--live ']/div/div")
knt = 0
for i in range(len(var1)):
    if (var1[i].get_attribute("id")):
        knt += 1
        #sleep(2)
        #driver.switch_to.window(driver.window_handles)
        var1[i].click()
        sleep(2)
        #var2 = wait.until(EC.visibility_of_element_located((By.XPATH, "//div[contains(@class, 'event__match event__match--last event__match--twoLine')]")))
        print(len(driver.window_handles))
        driver.switch_to.window(driver.window_handles[1])
        try:
            sleep(4)
            driver.close()
            driver.switch_to.window(a)
            #sleep(3)
        except(Exception):
            print("Exception caught")
#WebDriverWait(driver, 30).until(EC.visibility_of_element_located((By.CLASS_NAME, "event__match event__match--last event__match--twoLine")))
sleep(10)
driver.close()
Any ideas to help please.
It looks like the element you are trying to click on is covered by a banner ad or something else, like a cookie message.
To fix this, you can scroll down to the last element using the following code:
driver.execute_script('\
    let items = document.querySelectorAll(\'div[title="Click for match detail!"]\'); \
    items[items.length - 1].scrollIntoView();'
)
Add it before clicking on the desired element in the loop.
I tried to make a working example for you, but it works with chromedriver, not geckodriver:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

options = webdriver.ChromeOptions()
# options.add_argument("--headless")
options.add_experimental_option("excludeSwitches", ["enable-automation", "enable-logging"])
service = Service(executable_path=r'your\path\to\chromedriver.exe')
driver = webdriver.Chrome(service=service, options=options)
wait = WebDriverWait(driver, 5)
url = 'https://www.flashscore.com/'
driver.get(url)

# accept cookies
wait.until(EC.presence_of_element_located((By.ID, 'onetrust-accept-btn-handler'))).click()

matches = driver.find_elements(By.CSS_SELECTOR, 'div[title="Click for match detail!"]')
for match in matches:
    driver.execute_script('\
        let items = document.querySelectorAll(\'div[title="Click for match detail!"]\'); \
        items[items.length - 1].scrollIntoView();'
    )
    match.click()
    driver.switch_to.window(driver.window_handles[1])
    print('get data from open page')
    driver.close()
    driver.switch_to.window(driver.window_handles[0])
driver.quit()
It works in both normal and headless mode.

Python Selenium iterate table of links clicking each link

So this question has been asked before, but I am still struggling to get it working.
The webpage has a table with links, and I want to iterate through, clicking each of the links.
This is my code so far:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Chrome(executable_path=r'C:\Users\my_path\chromedriver_96.exe')
driver.get(r"https://www.fidelity.co.uk/shares/ftse-350/")
try:
    element = WebDriverWait(driver, 20).until(
        EC.presence_of_element_located((By.CLASS_NAME, "table-scroll")))
    table = element.find_elements_by_xpath("//table//tbody/tr")
    for row in table[1:]:
        print(row.get_attribute('innerHTML'))
        # link.click()
finally:
    driver.close()
Sample of output
<td>FOUR</td>
<td>4imprint Group plc</td>
<td>Media & Publishing</td>
<td>888</td>
<td>888 Holdings</td>
<td>Hotels & Entertainment Services</td>
<td>ASL</td>
<td>Aberforth Smaller Companies Trust</td>
<td>Collective Investments</td>
How do I click the href and iterate to the next href?
Many thanks.
Edit: I went with this solution (a few small tweaks on Prophet's solution):
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import StaleElementReferenceException
import time

driver = webdriver.Chrome(executable_path=r'C:\Users\my_path\chromedriver_96.exe')
driver.get(r"https://www.fidelity.co.uk/shares/ftse-350/")
actions = ActionChains(driver)

# close the cookies banner
WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.ID, "ensCloseBanner"))).click()

# wait for the first link in the table
WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.XPATH, "//table//tbody/tr/td/a")))

# extra wait to make sure all the links are loaded
time.sleep(1)

# get the total links amount
links = driver.find_elements_by_xpath('//table//tbody/tr/td/a')
for index, val in enumerate(links):
    try:
        # get the links again after getting back to the initial page in the loop
        links = driver.find_elements_by_xpath('//table//tbody/tr/td/a')
        # scroll to the n-th link, it may be out of the initially visible area
        actions.move_to_element(links[index]).perform()
        links[index].click()
        # scrape the data on the new page and get back with the following command
        driver.execute_script("window.history.go(-1)")  # alternatively: driver.back()
        WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.XPATH, "//table//tbody/tr/td/a")))
        time.sleep(2)
    except StaleElementReferenceException:
        pass
To perform what you want to do here, you first need to close the cookies banner at the bottom of the page.
Then you can iterate over the links in the table.
Since clicking each link opens a new page, after scraping the data there you will have to go back to the main page to get the next link. You cannot just collect all the links into a list and then iterate over that list, since by navigating to another web page all the elements Selenium grabbed on the initial page become stale.
Your code can be something like this:
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
import time

driver = webdriver.Chrome(executable_path=r'C:\Users\my_path\chromedriver_96.exe')
driver.get(r"https://www.fidelity.co.uk/shares/ftse-350/")
actions = ActionChains(driver)

# close the cookies banner
WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.ID, "ensCloseBanner"))).click()

# wait for the first link in the table
WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.XPATH, "//table//tbody/tr/td/a")))

# extra wait to make sure all the links are loaded
time.sleep(1)

# get the total links amount
links = driver.find_elements_by_xpath('//table//tbody/tr/td/a')
for index, val in enumerate(links):
    # get the links again after getting back to the initial page in the loop
    links = driver.find_elements_by_xpath('//table//tbody/tr/td/a')
    # scroll to the n-th link, it may be out of the initially visible area
    actions.move_to_element(links[index]).perform()
    links[index].click()
    # scrape the data on the new page and get back with the following command
    driver.execute_script("window.history.go(-1)")  # alternatively: driver.back()
    WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.XPATH, "//table//tbody/tr/td/a")))
    time.sleep(1)
You basically have to do the following:
Click on the cookies button if available.
Get all the links on the page.
Iterate over the list of links, click each one (first scrolling to that web element), and then navigate back to the original screen.
Code:
driver = webdriver.Chrome(driver_path)
driver.maximize_window()
wait = WebDriverWait(driver, 30)
driver.get("https://www.fidelity.co.uk/shares/ftse-350/")
try:
    wait.until(EC.element_to_be_clickable((By.ID, "ensCloseBanner"))).click()
    print('Clicked on the cookies button')
except:
    print('Could not click on the cookies button')
    pass
driver.execute_script("window.scrollTo(0, 750)")
try:
    all_links = wait.until(EC.presence_of_all_elements_located((By.XPATH, "//table//tbody/tr/td/a")))
    print("We have got to deal with", len(all_links), 'links')
    j = 0
    for link in range(len(all_links)):
        links = wait.until(EC.presence_of_all_elements_located((By.XPATH, "//table//tbody/tr/td/a")))
        driver.execute_script("arguments[0].scrollIntoView(true);", links[j])
        time.sleep(1)
        links[j].click()
        # here write the code to scrape something once the click is performed
        time.sleep(1)
        driver.execute_script("window.history.go(-1)")
        j = j + 1
        print(j)
except:
    print('Bot could not execute all the links properly')
    pass
Import:
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
import time
PS: to handle the stale element reference you'd have to define the list of web elements again inside the loop, as sketched below.
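As a minimal sketch of that pattern (reusing the table locator from above; the waits are left out for brevity, so treat it as illustrative rather than drop-in):
links_xpath = "//table//tbody/tr/td/a"
for index in range(len(driver.find_elements_by_xpath(links_xpath))):
    # Re-locate on every iteration: the references collected before
    # navigating away have gone stale by the time we come back.
    links = driver.find_elements_by_xpath(links_xpath)
    links[index].click()
    # ... scrape the detail page here ...
    driver.back()  # return to the table before the next iteration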

How to click on the Next button with doPostBack() call to browse to the next page while fetching data with Selenium and Python?

I have written this code, but it's not going to the next page; it's fetching data from the same page repeatedly.
from bs4 import BeautifulSoup
import requests
from selenium import webdriver
from selenium.webdriver import ActionChains
url="http://www.4docsearch.com/Delhi/Doctors"
driver = webdriver.Chrome(r'C:\chromedriver.exe')
driver.get(url)
next_page = True
while next_page == True:
soup = BeautifulSoup(driver.page_source, 'html.parser')
div = soup.find('div',{"id":"ContentPlaceHolder1_divResult"})
for heads in div.find_all('h2'):
links = heads.find('a')
print(links['href'])
try:
driver.find_element_by_xpath("""//* [#id="ContentPlaceHolder1_lnkNext"]""").click()
except:
print ('No more pages')
next_page=False
driver.close()
To browse to the Next page, as the desired element is a JavaScript-enabled element with __doPostBack(), you have to:
Induce WebDriverWait for staleness_of() the previous element first.
Induce WebDriverWait for element_to_be_clickable() on the element next.
You can use the following Locator Strategies:
Code Block:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument("start-maximized")
driver = webdriver.Chrome(options=chrome_options, executable_path=r'C:\Utility\BrowserDrivers\chromedriver.exe')
driver.get("http://www.4docsearch.com/Delhi/Doctors")
WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, "//a[@id='ContentPlaceHolder1_lnkNext' and not(@class='aspNetDisabled')]"))).click()
while True:
    try:
        WebDriverWait(driver, 20).until(EC.staleness_of(driver.find_element_by_xpath("//a[@id='ContentPlaceHolder1_lnkNext' and not(@class='aspNetDisabled')]")))
        WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, "//a[@id='ContentPlaceHolder1_lnkNext' and not(@class='aspNetDisabled')]"))).click()
        print("Next")
    except:
        print("No more pages")
        break
print("Exiting")
driver.quit()
Console Output:
Next
Next
Next
.
.
.
No more pages
Exiting

Unable to click on 'more' button cyclically to get all the full reviews

I've created a script in Python, in combination with Selenium, to fetch all the reviews from a certain page of Google Maps. There are lots of reviews on that page and they only become visible once the page is scrolled downward. My script can do all of that successfully.
However, the only issue I'm facing at this moment is that some of the reviews have a More button, which has to be clicked in order to show the full review.
One such page is this:
website address
I've tried with:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

link = "https://www.google.com/maps/place/Pizzeria+Di+Matteo/@40.8512552,14.255779,17z/data=!4m7!3m6!1s0x133b0841ef6e38e5:0xece6ea09987e9baf!8m2!3d40.8512512!4d14.2579677!9m1!1b1"

driver = webdriver.Chrome()
driver.get(link)
wait = WebDriverWait(driver, 10)

while True:
    try:
        elem = wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "[class='section-loading-spinner']")))
        driver.execute_script("arguments[0].scrollIntoView();", elem)
    except Exception:
        break

for see_more in wait.until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, "button[class^='section-expand-review']"))):
    see_more.click()

for item in wait.until(EC.visibility_of_all_elements_located((By.CSS_SELECTOR, ".section-review-content"))):
    name = item.find_element_by_css_selector("[class='section-review-title'] > span").text
    try:
        review = item.find_element_by_css_selector("[class='section-review-text']").text
    except AttributeError:
        review = ""
    print(name)

driver.quit()
Currently the above script throws a stale element error when it hits the line for see_more in wait.until(...): see_more.click().
How can I click on that More button cyclically to get all the full reviews?
If you use WebDriverWait with presence_of_all_elements_located, it waits for the elements within the given time, and if an element is no longer attached to the HTML you will receive the stale element error.
Instead, check whether the element is present on the page, and if it is, click on it:
if len(driver.find_elements_by_css_selector("button[class^='section-expand-review']")) > 0:
    driver.find_element_by_css_selector("button[class^='section-expand-review']").click()
Here is the code.
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

link = "https://www.google.com/maps/place/Ecstasy/@23.7399982,90.3732109,17z/data=!3m1!4b1!4m7!3m6!1s0x3755b8caa669d5e3:0x41f47ddcc39a556e!8m2!3d23.7399933!4d90.3753996!9m1!1b1"

driver = webdriver.Chrome()
driver.get(link)
wait = WebDriverWait(driver, 10)

while True:
    try:
        elem = wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "[class='section-loading-spinner']")))
        driver.execute_script("arguments[0].scrollIntoView();", elem)
    except Exception:
        break

if len(driver.find_elements_by_css_selector("button[class^='section-expand-review']")) > 0:
    driver.find_element_by_css_selector("button[class^='section-expand-review']").click()
    print('pass')

for item in wait.until(EC.visibility_of_all_elements_located((By.CSS_SELECTOR, ".section-review-content"))):
    name = item.find_element_by_css_selector("[class='section-review-title'] > span").text
    try:
        review = item.find_element_by_css_selector("[class='section-review-text']").text
    except AttributeError:
        review = ""
    print(name)

driver.quit()
EDITED
if len(driver.find_elements_by_css_selector("button[class^='section-expand-review']")) > 0:
    for item in driver.find_elements_by_css_selector("button[class^='section-expand-review']"):
        item.location_once_scrolled_into_view
        item.click()
        time.sleep(2)
This worked for me; you can put it within a for loop, or your own method, to get all the reviews (a sketch of that follows this snippet):
try:
    driver.find_element_by_class_name("mapsConsumerUiSubviewSectionReview__section-expand-review").click()
except:
    continue
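For instance, a minimal sketch of that placement (assuming the reviews are already loaded and that this class name is still what Google Maps uses, which may change between versions):
import time

# Click every 'More' button currently on the page; skip any that fail,
# mirroring the try/except in the snippet above.
for button in driver.find_elements_by_class_name("mapsConsumerUiSubviewSectionReview__section-expand-review"):
    try:
        button.click()
        time.sleep(1)  # give the expanded text a moment to render
    except Exception:
        continue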

How to click the load more button in Selenium until the page ends (Python)?

I am using Selenium to load a page and need to click the load more button, but I couldn't manage to do that.
I tried this:
from selenium import webdriver
import pandas as pd

driver = webdriver.Chrome('/Users/1/chromedriver.exe')
driver.get('https://simpletire.com/catalog?select=1&brand=61&query=catalog')
driver.find_element_by_css_selector(".btn.btn-primary.btn-lg").click()
I tried the above and the button is clicked, but there are multiple load mores. How do I click it multiple times until the page is exhausted?
Error:
When I tried to keep it in a loop I got:
element not interactable
The solution that worked for me is simple and requires a little effort, but it works fine.
import time

count = 20
while count > 1:
    button = driver.find_element_by_css_selector("button.ipl-load-more__button")
    button.click()
    count -= 1
    time.sleep(2)
# do your work once all the pages are loaded
The only thing you need to worry about is setting the right count value: if it's too small you may stop before everything is loaded, and if it's too large the button may disappear before the count runs out and an error will be thrown; just catch it and increase/decrease the count value according to your requirement (a sketch of that follows below). I hope this helps.
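A hedged variation of the same idea, wrapping the click in a try/except so a vanished button ends the loop instead of raising (assuming the same selector as above):
import time
from selenium.common.exceptions import NoSuchElementException, ElementNotInteractableException

count = 20
while count > 1:
    try:
        driver.find_element_by_css_selector("button.ipl-load-more__button").click()
    except (NoSuchElementException, ElementNotInteractableException):
        break  # no more 'load more' button: everything is loaded
    count -= 1
    time.sleep(2)
# do your work once all the pages are loaded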
Here is the code that should work. I am not sure how many tires are available; the script ran successfully and loaded ~1000 results.
I have given the option to stop loading after reaching a tire count, rather than iterating 100+ times.
url = 'https://simpletire.com/catalog?select=1&brand=61&query=catalog'
driver.get(url)

loadingButton = WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.XPATH, "//div[@id='load_button']")))
maxTires = 200
while loadingButton:
    loadingButton.click()
    time.sleep(2)
    WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.XPATH, "//div[@id='is_loading'][contains(@style,'none')]")))
    loadElems = driver.find_elements_by_xpath("//div[@id='load_button'][contains(@style,'block')]")
    if len(loadElems) > 0:
        loadingButton = driver.find_element_by_xpath("//div[@id='load_button'][contains(@style,'block')]")
        tiresLoaded = len(driver.find_elements_by_css_selector(".catResultWrapper.result"))
    else:
        print("Loaded all the tires")
        break
    if tiresLoaded >= maxTires:
        print(str(tiresLoaded) + " tires are loaded successfully.")
        break
To click() on the element with the text LOAD MORE RESULTS you need to induce WebDriverWait for the desired element_to_be_clickable(), and you can use either of the following Locator Strategies:
Code Block A:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException

chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument("start-maximized")
# chrome_options.add_argument('disable-infobars')
driver = webdriver.Chrome(options=chrome_options, executable_path=r'C:\Utility\BrowserDrivers\chromedriver.exe')
driver.get("https://simpletire.com/catalog?select=1&brand=61&query=catalog")
while True:
    try:
        # WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, "//button[@class='btn btn-primary btn-lg']//span[@class='glyphicon glyphicon-play']"))).click()
        WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, "//button[contains(., 'Load More Results')]"))).click()
        print("LOAD MORE RESULTS button clicked")
    except TimeoutException:
        print("No more LOAD MORE RESULTS button to be clicked")
        break
driver.quit()
Code Block B:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import TimeoutException

chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument("start-maximized")
# chrome_options.add_argument('disable-infobars')
driver = webdriver.Chrome(options=chrome_options, executable_path=r'C:\Utility\BrowserDrivers\chromedriver.exe')
driver.get("https://simpletire.com/catalog?select=1&brand=61&query=catalog")
while True:
    try:
        # ActionChains(driver).move_to_element(WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, "//button[@class='btn btn-primary btn-lg']//span[@class='glyphicon glyphicon-play']")))).pause(3).click().perform()
        ActionChains(driver).move_to_element(WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, "//button[contains(., 'Load More Results')]")))).pause(5).click().perform()
        print("LOAD MORE RESULTS button clicked")
    except TimeoutException:
        print("No more LOAD MORE RESULTS button to be clicked")
        break
driver.quit()
Console Output:
LOAD MORE RESULTS button clicked
LOAD MORE RESULTS button clicked
LOAD MORE RESULTS button clicked
LOAD MORE RESULTS button clicked
LOAD MORE RESULTS button clicked
.
.
.
