I'm trying to get details of all properties from the following website which has properties listed as elements:
https://www.altamirarealestate.com.cy/results/for-sale/flats/cyprus/35p009679979327046l33p17435142059772z9
I'm using Selenium in Python to scrape each element's details, but once I reach an element I cannot click its link to open it in a new page and get the necessary information. Code below:
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import random
import time
import selenium.webdriver.support.ui as ui
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support.select import Select
import csv
from csv import writer
from selenium.common.exceptions import ElementNotVisibleException, WebDriverException, NoSuchElementException
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
Link = 'https://www.altamirarealestate.com.cy/results/for-sale/flats/cyprus/35p009679979327046l33p17435142059772z9'

# MAIN
driver = webdriver.Chrome()
driver.maximize_window()

# Go to link
driver.get(Link)

# Accept cookies
time.sleep(2)
driver.find_element_by_xpath('//*[@id="onetrust-accept-btn-handler"]').click()
time.sleep(2)

# Load everything
while True:
    try:
        driver.find_element_by_xpath("//*[contains(@value,'View more')]").click()
        time.sleep(3)
    except Exception as no_more_properties:
        print('all properties expanded: ', no_more_properties)
        break

# Get properties
properties_list = driver.find_elements_by_xpath('//*[@class="minificha "]')
print(len(properties_list))  # 25
time.sleep(2)

# Get each property link
property_url = set()
properties_details = []
main_window_handle = driver.current_window_handle
for i in range(0, len(properties_list)):
    driver.switch_to.window(main_window_handle)
    property = properties_list[i]
    property_link = property.find_element_by_xpath('//a[@href="' + url + '"]')
    property_link.click()
    time.sleep(2)
    # Switch to property window
    window_after = driver.window_handles[1]
    driver.switch_to.window(window_after)
    # Get number of flats
    number_of_flats = driver.find_elements_by_xpath('//*[@class="lineainmu "]')
    print(len(number_of_flats))
    time.sleep(2)
    currentWindow = driver.current_window_handle
    for j in range(0, len(number_of_flats)):
        driver.switch_to.window(currentWindow)
        flat = number_of_flats[j]
        flat.click()
        time.sleep(2)
        # Switch to flat window
        window_after = driver.window_handles[1]
        driver.switch_to.window(window_after)
When we click on a link on the first page, it opens a new tab. In Selenium, in cases like this we have to switch focus to the new window before we can interact with the web elements on the newly opened page.
Once the task is done, it is important to close the tab and then switch back to the original window.
This can lead to a stale element reference if we do not locate the web elements again inside the loop.
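Here is the bare pattern in isolation (a minimal sketch; the CSS locator is a placeholder, not taken from the site):

main_handle = driver.current_window_handle
link = driver.find_element(By.CSS_SELECTOR, "a.listing-link")  # hypothetical locator
link.click()  # opens the detail page in a new tab
new_handle = [h for h in driver.window_handles if h != main_handle][0]
driver.switch_to.window(new_handle)  # focus the new tab
# ... scrape the detail page here ...
driver.close()  # close the detail tab
driver.switch_to.window(main_handle)  # back to the results page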
Code :
driver = webdriver.Chrome(driver_path)
driver.maximize_window()
driver.implicitly_wait(30)
wait = WebDriverWait(driver, 30)
driver.get("https://www.altamirarealestate.com.cy/results/for-sale/flats/cyprus/35p009679979327046l33p17435142059772z9")
try:
    wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "button#onetrust-accept-btn-handler"))).click()
except:
    pass
size = driver.find_elements(By.XPATH, "//div[@class='slick-list draggable']")
j = 1
org_windows_handle = driver.current_window_handle
for i in range(len(size)):
    ele = driver.find_element(By.XPATH, f"(//div[@class='slick-list draggable'])[{j}]")
    driver.execute_script("arguments[0].scrollIntoView(true);", ele)
    ele.click()
    all_handles = driver.window_handles
    driver.switch_to.window(all_handles[1])
    try:
        name = wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "p#tituloFiltroTipo"))).text
        print(name)
    except:
        pass
    try:
        price = wait.until(EC.visibility_of_element_located((By.ID, "soloPrecio"))).text
        print(price)
    except:
        pass
    driver.close()
    driver.switch_to.window(org_windows_handle)
    j = j + 1
Imports :
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
Output :
Flats - Egkomi, Nicosia
310,000
Flat - Strovolos, Nicosia
115,000
Flat - Agios Dometios, Nicosia
185,000
Flats - Aglantzia, Nicosia
765,000
Flat - Kaimakli, Nicosia
170,000
Flat - Kaimakli, Nicosia
280,000
Flat - Kaimakli, Nicosia
130,000
Flat - Germasogia, Limassol
410,000
Flat - Germasogeia, Limassol
285,000
Flat - Petrou & Pavlou, Limassol
230,000
Mixing implicit and explicit waits is not recommended. But in a few cases like this one, where we use find_element together with an explicit wait, it does no harm. Comment out the implicit wait line and run the code; if it fails, uncomment it and try again.
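For example, to test the explicit-wait-only variant, you can zero out the implicit wait instead of deleting the line (a sketch reusing the price locator from the code above):

driver.implicitly_wait(0)  # restore driver.implicitly_wait(30) if lookups start failing
wait = WebDriverWait(driver, 30)
price = wait.until(EC.visibility_of_element_located((By.ID, "soloPrecio"))).text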
Related
How can I navigate through each page without using driver.current_url? In my full code, I get a bunch of errors once I add the loop that navigates through the pages. Without it, the code runs fine but can only go through one page. I want to navigate through as many pages as possible. Any help appreciated, thanks.
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.service import Service
from time import sleep

driver_service = Service(executable_path=r"C:\Program Files (x86)\chromedriver.exe")
driver = webdriver.Chrome(service=driver_service)
driver.maximize_window()  # load web driver
wait = WebDriverWait(driver, 5)
url_test = driver.get('https://www.seek.com.au/data-jobs-in-information-communication-technology/in-All-Perth-WA')
url_template = driver.current_url
template = url_template + '?page={}'
for page in range(2, 5):
    link_job = [x.get_attribute('href') for x in driver.find_elements(By.XPATH, "//a[@data-automation='jobTitle']")]
    for job in link_job:
        driver.get(job)
        try:
            quick_apply = WebDriverWait(driver, 3).until(EC.element_to_be_clickable((By.XPATH, "(//a[@data-automation='job-detail-apply' and @target='_self'])")))
            quick_apply.click()
            #sleep(3)
        except:
            print("No records found " + job)
            pass
        sleep(3)
    driver.get(template.format(page))
If I understand you correctly, you want to determine dynamically how many pages there are and loop over each of them.
I have managed to achieve this with a while loop that checks on each page whether the "Next" button at the bottom is visible. If it is not, the last page has been reached and you can exit the loop.
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.service import Service
from time import sleep

driver_service = Service(executable_path="C:\\Users\\Stefan\\bin\\chromedriver.exe")
driver = webdriver.Chrome(service=driver_service)
driver.maximize_window()  # load web driver
wait = WebDriverWait(driver, 5)
url_test = driver.get('https://www.seek.com.au/data-jobs-in-information-communication-technology/in-All-Perth-WA')
url_template = driver.current_url
template = url_template + '?page={}'
page = 1
while True:
    # check if "Next" button is visible
    # -> if not, the last page was reached
    try:
        driver.find_element(By.XPATH, "//a[@title='Next']")
    except:
        # last page reached
        break
    link_job = [x.get_attribute('href') for x in driver.find_elements(By.XPATH, "//a[@data-automation='jobTitle']")]
    for job in link_job:
        driver.get(job)
        try:
            quick_apply = WebDriverWait(driver, 3).until(EC.element_to_be_clickable((By.XPATH, "(//a[@data-automation='job-detail-apply' and @target='_self'])")))
            quick_apply.click()
            #sleep(3)
        except:
            print("No records found " + job)
            pass
        sleep(3)
    page += 1
    driver.get(template.format(page))
driver.close()
It seems your problem is a StaleElementReferenceException raised when you get back from a job page to the jobs search results page.
The simplest approach to overcome this problem is to keep the jobs search results page URL and reload it.
Actually, I changed your code only on this point and it works.
I also replaced driver.find_elements(By.XPATH, "//a[@data-automation='jobTitle']") with wait.until(EC.visibility_of_all_elements_located((By.XPATH, "//a[@data-automation='jobTitle']"))) for better reliability.
The code below works, but the web site itself responds badly.
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC

options = Options()
options.add_argument("start-maximized")

webdriver_service = Service(r'C:\webdrivers\chromedriver.exe')
driver = webdriver.Chrome(service=webdriver_service, options=options)
wait = WebDriverWait(driver, 10)

url = 'https://www.seek.com.au/data-jobs-in-information-communication-technology/in-All-Perth-WA?page={p}'
for p in range(1, 20):
    driver.get(url.format(p=p))
    link_job = [x.get_attribute('href') for x in wait.until(EC.visibility_of_all_elements_located((By.XPATH, "//a[@data-automation='jobTitle']")))]
    for job in link_job:
        driver.get(job)
        try:
            wait.until(EC.element_to_be_clickable((By.XPATH, "(//a[@data-automation='job-detail-apply' and @target='_self'])"))).click()
            print("applied")
        except:
            print("No records found " + job)
            pass
        driver.get(url.format(p=p))
I keep getting an ElementClickInterceptedException in this script I'm writing. I'm supposed to click a link that opens a new window, scrape from the new window, close it, and move on to the next link to scrape, but it just won't work; it gives the error after at most 3 link clicks. I saw a similar question here, tried using wait.until(EC.element_to_be_clickable()), and also maximized my window, but it still did not work for me. Here is the site I am scraping from (trying to scrape all the games for each day) and here is a chunk of the code I'm using:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.firefox.options import Options as FirefoxOptions
from selenium.common.exceptions import TimeoutException, NoSuchElementException, ElementNotInteractableException, StaleElementReferenceException
from time import sleep

l = "https://www.flashscore.com/"
options = FirefoxOptions()
#options.add_argument("--headless")
driver = webdriver.Firefox(executable_path="geckodriver.exe",
                           firefox_options=options)
driver.install_addon('C:\\Windows\\adblock_plus-3.10.1-an+fx.xpi')
driver.maximize_window()
driver.get(l)
driver.implicitly_wait(5)
cnt = 0
sleep(5)
wait = WebDriverWait(driver, 20)
a = driver.window_handles[0]
b = driver.window_handles[1]
driver.switch_to.window(a)

# Close Adblock tab
if 'Adblock' in driver.title:
    driver.close()
    driver.switch_to.window(a)
else:
    driver.switch_to.window(b)
    driver.close()
    driver.switch_to.window(a)

var1 = driver.find_elements_by_xpath("//div[@class='leagues--live ']/div/div")
knt = 0
for i in range(len(var1)):
    if (var1[i].get_attribute("id")):
        knt += 1
        #sleep(2)
        #driver.switch_to.window(driver.window_handles)
        var1[i].click()
        sleep(2)
        #var2 = wait.until(EC.visibility_of_element_located((By.XPATH, "//div[contains(@class, 'event__match event__match--last event__match--twoLine')]")))
        print(len(driver.window_handles))
        driver.switch_to.window(driver.window_handles[1])
        try:
            sleep(4)
            driver.close()
            driver.switch_to.window(a)
            #sleep(3)
        except(Exception):
            print("Exception caught")

#WebDriverWait(driver, 30).until(EC.visibility_of_element_located((By.CLASS_NAME, "event__match event__match--last event__match--twoLine")))
sleep(10)
driver.close()
Any ideas to help please.
It looks like the element you are trying to click on is covered by a banner ad or something else like a cookie message.
To fix this you can scroll down to the last element using the following code:
driver.execute_script('\
let items = document.querySelectorAll(\'div[title="Click for match detail!"]\'); \
items[items.length - 1].scrollIntoView();'
)
Add it before clicking on the desired element in the loop.
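Alternatively, since Selenium already holds a reference to each element, you can scroll that exact element into view without re-querying the DOM (a sketch of the same technique; match stands for the element being clicked in the loop):

driver.execute_script("arguments[0].scrollIntoView();", match)
match.click()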
I tried to make a working example for you, but it works with chromedriver, not geckodriver:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

options = webdriver.ChromeOptions()
# options.add_argument("--headless")
options.add_experimental_option("excludeSwitches", ["enable-automation", "enable-logging"])
service = Service(executable_path=r'your\path\to\chromedriver.exe')
driver = webdriver.Chrome(service=service, options=options)
wait = WebDriverWait(driver, 5)

url = 'https://www.flashscore.com/'
driver.get(url)

# accept cookies
wait.until(EC.presence_of_element_located((By.ID, 'onetrust-accept-btn-handler'))).click()

matches = driver.find_elements(By.CSS_SELECTOR, 'div[title="Click for match detail!"]')
for match in matches:
    driver.execute_script('\
        let items = document.querySelectorAll(\'div[title="Click for match detail!"]\'); \
        items[items.length - 1].scrollIntoView();'
    )
    match.click()
    driver.switch_to.window(driver.window_handles[1])
    print('get data from open page')
    driver.close()
    driver.switch_to.window(driver.window_handles[0])
driver.quit()
It works in both normal and headless mode.
So this question has been asked before but I am still struggling to get it working.
The webpage has a table with links, and I want to iterate through, clicking each of the links.
So this is my code so far
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Chrome(executable_path=r'C:\Users\my_path\chromedriver_96.exe')
driver.get(r"https://www.fidelity.co.uk/shares/ftse-350/")
try:
    element = WebDriverWait(driver, 20).until(
        EC.presence_of_element_located((By.CLASS_NAME, "table-scroll")))
    table = element.find_elements_by_xpath("//table//tbody/tr")
    for row in table[1:]:
        print(row.get_attribute('innerHTML'))
        # link.click()
finally:
    driver.close()
Sample of output
<td>FOUR</td>
<td>4imprint Group plc</td>
<td>Media & Publishing</td>
<td>888</td>
<td>888 Holdings</td>
<td>Hotels & Entertainment Services</td>
<td>ASL</td>
<td>Aberforth Smaller Companies Trust</td>
<td>Collective Investments</td>
How do I click the href and iterate to the next one?
Many thanks.
Edit: I went with this solution (a few small tweaks on Prophet's solution).
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import StaleElementReferenceException
from selenium.webdriver.common.action_chains import ActionChains
import time

driver = webdriver.Chrome(executable_path=r'C:\Users\my_path\chromedriver_96.exe')
driver.get(r"https://www.fidelity.co.uk/shares/ftse-350/")
actions = ActionChains(driver)

#close the cookies banner
WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.ID, "ensCloseBanner"))).click()

#wait for the first link in the table
WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.XPATH, "//table//tbody/tr/td/a")))

#extra wait to make sure all the links are loaded
time.sleep(1)

#get the total links amount
links = driver.find_elements_by_xpath('//table//tbody/tr/td/a')
for index, val in enumerate(links):
    try:
        #get the links again after getting back to the initial page in the loop
        links = driver.find_elements_by_xpath('//table//tbody/tr/td/a')
        #scroll to the n-th link, it may be out of the initially visible area
        actions.move_to_element(links[index]).perform()
        links[index].click()
        #scrape the data on the new page and get back with the following command
        driver.execute_script("window.history.go(-1)")  #you can alternatively use driver.back()
        WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.XPATH, "//table//tbody/tr/td/a")))
        time.sleep(2)
    except StaleElementReferenceException:
        pass
To perform what you want to do here, you first need to close the cookies banner at the bottom of the page.
Then you can iterate over the links in the table.
Since clicking each link opens a new page, after scraping the data there you will have to get back to the main page and pick up the next link. You cannot simply collect all the links into a list and then iterate over that list, because navigating to another web page makes all the elements Selenium grabbed on the initial page stale.
Your code can be something like this:
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
import time

driver = webdriver.Chrome(executable_path=r'C:\Users\my_path\chromedriver_96.exe')
driver.get(r"https://www.fidelity.co.uk/shares/ftse-350/")
actions = ActionChains(driver)

#close the cookies banner
WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.ID, "ensCloseBanner"))).click()

#wait for the first link in the table
WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.XPATH, "//table//tbody/tr/td/a")))

#extra wait to make sure all the links are loaded
time.sleep(1)

#get the total links amount
links = driver.find_elements_by_xpath('//table//tbody/tr/td/a')
for index, val in enumerate(links):
    #get the links again after getting back to the initial page in the loop
    links = driver.find_elements_by_xpath('//table//tbody/tr/td/a')
    #scroll to the n-th link, it may be out of the initially visible area
    actions.move_to_element(links[index]).perform()
    links[index].click()
    #scrape the data on the new page and get back with the following command
    driver.execute_script("window.history.go(-1)")  #you can alternatively use driver.back()
    WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.XPATH, "//table//tbody/tr/td/a")))
    time.sleep(1)
You basically have to do the following:
Click on the cookies button if available
Get all the links on the page.
Iterate over the list of links: scroll to each web element, click it, scrape what you need, and then navigate back to the original screen.
Code:
driver = webdriver.Chrome(driver_path)
driver.maximize_window()
wait = WebDriverWait(driver, 30)
driver.get("https://www.fidelity.co.uk/shares/ftse-350/")
try:
    wait.until(EC.element_to_be_clickable((By.ID, "ensCloseBanner"))).click()
    print('Clicked on the cookies button')
except:
    print('Could not click on the cookies button')
    pass
driver.execute_script("window.scrollTo(0, 750)")
try:
    all_links = wait.until(EC.presence_of_all_elements_located((By.XPATH, "//table//tbody/tr/td/a")))
    print("We have got to deal with", len(all_links), 'links')
    j = 0
    for link in range(len(all_links)):
        links = wait.until(EC.presence_of_all_elements_located((By.XPATH, "//table//tbody/tr/td/a")))
        driver.execute_script("arguments[0].scrollIntoView(true);", links[j])
        time.sleep(1)
        links[j].click()
        # here write the code to scrape something once the click is performed
        time.sleep(1)
        driver.execute_script("window.history.go(-1)")
        j = j + 1
        print(j)
except:
    print('Bot could not execute all the links properly')
    pass
Imports:
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
PS: to handle the stale element reference, you have to define the list of web elements again inside the loop, as sketched below.
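A minimal sketch of that re-locate pattern, reusing the XPath from the code above:

total = len(wait.until(EC.presence_of_all_elements_located((By.XPATH, "//table//tbody/tr/td/a"))))
for j in range(total):
    # re-fetch the list on every iteration so the references are never stale
    links = wait.until(EC.presence_of_all_elements_located((By.XPATH, "//table//tbody/tr/td/a")))
    links[j].click()
    # ... scrape the opened page here ...
    driver.execute_script("window.history.go(-1)")  # back to the table page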
The link inside the href is constantly changing, but the XPath of this href is always the same.
How can I click on the www.confirmationemail.com link?
<div dir="ltr">
<p>exampleTEXT.</p>
<p>www.confirmationemail.com</p>
<p>exampleTEXT.</p>
<p>exampleTEXT,</p>
<p>exampleTEXT</p>
</div>
This is the page I'm working on: https://www.minuteinbox.com/
The process is as follows: I register on a site with the e-mail address received from here and then receive a confirmation e-mail; I can open the e-mail, but I cannot click on the link in its content.
from selenium import webdriver
from time import sleep
import config2 as cf
from selenium.webdriver.support.select import Select
import selenium.webdriver.support.ui as ui
from selenium.webdriver.common.keys import Keys
import time

driver = webdriver.Chrome("C:\\ChromeDriver\\chromedriver.exe")
url = "https://www.minuteinbox.com/"
url2 = "EXAMPLE.COM"
driver.get(url)
element = driver.find_element_by_xpath("XPATH").text
print(element)
time.sleep(4)
driver.execute_script("window.open('');")
driver.switch_to.window(driver.window_handles[1])
driver.get(url2)
sec = driver.find_element_by_xpath("XPATH")
sec.click()
devam = driver.find_element_by_xpath("XPATH")
devam.click()
ad = driver.find_element_by_xpath("XPATH")
ad.send_keys("deneme")
soyad = driver.find_element_by_xpath("XPATH")
soyad.send_keys("test")
eMail = driver.find_element_by_css_selector("#user_email")
eMail.send_keys(element)
eMail2 = driver.find_element_by_css_selector("#user_email_confirmation")
eMail2.send_keys(element)
sifre = driver.find_element_by_css_selector("#user_password")
sifre.send_keys("PASS")
sifre2 = driver.find_element_by_css_selector("#user_password_confirmation")
sifre2.send_keys("PASS")
buton = driver.find_element_by_css_selector("SELECT")
buton.click()
hesapol = driver.find_element_by_css_selector("SELECT")
hesapol.click()
sleep(2)
driver.switch_to.window(driver.window_handles[0])
time.sleep(7)
bas = driver.find_element_by_css_selector("#schranka > tr:nth-child(1)")
bas.click()
time.sleep(1)
SD = driver.switch_to.frame(driver.find_element_by_css_selector("iframe#iframeMail"))
time.sleep(5)
SD = driver.find_element_by_xpath("//a[contains(@href,'minuteinbox')]").click()
driver.switch_to.default_content()
SOLVED
Imports:
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
bas = driver.find_element_by_css_selector("#schranka > tr:nth-child(1)")
bas.click()
time.sleep(3)
wait = WebDriverWait(driver, 10)
wait.until(EC.frame_to_be_available_and_switch_to_it((By.CSS_SELECTOR, "iframe[id='iframeMail']")))
print(driver.page_source)
link = driver.find_element_by_xpath("/html/body/div/p[2]/a")
link.click()
As you mentioned, the XPath of that element is constant.
So you can locate the element with that constant XPath and click it.
Something like this:
driver.find_element_by_xpath('the_constant_xpath').click()
UPD
The element you want to be clicked can be located by XPath.
However, it is inside an iframe, so in order to access it you will have to switch to that iframe first.
I have also made your locators better.
So your code could be something like this:
driver.switch_to.window(driver.window_handles[0])
time.sleep(5)
bas = driver.find_element_by_css_selector("td.from")
bas.click()
time.sleep(1)
driver.switch_to.frame(driver.find_element_by_css_selector("iframe#iframeMail"))
driver.find_element_by_xpath("//a[contains(@href,'minuteinbox')]").click()
When you have finished working inside the iframe, you will have to switch back to the default content with
driver.switch_to.default_content()
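Putting the whole round-trip together (a sketch built from the calls above; the iframe id and link XPath come from your own snippet):

wait = WebDriverWait(driver, 10)
wait.until(EC.frame_to_be_available_and_switch_to_it((By.CSS_SELECTOR, "iframe#iframeMail")))
driver.find_element_by_xpath("//a[contains(@href,'minuteinbox')]").click()
driver.switch_to.default_content()  # back to the top-level document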
I've created a script in Python, in combination with Selenium, to fetch all the reviews from a certain page of Google Maps. There are lots of reviews on that page, and they only become visible once the page is scrolled downward. My script can handle all of that successfully.
However, the only issue I'm facing at the moment is that some of the reviews have a More button, which has to be clicked in order to show the full review.
One such page is this:
website address
I've tried with:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

link = "https://www.google.com/maps/place/Pizzeria+Di+Matteo/@40.8512552,14.255779,17z/data=!4m7!3m6!1s0x133b0841ef6e38e5:0xece6ea09987e9baf!8m2!3d40.8512512!4d14.2579677!9m1!1b1"

driver = webdriver.Chrome()
driver.get(link)
wait = WebDriverWait(driver, 10)

while True:
    try:
        elem = wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "[class='section-loading-spinner']")))
        driver.execute_script("arguments[0].scrollIntoView();", elem)
    except Exception:
        break

for see_more in wait.until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, "button[class^='section-expand-review']"))):
    see_more.click()

for item in wait.until(EC.visibility_of_all_elements_located((By.CSS_SELECTOR, ".section-review-content"))):
    name = item.find_element_by_css_selector("[class='section-review-title'] > span").text
    try:
        review = item.find_element_by_css_selector("[class='section-review-text']").text
    except AttributeError:
        review = ""
    print(name)

driver.quit()
Currently the above script throws a stale element error when it hits the see_more.click() line inside the for see_more in wait.until(...) loop.
How can I click on that More button cyclically to get all the full reviews?
If you use WebDriverWait with presence_of_all_elements_located, it waits for the elements within the given time, and if an element is no longer attached to the HTML you will receive an error.
Instead, check whether the element is present on the page, and only click it if it is:
if len(driver.find_elements_by_css_selector("button[class^='section-expand-review']")) > 0:
    driver.find_element_by_css_selector("button[class^='section-expand-review']").click()
Here is the code.
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

link = "https://www.google.com/maps/place/Ecstasy/@23.7399982,90.3732109,17z/data=!3m1!4b1!4m7!3m6!1s0x3755b8caa669d5e3:0x41f47ddcc39a556e!8m2!3d23.7399933!4d90.3753996!9m1!1b1"

driver = webdriver.Chrome()
driver.get(link)
wait = WebDriverWait(driver, 10)

while True:
    try:
        elem = wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "[class='section-loading-spinner']")))
        driver.execute_script("arguments[0].scrollIntoView();", elem)
    except Exception:
        break

if len(driver.find_elements_by_css_selector("button[class^='section-expand-review']")) > 0:
    driver.find_element_by_css_selector("button[class^='section-expand-review']").click()
    print('pass')

for item in wait.until(EC.visibility_of_all_elements_located((By.CSS_SELECTOR, ".section-review-content"))):
    name = item.find_element_by_css_selector("[class='section-review-title'] > span").text
    try:
        review = item.find_element_by_css_selector("[class='section-review-text']").text
    except AttributeError:
        review = ""
    print(name)

driver.quit()
EDITED
if len(driver.find_elements_by_css_selector("button[class^='section-expand-review']")) > 0:
    for item in driver.find_elements_by_css_selector("button[class^='section-expand-review']"):
        item.location_once_scrolled_into_view
        item.click()
        time.sleep(2)
This worked for me:
You can put it inside your for loop, or inside whatever method you use to get all the reviews.
try:
    driver.find_element_by_class_name("mapsConsumerUiSubviewSectionReview__section-expand-review").click()
except:
    continue