How do I access a frame that's present inside a frameset which is inside another frameset?
Here's my code which returns selenium.common.exceptions.NoSuchFrameException: Message: no such frame.
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
driver = webdriver.Chrome()
driver.get("https://netbanking.hdfcbank.com/netbanking/")
# NOTE(review): redundant — a fresh driver already operates on the default content.
driver.switch_to_default_content()
assert "Welcome to HDFC Bank" in driver.title
# NOTE(review): deprecated API, and this is where NoSuchFrameException is raised —
# the nested frame may not be available yet because nothing waits for it.
driver.switch_to_frame("login_page")
try:
    # BUG: presence_of_element_located expects a (By, value) locator tuple,
    # not an already-located WebElement.
    WebDriverWait(driver, 10).until(EC.presence_of_element_located(driver.find_element_by_class_name('pwd_field')))
    print "Page is ready!"  # Python 2 print statement
except TimeoutException:
    print "Loading took too much time!"
driver.implicitly_wait(10) # seconds
driver.close()
You don't need to handle framesets in any special way - these are just containers for the frames, they are not frames themselves. Here is what worked for me:
add a wait to wait for the frame to be present
switch to the frame
The code:
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# Framesets are only containers — wait for the inner frame element itself,
# then switch into it before looking for the password field.
browser = webdriver.Chrome()
browser.get("https://netbanking.hdfcbank.com/netbanking/")
waiter = WebDriverWait(browser, 10)
assert "Welcome to HDFC Bank" in browser.title
login_frame = waiter.until(EC.presence_of_element_located((By.NAME, 'login_page')))
browser.switch_to.frame(login_frame)
try:
    waiter.until(EC.presence_of_element_located((By.CLASS_NAME, 'pwd_field')))
    print("Page is ready!")
except TimeoutException:
    print("Loading took too much time!")
browser.close()
Note that I've also fixed the wait you've already had, replaced:
.until(EC.presence_of_element_located(driver.find_element_by_class_name('pwd_field')))
with:
.until(EC.presence_of_element_located((By.CLASS_NAME, 'pwd_field')))
I've also removed the first driver.switch_to_default_content() call - you are already operating in the scope of the default content at the beginning.
Related
I need to press green button in to accept cookies on https://garantex.io/
My code:
driver = webdriver.Chrome(ChromeDriverManager().install())
driver.implicitly_wait(10)
driver.get("https://garantex.io/")
try:
    # NOTE(review): 'btn.btn-success' is a compound CSS selector, not a single
    # class name — find_element_by_class_name expects one class without dots.
    cookie_access = driver.find_element_by_class_name('btn.btn-success')
    print("Is displayed = " + str(cookie_access.is_displayed()))
except Exception as e:
    driver.close()
    print(e)
I have Is displayed = FALSE. How can I interact with this button?
You were almost correct, but that value is a CSS selector, not a class name.
# Accept the cookie banner: the element is matched by the compound CSS
# selector ".btn.btn-success", so locate it with By.CSS_SELECTOR and wait
# until it is visible before clicking.
browser = webdriver.Chrome(ChromeDriverManager().install())
browser.maximize_window()
explicit_wait = WebDriverWait(browser, 30)
browser.get("https://garantex.io/")
try:
    cookie_button = explicit_wait.until(
        EC.visibility_of_element_located((By.CSS_SELECTOR, ".btn.btn-success")))
    cookie_button.click()
    print('Clicked on cookies button successfully.')
except Exception as e:
    browser.close()
    print(e)
You will need these imports as well
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
The button is clicked with me normally, maybe you need to add some wait like this:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
...
driver.get("https://garantex.io/")
# By.CLASS_NAME must not receive the compound value "btn.btn-success" (a
# class name cannot contain dots or spaces); use a CSS selector instead,
# and wait for the button to be clickable rather than merely present.
cookie_access = WebDriverWait(driver, 30).until(
    EC.element_to_be_clickable((By.CSS_SELECTOR, ".btn.btn-success")))
cookie_access.click()
You also don't need driver.implicitly_wait(10) before loading the page; if you use it at all, set it after the page load.
I am trying to accept a cookie banner in Python using Selenium.
So far, I tried a lot to access to the "Accept and continue" (or in german "Akzeptieren und weiter") button, but none of my tries is working.
Some things I already tried out:
# NOTE(review): all of these attempts fail because the consent button lives
# inside an iframe — the driver must switch to that frame before locating it.
driver.get("https://www.spiegel.de/")
time.sleep(5)
try:
    driver.find_element_by_css_selector('.sp_choice_type_11').click()
except:
    print('css selector failed')
try:
    driver.find_element_by_xpath('/html/body/div[2]/div/div/div/div[3]/div[1]/button').click()
except:
    print('xpath failed')
try:
    # NOTE(review): find_element_by_class_name cannot take a space-separated
    # list of classes — only a single class name is valid.
    driver.find_element_by_class_name('message-component message-button no-children focusable primary-button font-sansUI font-bold sp_choice_type_11 first-focusable-el').click()
except:
    print('full class failed')
try:
    driver.find_element_by_class_name('message-component').click()
except:
    print('one class component failed')
What else can I try to accept cookies on that website?
The element Accept and continue is inside an iframe, so you need to switch to that frame before performing the click on it. Try it like below:
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

driver = webdriver.Chrome(executable_path="path to chromedriver.exe")
driver.maximize_window()
driver.get("https://www.spiegel.de/")
wait = WebDriverWait(driver,30)
# The consent button lives inside an iframe; switch into it first.
# XPath attribute tests use '@' — '[#title=...]' is invalid XPath syntax.
wait.until(EC.frame_to_be_available_and_switch_to_it((By.XPATH,"//iframe[@title='Privacy Center']")))
cookie = wait.until(EC.element_to_be_clickable((By.XPATH,"//button[text()='Accept and continue']")))
cookie.click()
While the other answer uses XPath, which is not the preferred locator strategy in Selenium automation, you can use the CSS selector below to switch to the iframe:
iframe[id^=sp_message_iframe]
Code :
# Switch into the consent iframe (its id starts with "sp_message_iframe"),
# then click the "Accept and continue" button once it becomes clickable.
consent_wait = WebDriverWait(driver, 20)
consent_wait.until(EC.frame_to_be_available_and_switch_to_it((By.CSS_SELECTOR, "iframe[id^=sp_message_iframe]")))
consent_wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, "button[title='Accept and continue']"))).click()
Imports :
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
It is recommended to have try and except like you've in your code.
I am looking to scrape hashtags generated from a site using Selenium webdriver. Since the site is using Shadow Content (User Agent) I decided to just copy the hashtags using the button already in the site that copies them into my clipboard. However, I am failing to locate the <button>
This is the HTML
<button type="button" id="copyBtn" data-clipboard-target="#hashtag_textarea" class="btn btn-success">Copy to clipboard</button>
How is it that Selenium can't find the button? What am I doing wrong?
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.wait import WebDriverWait as wait
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
browser = webdriver.Chrome('/Users/user/Documents/docs/chromedriver')
browser.get('https://seekmetrics.com/hashtag-generator')
delay = 15
print ("Headless Chrome Initialized")
print ("\n")
try:
    element = wait(browser, delay).until(EC.element_to_be_clickable((By.CLASS_NAME, 'el-input__inner')))
    element.click()
    element.send_keys('love')
    element.send_keys(Keys.ENTER)
    # NOTE(review): constructing WebDriverWait without calling .until() waits
    # for nothing — this line is a no-op, so the click below runs immediately.
    wait(browser, delay)
    browser.find_element_by_id('copyBtn').click()
    print('Page is ready!')
    # print(hashtags.text)
    # print (browser.page_source)
except TimeoutException:
    print("Loading took too much time!")
browser.quit()
You don't need to click the button — just read the textarea's value, but you do need to wait until the textarea is located.
element.send_keys('love')
element.send_keys(Keys.ENTER)
# wait until hashtags generated
hashtags = wait(browser, delay).until(EC.presence_of_element_located((By.ID, 'hashtag_textarea')))
print(hashtags.get_attribute('value'))
print('Page is ready!')
After entering a value in the textbox, the page refreshes, and that takes some time; during that time your code tries to click a button that is not yet clickable (or not even loaded in the DOM). Instead, wait for the button until it becomes clickable — check the following code sample:
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.wait import WebDriverWait as wait
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC

# Use a raw string for the Windows path: '\P', '\S' etc. are invalid escape
# sequences inside a normal string literal.
browser = webdriver.Chrome(r'C:\Python27\Scripts\chromedriver')
browser.get('https://seekmetrics.com/hashtag-generator')
delay = 15
print("Headless Chrome Initialized")
print("\n")
try:
    element = wait(browser, delay).until(EC.element_to_be_clickable((By.CLASS_NAME, 'el-input__inner')))
    element.click()
    element.send_keys('love')
    element.send_keys(Keys.ENTER)
    # Explicitly wait for the copy button to become clickable; a bare
    # `wait(browser, delay)` expression on its own waits for nothing.
    button = wait(browser, delay).until(EC.element_to_be_clickable((By.XPATH, "//button[text()='Copy to clipboard']")))
    button.click()
    print('Page is ready!')
except TimeoutException:
    print("Loading took too much time!")
browser.quit()
Hope this helps.
I am trying to simulate a click from this page (http://www.oddsportal.com/baseball/usa/mlb/results/) to the last page number found at the bottom. The click I use on the icon in my code seems to work, but I can't get it to scrape the actual page data I want to after simulating this click. Instead, it just scrapes the data from the first original url. Any help on this would be greatly appreciated.
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from bs4 import BeautifulSoup
url='http://www.oddsportal.com/baseball/usa/mlb/results/'
driver = webdriver.Chrome()
driver.get(url)
timeout=5
while True:
    try:
        # Wait for the last-page ('»|') pagination link, then click it.
        element_present = EC.presence_of_element_located((By.LINK_TEXT, '»|'))
        WebDriverWait(driver, timeout).until(element_present)
        last_page_link = driver.find_element_by_link_text('»|')
        last_page_link.click()
        # NOTE(review): '#class' is not valid XPath — presumably '@class';
        # as written this wait can never match and will time out.
        element_present2 = EC.presence_of_element_located((By.XPATH, ".//th[#class='first2 tl']"))
        WebDriverWait(driver, timeout).until(element_present2)
        content=driver.page_source
        soup=BeautifulSoup(content,'lxml')
        dates2 = soup.find_all('th',{'class':'first2'})
        dates2 = [element.text for element in dates2]
        dates2=dates2[1:]  # drop the header cell
        driver.quit()
    except TimeoutException:
        print('Timeout Error!')
        # NOTE(review): 'continue' retries the loop after driver.quit() — the
        # next iteration runs against a closed driver, and dates2 may be
        # undefined when the final print executes.
        driver.quit()
        continue
    break
print(dates2)
I'm trying to translate user comments from TripAdvisor. The scraper reads the link, then iterates through the comments one by one and translates them. But my code stops after translating the first comment.
from selenium import webdriver
from selenium.webdriver.common.by import By
import time
driver = webdriver.Chrome()
driver.maximize_window()
driver.get("https://www.tripadvisor.in/ShowUserReviews-g1-d8729164-r425811350-TAP_Portugal-World.html")
# Collect every "Google Translate" link on the page, then click each in turn.
gt= driver.find_elements(By.CSS_SELECTOR,".googleTranslation>.link")
for i in gt:
    i.click()
    # NOTE(review): the fixed 2 s sleeps may be too short for the translation
    # popup to open/close — likely why the loop stops after the first comment.
    time.sleep(2)
    driver.find_element_by_class_name("ui_close_x").click()
    time.sleep(2)
try this:
from selenium import webdriver
from selenium.webdriver.common.by import By
import time
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Chrome()
driver.maximize_window()
url="https://www.tripadvisor.com/Airline_Review-d8729164-Reviews-Cheap-Flights-TAP-Portugal#REVIEWS"
driver.get(url)
wait = WebDriverWait(driver, 10)
# Open the review-language selector and pick "Portuguese first".
langselction = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, "span.sprite-date_picker-triangle")))
langselction.click()
# XPath attribute tests use '@' — '[#class=...]' is invalid XPath syntax.
driver.find_element_by_xpath("//div[@class='languageList']//li[normalize-space(.)='Portuguese first']").click()
# Click each Google-Translate link, closing the popup between clicks.
gt= driver.find_elements(By.CSS_SELECTOR,".googleTranslation>.link")
for i in gt:
    i.click()
    time.sleep(2)
    driver.find_element_by_class_name("ui_close_x").click()
    time.sleep(2)
I tried your same code with an increased sleep time; the list is traversed fully and the comments get translated as well.
Note: I tried the program on Firefox
from selenium import webdriver
from selenium.webdriver.common.by import By
import time

# Same approach as the question, but with longer sleeps so the translation
# popup has time to open and close between clicks (run on Firefox).
driver = webdriver.Firefox()
driver.maximize_window()
driver.get("https://www.tripadvisor.in/ShowUserReviews-g1-d8729164-r425811350-TAP_Portugal-World.html")
translate_links = driver.find_elements(By.CSS_SELECTOR, ".googleTranslation>.link")
print(type(translate_links))
for link in translate_links:
    link.click()
    time.sleep(15)
    driver.find_element_by_class_name("ui_close_x").click()
    time.sleep(15)