How do I use Selenium to click this big button? - python

I've tried using the element's link text, value and XPath. I can't seem to make it click on the button with any of them. What am I doing wrong?
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC

# Path to the local ChromeDriver binary.
PATH = "C:/Users/yongs/Downloads/chromedriver_win32/chromedriver.exe"

# Selenium 4 removed the positional executable_path argument; the
# driver path must be wrapped in a Service object.
driver = webdriver.Chrome(service=Service(PATH))
driver.get("https://ttsfree.com/")

# Type the text to be converted into the input box.
textbox = driver.find_element(By.ID, "input_text")
textbox.send_keys("Text to convert")

# Wait until the "Convert Now" button is clickable, then click it.
# A relative XPath keyed on the visible button text is far less brittle
# than the absolute /html/body/... path, which breaks on any layout change.
button = WebDriverWait(driver, 10).until(
    EC.element_to_be_clickable((By.XPATH, "//a[text()='Convert Now']"))
)
button.click()

EDITED to include download solution as well
# Full working example: submit text for text-to-speech conversion on
# ttsfree.com, then download the generated audio file.
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.relative_locator import locate_with
import time as t
chrome_options = Options()
chrome_options.add_argument("--no-sandbox")
# chrome_options.add_argument("--headless")
webdriver_service = Service("chromedriver/chromedriver") ## path to where you saved chromedriver binary
browser = webdriver.Chrome(service=webdriver_service, options=chrome_options)
url = 'https://ttsfree.com/'
browser.get(url)
# Dismiss the cookie-consent dialog first; it overlays the page and
# would otherwise intercept the clicks below.
WebDriverWait(browser, 20).until(EC.element_to_be_clickable((By.XPATH, "//button/span[text()='AGREE']"))).click()
textbox = WebDriverWait(browser, 20).until(EC.element_to_be_clickable((By.ID, "input_text")))
textbox.send_keys("Text to convert")
# NOTE(review): 2000 s is an extremely generous timeout -- presumably a
# debugging leftover; a smaller value would fail faster on a real problem.
button = WebDriverWait(browser, 2000).until(EC.element_to_be_clickable((By.XPATH, "//a[text()='Convert Now']")))
button.click()
print('clicked!')
# Give the site time to finish the text-to-speech conversion.
t.sleep(10)
# Scroll the result section into view so the download control is clickable.
browser.execute_script('document.getElementsByClassName("label_process text-left")[0].scrollIntoView();')
dl_button = WebDriverWait(browser, 2000).until(EC.element_to_be_clickable((By.CLASS_NAME, "fa-download")))
dl_button.click()
# Wait for the download to complete before closing the browser.
t.sleep(10)
browser.quit()

To click on the element Convert Now you need to induce WebDriverWait for the element_to_be_clickable() which automatically scrolls the element within view and you can use the following locator strategy:
Using LINK_TEXT:
driver.execute("get", {'url': 'https://ttsfree.com/'})
WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.CSS_SELECTOR, "textarea#input_text"))).send_keys("Hello")
WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.LINK_TEXT, "Convert Now"))).click()
Using CSS_SELECTOR:
driver.execute("get", {'url': 'https://ttsfree.com/'})
WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.CSS_SELECTOR, "textarea#input_text"))).send_keys("Hello")
WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.CSS_SELECTOR, "a.convert-now"))).click()
Using XPATH:
driver.execute("get", {'url': 'https://ttsfree.com/'})
WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.CSS_SELECTOR, "textarea#input_text"))).send_keys("Hello")
WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, "//a[text()='Convert Now']"))).click()
Note: You have to add the following imports :
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC

Related

Can't find out xpath with selenium in jobsite.co.uk website

I want to find out the "Accept All" button xpath for click accept cookies.
Code trials:
import time

from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By

# Selenium 4 removed the executable_path keyword; wrap the driver path
# in a Service object instead.  (Also dropped the unused
# `from ast import Pass` and the accidental double assignment
# `driver = driver = ...` from the original.)
service = Service(r'C:\Users\Nahid\Desktop\Python_code\Jobsite\chromedriver.exe')
driver = webdriver.Chrome(service=service)

driver.get('http://jobsite.co.uk/')
driver.maximize_window()
time.sleep(1)

# XPath attribute tests use '@', not '#' ('#class' is invalid XPath and
# raises InvalidSelectorException).  find_element_by_xpath() was removed
# in Selenium 4 in favour of find_element(By.XPATH, ...).
cookie = driver.find_element(
    By.XPATH,
    '//div[@class="privacy-prompt-button primary-button ccmgt_accept_button "]/span',
)
cookie.click()
The desired element:
<div id="ccmgt_explicit_accept" class="privacy-prompt-button primary-button ccmgt_accept_button ">
<span>Accept All</span>
</div>
is a <span> tag having an ancestor <div>.
Solution
To click on the clickable element you need to induce WebDriverWait for the element_to_be_clickable() and you can use either of the following locator strategies:
Using CSS_SELECTOR:
WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.CSS_SELECTOR, "div.privacy-prompt-button.primary-button.ccmgt_accept_button>span"))).click()
Using XPATH:
WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, "//span[text()='Accept All']"))).click()
Note: You have to add the following imports :
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
Your XPath looks correct, but it can be improved.
Also you should use WebDriverWait expected conditions instead of hardcoded sleeps.
As following:
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC

options = Options()
options.add_argument("--start-maximized")
# Raw string: in the original, '\w' inside a normal string literal is an
# invalid escape sequence (DeprecationWarning today, a SyntaxError in
# future Python versions).
s = Service(r'C:\webdrivers\chromedriver.exe')
driver = webdriver.Chrome(options=options, service=s)
url = 'http://jobsite.co.uk/'
wait = WebDriverWait(driver, 10)
driver.get(url)
# The accept button's stable element id is simpler and more robust than
# a class-based XPath.
wait.until(EC.element_to_be_clickable((By.ID, "ccmgt_explicit_accept"))).click()

Can't Find Element Inside iframe

I want to get the data-sitekey, but it is inside the iframe.
I can only get the element with class="container"; I can't find the elements inside of it.
How can I get the data-sitekey?
driver.get(url)
# Elements inside an <iframe> are not reachable from the default
# content; switch the driver's context into the frame first.
driver.switch_to.frame("main-iframe")
container= driver.find_element(By.CLASS_NAME, 'container')
# Printing a WebElement only shows its repr, not its contents.
print(container)
time.sleep(2)
captcha = driver.find_element(By.CLASS_NAME, 'g-recaptcha')
print(captcha)
The reCAPTCHA element is within an <iframe>
Solution
To extract the value of the data-sitekey attribute you have to:
Induce WebDriverWait for the desired frame to be available and switch to it.
Induce WebDriverWait for the visibility_of_element_located.
You can use either of the following locator strategies:
Using CSS_SELECTOR:
WebDriverWait(driver, 20).until(EC.frame_to_be_available_and_switch_to_it((By.CSS_SELECTOR,"iframe#main-iframe")))
print(WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.g-recaptcha"))).get_attribute("data-sitekey"))
Using XPATH:
# Switch into the frame, then read data-sitekey from the reCAPTCHA div.
# Fixed from the original: XPath attribute tests use '@', not '#', and
# the stray .get_attribute() call belongs on the element lookup, not on
# the frame switch (the original lines had unbalanced parentheses).
WebDriverWait(driver, 20).until(EC.frame_to_be_available_and_switch_to_it((By.XPATH, "//iframe[@id='main-iframe']")))
print(WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.XPATH, "//div[@class='g-recaptcha']"))).get_attribute("data-sitekey"))
Note : You have to add the following imports :
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
This is how you get that information:
from selenium import webdriver
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.firefox.options import Options as Firefox_Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support import expected_conditions as EC
import time as t

firefox_options = Firefox_Options()
# firefox_options.add_argument("--width=1280")
# firefox_options.add_argument("--height=720")
# firefox_options.headless = True
driverService = Service('chromedriver/geckodriver')
browser = webdriver.Firefox(service=driverService, options=firefox_options)

url = 'https://premier.hkticketing.com/'
browser.get(url)
t.sleep(5)
# XPath attribute tests use '@', not '#' ('#id' is invalid XPath and
# raises InvalidSelectorException).
WebDriverWait(browser, 20).until(EC.frame_to_be_available_and_switch_to_it((By.XPATH, "//*[@id='main-iframe']")))
print('switched')
t.sleep(5)
# Read the data-sitekey attribute from the reCAPTCHA container.
element_x = WebDriverWait(browser, 20).until(EC.element_to_be_clickable((By.XPATH, "//div[@class='g-recaptcha']")) )
print(element_x.get_attribute('data-sitekey'))
Result printed in terminal:
switched
6Ld38BkUAAAAAPATwit3FXvga1PI6iVTb6zgXw62
Setup is for linux/Firefox/geckodriver, but you can adapt it to your own system, just mind the imports, and the code after defining the browser.
Selenium docs: https://www.selenium.dev/documentation/

How to crawl the title of the page?

I don't know how to crawl the title of the page. Below is my code (it's simple), but I have no idea where it goes wrong. If you have any idea, please let me know — thank you.
from selenium import webdriver
from selenium.webdriver.chrome.service import Service

url="https://sukebei.nyaa.si/?s=seeders&o=desc&p=1"
driver_path = "C:\\webdriver\\chromedriver.exe"
option = webdriver.ChromeOptions()
# Selenium 4 removed the positional executable_path argument; the driver
# path must be passed through a Service object.
driver = webdriver.Chrome(service=Service(driver_path), options=option)
driver.implicitly_wait(10)
driver.get(url)
# find_element_by_xpath() was removed in Selenium 4, and .text on the
# non-displayed <head>/<title> element comes back empty anyway;
# driver.title returns the page title directly.
print(driver.title)
To crawl the title of the page you have to induce WebDriverWait for the visibility_of_element_located() for the <table> with torrent-list and you can use either of the following Locator Strategies:
Using CSS_SELECTOR:
driver.get('https://sukebei.nyaa.si/?s=seeders&o=desc&p=1')
WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "table.torrent-list")))
print(driver.title)
Using XPATH:
driver.get('https://sukebei.nyaa.si/?s=seeders&o=desc&p=1')
# '@class' (not '#class') is the XPath syntax for an attribute test.
WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.XPATH, "//table[contains(@class, 'torrent-list')]")))
print(driver.title)
Console Output:
Browse :: Sukebei
Note : You have to add the following imports :
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium import webdriver
from selenium.webdriver.chrome.service import Service

url="https://sukebei.nyaa.si/?s=seeders&o=desc&p=1"
driver_path = "C:\\webdriver\\chromedriver.exe"
option = webdriver.ChromeOptions()
# Selenium 4 removed the positional executable_path argument; the driver
# path must be passed through a Service object.
driver = webdriver.Chrome(service=Service(driver_path), options=option)
driver.implicitly_wait(10)
driver.get(url)
# driver.title returns the page title directly.
print(driver.title)

Time out exception with WebDriverWait despite faster internet and element present

I am trying to scrape this:
https://www.lanebryant.com/chiffon-faux-wrap-fit-flare-midi-dress/prd-355958#color/0000091393
And this is my code:
# `d` is the WebDriver instance created earlier in the script.
wait = WebDriverWait(d, 10)
# Close the promotional pop-up that covers the page on load.
# Fixed from the original: XPath attribute tests use '@', not '#'
# ('#id'/'#class' is invalid XPath).
close = wait.until(EC.element_to_be_clickable((By.XPATH, "//a[@id='closeButton']")))
close.click()
time.sleep(5)
chart = wait.until(EC.element_to_be_clickable((By.XPATH, "//div[contains(*,'Size Guide')][@class='size-chart-link']")))
chart.click()
It first closes the pop up and then clicks the size guide, However, it always gives timeout exception and works only a couple of times.
The PARTIAL_LINK_TEXT Size Guide is pretty much unique within the page, so your best bet would be to:
Induce WebDriverWait for invisibility_of_element() for the wrapper element
Induce WebDriverWait for the element_to_be_clickable() for the desired element
You can use the following Locator Strategy:
Code Block (using XPATH and PARTIAL_LINK_TEXT):
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC

options = webdriver.ChromeOptions()
options.add_argument('start-maximized')
# Selenium 4 removed the executable_path keyword; use a Service object.
driver = webdriver.Chrome(options=options, service=Service(r'C:\WebDrivers\chromedriver.exe'))
driver.get('https://www.lanebryant.com/chiffon-faux-wrap-fit-flare-midi-dress/prd-355958#color/0000091393')
# XPath attribute tests use '@', not '#'.
WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, "//a[@id='closeButton']"))).click()
# Wait for the modal's mask overlay to disappear before clicking through it.
WebDriverWait(driver, 20).until(EC.invisibility_of_element((By.XPATH, "//div[@id='tinymask']")))
WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.PARTIAL_LINK_TEXT, "Size Guide"))).click()
Code Block (using CSS_SELECTOR and PARTIAL_LINK_TEXT):
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC

options = webdriver.ChromeOptions()
options.add_argument('start-maximized')
# Selenium 4 removed the executable_path keyword; use a Service object.
driver = webdriver.Chrome(options=options, service=Service(r'C:\WebDrivers\chromedriver.exe'))
driver.get('https://www.lanebryant.com/chiffon-faux-wrap-fit-flare-midi-dress/prd-355958#color/0000091393')
# Close the pop-up first, then wait for its mask overlay to disappear
# before clicking the element that was underneath it.
WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.CSS_SELECTOR, "a#closeButton"))).click()
WebDriverWait(driver, 20).until(EC.invisibility_of_element((By.CSS_SELECTOR, "div#tinymask")))
WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.PARTIAL_LINK_TEXT, "Size Guide"))).click()
Browser Snapshot:
Use the JavaScript executor to click on the element. It seems the Selenium WebDriver is unable to click on the element natively. Use the below XPath:
d.get("https://www.lanebryant.com/chiffon-faux-wrap-fit-flare-midi-dress/prd-355958#color/0000091393")
wait = WebDriverWait(d, 10)
# XPath attribute tests use '@', not '#'.
close = wait.until(EC.element_to_be_clickable((By.XPATH, "//a[@id='closeButton']")))
close.click()
chart = wait.until(EC.element_to_be_clickable((By.XPATH, "//div[@class='size-chart-link']/a[contains(.,'Size Guide')]")))
# Click via JavaScript because the native click is being intercepted.
d.execute_script("arguments[0].click();", chart)
Browser snapshot:

How can i scrape the text from a located element using Selenium and Python

I am trying to run the following code
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
import time

options = Options()
options.add_argument("start-maximized")
options.add_argument("disable-infobars")
options.add_argument("--disable-extensions")
driver = webdriver.Chrome(options=options)
driver.get('https://theunderminejournal.com/#eu/draenor/battlepet/1155')
time.sleep(20) #bypass cloudflare
# find_element_by_xpath() was removed in Selenium 4; use
# find_element(By.XPATH, ...).  XPath attribute tests use '@', not '#'.
price = driver.find_element(By.XPATH, '//*[@id="battlepet-page"]/div[1]/table/tr[3]/td/span')
# Print the element's visible text; printing the element itself only
# shows its WebElement repr, never the price.
print(price.text)
so I can scrape the "Current Price" from the page. But this XPath location won't return the text value (I also tried the "text" variant at the end, with no success).
thanks in advance for any reply
First, use WebdriverWait to wait for the element instead of sleep.
Second, Your locator is not finding the element.
Try this,
driver.get('https://theunderminejournal.com/#eu/draenor/battlepet/1155')
# XPath attribute tests use '@', not '#'.
price = WebDriverWait(driver,30).until(EC.visibility_of_element_located((By.XPATH,"//div[@id='battlepet-page']/div/table/tr[@class='current-price']/td/span")))
print(price.text)
To use wait import the followings,
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
You should wait for visibility of element before getting text. Check WebDriverWait in example below:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.ui import WebDriverWait
# Fixed typo: the original line read 'rom selenium...', a SyntaxError.
from selenium.webdriver.chrome.options import Options

options = Options()
options.add_argument("start-maximized")
options.add_argument("disable-infobars")
options.add_argument("--disable-extensions")
driver = webdriver.Chrome(options=options)
wait = WebDriverWait(driver, 20)
driver.get('https://theunderminejournal.com/#eu/draenor/battlepet/1155')
# Wait until the price element is visible before reading its text.
current_price = wait.until(ec.visibility_of_element_located((By.CSS_SELECTOR, ".current-price .price"))).text
print(current_price)
To scrape the value of Current Price from the webpage you need to induce WebDriverWait for the visibility_of_element_located() and you can use either of the following Locator Strategies:
Using CSS_SELECTOR:
print(WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "tr.current-price td>span"))).text)
Using XPATH:
print(WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.XPATH, "//th[text()='Current Price']//following::td[1]/span"))).text)
Note : You have to add the following imports :
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC

Categories

Resources