Get dynamic value with Selenium - python

I am trying to get this number (-22.65) that changes in the website
Page code
I have tried with the xpath, with contain text...
text = driver.find_element_by_xpath("//*[contains(text(),'valueValue-2KhwsEwE')]").text
print(text)
But I never get the number itself... instead I get the following error message:
File "C:\Users\ESJOMAN2.spyder-py3\temp.py", line 22, in
text = driver.find_element_by_xpath("//*[contains(text(),'valueValue-2KhwsEwE')]").text
File "C:\Users\ESJOMAN2\Anaconda3\lib\site-packages\selenium\webdriver\remote\webdriver.py", line 394, in find_element_by_xpath
return self.find_element(by=By.XPATH, value=xpath)
File "C:\Users\ESJOMAN2\Anaconda3\lib\site-packages\selenium\webdriver\remote\webdriver.py", line 976, in find_element
return self.execute(Command.FIND_ELEMENT, {
File "C:\Users\ESJOMAN2\Anaconda3\lib\site-packages\selenium\webdriver\remote\webdriver.py", line 321, in execute
self.error_handler.check_response(response)
File "C:\Users\ESJOMAN2\Anaconda3\lib\site-packages\selenium\webdriver\remote\errorhandler.py", line 242, in check_response
raise exception_class(message, screen, stacktrace)
NoSuchElementException: no such element: Unable to locate element:
{"method":"xpath","selector":"//*[contains(text(),'valueValue-2KhwsEwE')]"}
(Session info: chrome=89.0.4389.90)
Any ideas?

Try this:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
try:
    # Wait up to 20 s for the element to be present in the DOM before
    # touching it (replace 'ELEMENT XPATH' with the real locator).
    text = WebDriverWait(driver, 20).until(
        EC.presence_of_element_located(
            (By.XPATH, 'ELEMENT XPATH'))
    )
except:
    # NOTE(review): this bare except swallows the TimeoutException; if the
    # wait failed, `text` was never assigned and the print below raises
    # NameError on a quit driver. Consider re-raising or returning here.
    driver.quit()
print(text.text)

Try to use something like that:
text = driver.find_element_by_xpath("//div[contains(@class,'sourcesWrapper')]//div[contains(@class,'valueValue')]").text
or
text = driver.find_element_by_xpath("//div[contains(@class,'sourcesWrapper')]//div[contains(@class,'valueValue')]").get_attribute('value')
before that, use
time.sleep(5)
so the code snippet looks like:
time.sleep(5)
text = driver.find_element_by_xpath("//div[contains(@class,'sourcesWrapper')]//div[contains(@class,'valueValue')]").text
print(text)
By the way, have you checked whether your element is inside an iframe? Is there an iframe on your page?

Related

Message: Unable to locate element, Selenium Python

I'm trying to get access to this page "fullcollege" with a bot I'm making for students. The problem is that I can't even select an element from it, and this error shows up. I have recently tested my code with another webpages like instagram and everything works perfectly. Anyone knows what can I do to solve this? Thanks in advance.
My code:
from time import sleep
from selenium import webdriver
# Question code: open the fullcollege login page in Firefox and try to fill
# in the credentials. Implicit wait gives every lookup up to 5 s to appear.
browser = webdriver.Firefox()
browser.implicitly_wait(5)
browser.get('https://www.fullcollege.cl/fullcollege/')
sleep(5)
# NOTE(review): '#id=' looks like a scraping artifact of '@id=' — the XPaths
# were presumably //*[@id='...'] originally.
# These lookups raise NoSuchElementException because the login form lives
# inside an iframe (per the answer below): switch to the frame first.
username_input = browser.find_element_by_xpath("//*[#id='textfield-3610-inputEl']")
password_input = browser.find_element_by_xpath("//*[#id='textfield-3611-inputEl']")
username_input.send_keys("username")
password_input.send_keys("password")
sleep(5)
browser.close()
The error:
Traceback (most recent call last):
File "c:\Users\marti\OneDrive\Escritorio\woo\DiscordBot\BetterCollege\tester.py", line 11, in <module>
username_input = browser.find_element_by_xpath("//*[#id='textfield-3610-inputEl']")
File "C:\Users\marti\AppData\Local\Programs\Python\Python38\lib\site-packages\selenium\webdriver\remote\webdriver.py", line 394, in find_element_by_xpath
return self.find_element(by=By.XPATH, value=xpath)
File "C:\Users\marti\AppData\Local\Programs\Python\Python38\lib\site-packages\selenium\webdriver\remote\webdriver.py", line 976, in find_element
return self.execute(Command.FIND_ELEMENT, {
File "C:\Users\marti\AppData\Local\Programs\Python\Python38\lib\site-packages\selenium\webdriver\remote\webdriver.py", line 321, in execute
self.error_handler.check_response(response)
File "C:\Users\marti\AppData\Local\Programs\Python\Python38\lib\site-packages\selenium\webdriver\remote\errorhandler.py", line 242, in check_response
raise exception_class(message, screen, stacktrace)
selenium.common.exceptions.NoSuchElementException: Message: Unable to locate element: //*[#id='textfield-3610-inputEl']
The username and password fields are inside an iframe; you need to switch to it first.
browser.get('https://www.fullcollege.cl/fullcollege/')
# Wait up to 10 s for the login iframe, then switch the driver's context
# into it — element lookups only see the current frame's document.
WebDriverWait(browser, 10).until(
    EC.frame_to_be_available_and_switch_to_it((By.CSS_SELECTOR, "iframe#logFrame")))
sleep(5)
# '@id' restored here — the scraped page rendered it as '#id', which is
# invalid XPath.
username_input = browser.find_element_by_xpath("//input[@id='textfield-3610-inputEl']")
password_input = browser.find_element_by_xpath("//input[@id='textfield-3611-inputEl']")
username_input.send_keys("username")
password_input.send_keys("password")
import below libraries as well.
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By

Unable to locate element, can't scrape 'reviews'

I'm scraping the product reviews from the sephora website which contains javascript(reviews), but I can't scrape. This is my code:
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.expected_conditions import presence_of_element_located as EC
import time
# Question code: tries to scrape JavaScript-rendered Sephora reviews.
chrome_path = '/media/danish-khan/New Volume/Web_scraping/rgcrawler2/chromedriver'
driver = webdriver.Chrome(chrome_path)
# NOTE(review): chrome_options is created after the driver and never used.
chrome_options = Options()
url = 'https://www.sephora.com/product/the-porefessional-face-primer-P264900?skuId=1259068&icid2=products%20grid:p264900:product'
driver.get(url)
# NOTE(review): WebDriverWait(...) alone is a no-op without .until(...).
WebDriverWait(driver, 70)
time.sleep(70)
# NOTE(review): find_element_by_class_name cannot take the compound class
# 'css-1jg2pb9 eanm77i0' — it becomes the CSS selector '.css-1jg2pb9 eanm77i0'
# which matches nothing, producing the NoSuchElementException shown below.
review = driver.find_element_by_class_name('css-1jg2pb9 eanm77i0')
# NOTE(review): the loop body's indentation was lost when this was pasted.
for post in review:
#try:
# element = WebDriverWait(driver, 50).until(
# EC.presence_of_element_located((By.XPATH, "//div[#class = 'css-1jg2pb9 eanm77i0']"))
# )
#finally:
# driver.quit()
#
print(review)
# NOTE(review): stray trailing quote — presumably a paste artifact.
driver.close()'
The output is:
Traceback (most recent call last):
File "resgt.py", line 15, in
review = driver.find_element_by_class_name('css-1jg2pb9 eanm77i0')
File "/home/danish-khan/miniconda3/lib/python3.7/site-packages/selenium/webdriver/remote/webdriver.py", line 564, in find_element_by_class_name
return self.find_element(by=By.CLASS_NAME, value=name)
File "/home/danish-khan/miniconda3/lib/python3.7/site-packages/selenium/webdriver/remote/webdriver.py", line 978, in find_element
'value': value})['value']
File "/home/danish-khan/miniconda3/lib/python3.7/site-packages/selenium/webdriver/remote/webdriver.py", line 321, in execute
self.error_handler.check_response(response)
File "/home/danish-khan/miniconda3/lib/python3.7/site-packages/selenium/webdriver/remote/errorhandler.py", line 242, in check_response
raise exception_class(message, screen, stacktrace)
selenium.common.exceptions.NoSuchElementException: Message: no such element: Unable to locate element: {"method":"css selector","selector":".css-1jg2pb9 eanm77i0"}
(Session info: chrome=85.0.4183.102)
The reviews on that page are loaded asynchronously — specifically, when the section is scrolled into view. You will have to scroll to an element close to where the reviews are and wait until the section appears. Only then will you be able to retrieve the element.
I was able to do this with this code
# Scroll halfway down the page so the lazy-loaded review section renders.
driver.execute_script("window.scrollTo(0, document.body.scrollHeight/2);")
time.sleep(10)
# Compound class handled via a CSS selector: '.css-1jg2pb9.eanm77i0'.
review = driver.find_element_by_css_selector('.css-1jg2pb9.eanm77i0')
# review = driver.find_element_by_xpath('/html/body/div[1]/div[2]/div/main/div/div[2]/div[1]/div/div[5]/div/div[2]/div[1]/div[2]')
print(review)
I left the XPath in there as that's what I used to get it the first time.
Note: you may have to adjust the timing and the scroll height to get it right consistently.

python with selenium automating login

I'm new to selenium
Here I want to ask about a problem code (actually not mine)
this is the code
# Question code: log in to Google via Stack Overflow's OAuth button, then
# try to enable "less secure apps". Credentials arrive as "email|password".
aww = email.strip().split('|')
driver = webdriver.Chrome()
driver.get("https://stackoverflow.com/users/signup?ssrc=head&returnurl=%2fusers%2fstory%2fcurrent")
time.sleep(5)
# Click the "Log in with Google" provider button.
loginform = driver.find_element_by_xpath("//button[#data-provider='google']")
loginform.click()
mailform = driver.find_element_by_id('identifierId')
mailform.send_keys(aww[0])
driver.find_element_by_xpath("//div[#id='identifierNext']").click()
time.sleep(3)
passform = driver.find_element_by_css_selector("input[type='password']")
passform.send_keys(aww[1])
driver.find_element_by_id('passwordNext').click()
time.sleep(3)
driver.get("https://myaccount.google.com/lesssecureapps?pli=1")
# NOTE(review): file handle is never closed; prefer a `with open(...)` block.
open('LIVE.txt', 'a+').write(f"CHECKED : {aww[0]}|{aww[1]}")
time.sleep(3)
# NOTE(review): '#class' is presumably a scraping artifact of '@class'; this
# is the line that raises the NoSuchElementException shown below.
lessoff = driver.find_element_by_xpath('//div[#class="hyMrOd "]/div/div/div//div[#class="N9Ni5"]').click()
driver.delete_all_cookies()
driver.close()
I'm using those code for automating turn on the less-secure apps from Gmail
and the error will pop up like this
quote Traceback (most recent call last):
File "C:\Users\ASUS\Downloads\ok\less.py", line 59, in
lessoff = driver.find_element_by_xpath('//div[#class="hyMrOd "]/div/div/div//div[#class="N9Ni5"]').click()
File "C:\Users\ASUS\AppData\Local\Programs\Python\Python39\lib\site-packages\selenium\webdriver\remote\webdriver.py", line 394, in find_element_by_xpath
return self.find_element(by=By.XPATH, value=xpath)
File "C:\Users\ASUS\AppData\Local\Programs\Python\Python39\lib\site-packages\selenium\webdriver\remote\webdriver.py", line 976, in find_element
return self.execute(Command.FIND_ELEMENT, {
File "C:\Users\ASUS\AppData\Local\Programs\Python\Python39\lib\site-packages\selenium\webdriver\remote\webdriver.py", line 321, in execute
self.error_handler.check_response(response)
File "C:\Users\ASUS\AppData\Local\Programs\Python\Python39\lib\site-packages\selenium\webdriver\remote\errorhandler.py", line 242, in check_response
raise exception_class(message, screen, stacktrace)
selenium.common.exceptions.NoSuchElementException: Message: no such element: Unable to locate element: {"method":"xpath","selector":"//div[#class="hyMrOd "]/div/div/div//div[#class="N9Ni5"]"}
(Session info: chrome=86.0.4240.183)
any help gonna be helpfull,sorry for my english before :)
You can simply target this xpath and .click to toggle the less secure apps.
# The original passed a CSS selector to find_element_by_xpath; as XPath the
# toggle is located like this (.click() returns None, so lessoff is None):
lessoff = driver.find_element_by_xpath("//input[@type='checkbox']").click()
It looks like it couldn't find the element it's looking for so give some time to load the element. You can check with Wait().until.
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait as wait
# Block until the element exists in the DOM, up to 10 s; raises
# TimeoutException if it never appears.
wait(driver, 10).until(EC.presence_of_element_located((By.XPATH, 'YOUR_XPATH')))
When you try to click an element, first make sure it is there. The code above waits up to 10 seconds for the element to be located; if it is not located within that time, it throws an exception.

I am getting error in scraping content through selenium python

I am scraping the title of jobs results on https://www.indeed.ae/jobs-in-dubai through selenium. i think .text is not working.
i am running the code through selenium which go to main website, enter selective keyword and then scrape all titles from result. but i am getting error, how can i solve this error
here is my code
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
# Question code: search indeed.ae for Dubai jobs and print the result titles.
Path = "C:\Program Files (x86)\chromedriver.exe"
driver = webdriver.Chrome(Path)
driver.get("https://indeed.ae/")
print(driver.title)
# Fill the location box (name="l") and submit with Enter.
search = driver.find_element_by_name("l")
search.send_keys("Dubai")
search.send_keys(Keys.RETURN)
# NOTE(review): the try-block indentation was lost when this was pasted.
try:
td = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.ID, "resultsCol"))
)
divs = td.find_elements_by_tag_name("div")
for div in divs:
# NOTE(review): not every div under resultsCol has a .title descendant, so
# this raises NoSuchElementException on the first one without it (see the
# answer below, which skips those divs).
header = div.find_element_by_class_name("title")
print(header)
finally:
driver.quit()
driver.quit()
and i am getting following error
Job Search | Indeed
Traceback (most recent call last):
File "C:/Users/hp/Desktop/python projects/selenium-pycharm/selenium-bot.py", line 24, in <module>
header = div.find_element_by_class_name("title")
File "C:\Users\hp\Desktop\python projects\selenium-pycharm\venv\lib\site-packages\selenium\webdriver\remote\webelement.py", line 398, in find_element_by_class_name
return self.find_element(by=By.CLASS_NAME, value=name)
File "C:\Users\hp\Desktop\python projects\selenium-pycharm\venv\lib\site-packages\selenium\webdriver\remote\webelement.py", line 659, in find_element
{"using": by, "value": value})['value']
File "C:\Users\hp\Desktop\python projects\selenium-pycharm\venv\lib\site-packages\selenium\webdriver\remote\webelement.py", line 633, in _execute
return self._parent.execute(command, params)
File "C:\Users\hp\Desktop\python projects\selenium-pycharm\venv\lib\site-packages\selenium\webdriver\remote\webdriver.py", line 321, in execute
self.error_handler.check_response(response)
File "C:\Users\hp\Desktop\python projects\selenium-pycharm\venv\lib\site-packages\selenium\webdriver\remote\errorhandler.py", line 242, in check_response
raise exception_class(message, screen, stacktrace)
selenium.common.exceptions.NoSuchElementException: Message: no such element: Unable to locate element: {"method":"css selector","selector":".title"}
(Session info: chrome=83.0.4103.116)
Process finished with exit code 1
thanks in advance
You cannot find the title because you fetch all of the divs inside resultsCol, and only some of those divs contain a title.
Try this :
try:
    # Wait until the results column is present before scanning it.
    td = WebDriverWait(driver, 10).until(
        EC.presence_of_element_located((By.ID, "resultsCol"))
    )
    divs = td.find_elements_by_tag_name("div")
    #print(divs)
    for div in divs:
        try:
            # Not every div carries a job title; skip the ones that don't.
            header = div.find_element_by_class_name("title")
            print(header.text)
        except:
            continue
finally:
    driver.quit()
driver.quit()
which give title as output :
Receptionist
Administrative Assistant/ Document Controller
RECEPTIONIST
ADMIN OFFICER IN UAE
Data Entry Assistant (Fresh Graduate)
Receptionist
Replenishment Associate - Light Household - Hypermarket
DOCUMENT CONTROLLER
School Administrative Assistant - Dubai
ACCOUNTANT

python - Find email address on page with selenium

I am trying to get a list of email addresses from a website and am very close. The code I have can be seen below. I am getting the following error.
What happens is that there is a page of links which are then clicked on and in the following page there is an email address.
I am trying to print out the email address inside each of the pages after the link is clicked.
Here is an example of a page that the link clicks through to.
xTraceback (most recent call last): File "scrape.py", line 34, in
lookup(driver) File "scrape.py", line 26, in lookup
emailAdress = driver.find_element_by_xpath('//div[#id="widget-contact"]//a‌​').get_attribute('hr‌​ef')
File
"/usr/local/lib/python2.7/site-packages/selenium/webdriver/remote/webdriver.py",
line 293, in find_element_by_xpath
return self.find_element(by=By.XPATH, value=xpath) File "/usr/local/lib/python2.7/site-packages/selenium/webdriver/remote/webdriver.py",
line 752, in find_element
'value': value})['value'] File "/usr/local/lib/python2.7/site-packages/selenium/webdriver/remote/webdriver.py",
line 236, in execute
self.error_handler.check_response(response) File "/usr/local/lib/python2.7/site-packages/selenium/webdriver/remote/errorhandler.py",
line 192, in check_response
raise exception_class(message, screen, stacktrace) selenium.common.exceptions.InvalidSelectorException:
I am using python 2.7.13.
# -*- coding: utf-8 -*-
from lxml import html
import requests
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
# NOTE(review): this is Python 2.7 question code; the indentation of the
# function bodies was lost when it was pasted. Code kept verbatim.
def init_driver():
# Create a Firefox driver with a 5 s explicit-wait helper attached to it.
driver = webdriver.Firefox()
driver.wait = WebDriverWait(driver, 5)
return driver
def lookup(driver):
driver.get("http://www.sportbirmingham.org/directory?sport=&radius=15&postcode=B16+8QG&submit=Search")
try:
# Click each directory entry, then read the contact link's href.
for link in driver.find_elements_by_xpath('//h2[#class="heading"]/a'):
link.click()
# NOTE(review): this XPath contains invisible characters copied from a web
# page (the cause of the InvalidSelectorException — see answer below), and
# '#id'/'#class' are presumably scraping artifacts of '@id'/'@class'.
emailAdress = driver.find_element_by_xpath('//div[#id="widget-contact"]//a‌​').get_attribute('hr‌​ef')
print emailAdress
except TimeoutException:
print "not found"
if __name__ == "__main__":
driver = init_driver()
lookup(driver)
time.sleep(5)
driver.quit()
When I try and continue to the next page of links I get the following error
File "scrape.py", line 43, in
lookup(driver) File "scrape.py", line 26, in lookup
links.extend([link.get_attribute('href') for link in driver.find_elements_by_xpath('//h2[#class="heading"]/a')]) File
"/usr/local/lib/python2.7/site-packages/selenium/webdriver/remote/webelement.py",
line 139, in get_attribute
self, name) File "/usr/local/lib/python2.7/site-packages/selenium/webdriver/remote/webdriver.py",
line 465, in execute_script
'args': converted_args})['value'] File "/usr/local/lib/python2.7/site-packages/selenium/webdriver/remote/webdriver.py",
line 236, in execute
self.error_handler.check_response(response) File "/usr/local/lib/python2.7/site-packages/selenium/webdriver/remote/errorhandler.py",
line 192, in check_response
raise exception_class(message, screen, stacktrace) selenium.common.exceptions.StaleElementReferenceException: Message:
The element reference is stale. Either the element is no longer
attached to the DOM or the page has been refreshed.
You just need more precise X-PATH (aslo with calling text method):
# '@class' restored — the scraped page rendered it as '#class' (invalid XPath).
emailAdress = driver.find_element_by_xpath('//div[@class="body"]/dl/dd[2]').text
But this example works with Python3. Let me know if it works for you.
I would also recommend to use "XPath Helper" extension for Chrome.
This seem to be copy/paste issue. Sometimes when you copy code from StackOverflow answers, some hidden characters might be present. Your XPath in Python shell appears like '//div[#id="widget-contact"]//a‌​??'. You should re-write it manually to get rid of those ??...
Also note that your code will not work as you stuck on the first iteration- there is no turning back to search page.
Try to use below code instead:
from selenium.common.exceptions import NoSuchElementException

def lookup(driver):
    """Collect every directory detail-page URL (across all result pages),
    then visit each one and print its contact link text."""
    driver.get("http://www.sportbirmingham.org/directory?sport=&radius=15&postcode=B16+8QG&submit=Search")
    # Gather hrefs up front ('@class' restored from the scraped '#class');
    # navigating by URL afterwards avoids stale element references.
    links = [link.get_attribute('href') for link in driver.find_elements_by_xpath('//h2[@class="heading"]/a')]
    # Walk the numbered pagination links ("2", "3", ...) until none is left.
    page_counter = 1
    while True:
        try:
            page_counter += 1
            driver.find_element_by_link_text(str(page_counter)).click()
            links.extend([link.get_attribute('href') for link in driver.find_elements_by_xpath('//h2[@class="heading"]/a')])
        except NoSuchElementException:
            break
    try:
        for link in links:
            driver.get(link)
            try:
                emailAdress = driver.find_element_by_xpath('//div[@id="widget-contact"]//a').text
                print(emailAdress)
            except NoSuchElementException:
                print("No email specified")
    except TimeoutException:
        print("not found")

Categories

Resources