Absolute Path: Selenium Xpath Can't Identify Element - python

I am trying to identify an HTML button with XPath and have tried both the relative and the absolute XPath without success. I am trying to click the button.
The relative path:
click = webdriver.find_element_by_xpath("//onboarding-mobile-fixed-bottom-container/div[1]/div/sprout-button/button").click()
Absolute path: /html/body/cfapp-root/main/cfapp-spa-host/main/onboarding-root/div/div[1]/main/onboarding-business-phone/section/form/onboarding-next-button/onboarding-mobile-fixed-bottom-container/div[2]/sprout-button/button
absolute = webdriver.find_element_by_xpath("/html/body/cfapp-root/main/cfapp-spa-host/main/onboarding-root/div/div[1]/main/onboarding-business-phone/section/form/onboarding-next-button/onboarding-mobile-fixed-bottom-container/div[2]/sprout-button/button").click()
Even when using the absolute xpath (I know, frowned upon practice) I can't get the button to click.
For reference, I am automating: site: https://account.kabbage.com/onboarding/data/number-of-employees; Username: testingoverflow@aol.com; Pw: Kabbage123
(click finish applying; finish applying; working on the continue box)
Any help is much appreciated!!
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from time import sleep, strftime
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
import csv
import xlrd
info = "info.xlsx"
openwb = xlrd.open_workbook(info)
inputws = openwb.sheet_by_index(0)
print(inputws.nrows)
print(inputws.ncols)
print(inputws.cell_value(1,0))
email_log = inputws.cell_value(2,0)
businesslog = inputws.cell_value(2,1)
firstname = inputws.cell_value(2,2)
lastname = inputws.cell_value(2,3)
phone = int(inputws.cell_value(2,4))
employees = int(inputws.cell_value(2,5))
business_types = inputws.cell_value(2,6)
print(email_log)
print(businesslog)
print(firstname)
print(lastname)
print(phone)
sleep(1)
chromedriver_path = 'C:/Users/Documents/Scale/Programs/chromedriver.exe'
webdriver = webdriver.Chrome(executable_path=chromedriver_path)
webdriver.get('https://app.kabbage.com/signup/create_account')
sleep(1)
#input email
input_emails = webdriver.find_element_by_xpath('//*[@id="CreateAccount.EmailAddress_inner"]').send_keys(email_log)
sleep(1)
#re-input email
reinput = webdriver.find_element_by_xpath('//*[@id="CreateAccount.ConfirmEmail_inner"]').send_keys(email_log)
# Password
passwrd = webdriver.find_element_by_xpath('//*[@id="CreateAccount.CreatePassword"]')
sleep(1)
passwrd.send_keys('Paycheck11!!')
sleep(1)
button_started = webdriver.find_element_by_class_name("btn-full-width").click()
sleep(5)
#ApplyNow
#apply = webdriver.find_element_by_class_name('spr-btn spr-btn-primary')
#apply = webdriver.find_elements_by_class_name("spr-btn-primary").click()
#xpath("//div[#class='fc-day-content' and text()='15']")
applynow = webdriver.find_element_by_xpath("//sprout-button/button[contains(#class, 'spr-btn-primary')]").click()
sleep(5)
applyfinal = webdriver.find_element_by_xpath("//sprout-button/button[contains(#class, 'spr-btn-primary')]").click()
sleep(5)
business_name = webdriver.find_element_by_xpath('//*[@id="businessName-input"]').send_keys(businesslog)
business_send = webdriver.find_element_by_xpath("/html/body/cfapp-root/main/cfapp-spa-host/main/onboarding-root/div/div[1]/main/onboarding-business-name/section/form/onboarding-next-button/onboarding-mobile-fixed-bottom-container/div[2]/sprout-button/button").click()
sleep(5)
first_name = webdriver.find_element_by_xpath('//*[@id="lastName-input"]').send_keys(lastname)
last_name = webdriver.find_element_by_xpath('//*[@id="firstName-input"]').send_keys(firstname)
names_send = webdriver.find_element_by_xpath("/html/body/cfapp-root/main/cfapp-spa-host/main/onboarding-root/div/div[1]/main/onboarding-personal-name/section/form/onboarding-next-button/onboarding-mobile-fixed-bottom-container/div[2]/sprout-button/button").click()
sleep(5)
phone_num = webdriver.find_element_by_xpath('//*[@id="businessPhone-input"]').send_keys(phone)
phone_check = webdriver.find_element_by_xpath('//html/body/cfapp-root/main/cfapp-spa-host/main/onboarding-root/div/div[1]/main/onboarding-business-phone/section/form/kbg-consent-box/div/sprout-checkbox/div/label').click()
#phone_send = names_send = webdriver.find_element_by_xpath("/html/body/cfapp-root/main/cfapp-spa-host/main/onboarding-root/div/div[1]/main/onboarding-personal-name/section/form/onboarding-next-button/onboarding-mobile-fixed-bottom-container/div[2]/sprout-button/button").click()
phone_submits = webdriver.find_element_by_xpath("/html/body/cfapp-root/main/cfapp-spa-host/main/onboarding-root/div/div[1]/main/onboarding-business-phone/section/form/onboarding-next-button/onboarding-mobile-fixed-bottom-container/div[2]/sprout-button/button").click()
sleep(5)
num_empl = webdriver.find_element_by_xpath('//*[@id="numberOfEmployees-input"]').send_keys(employees)
#emp_submit = webdriver.find_element_by_xpath("//sprout-button/button[contains(#class, 'spr-btn-block')][2]").click()
sending = webdriver.find_element_by_xpath("//button[@class='spr-btn spr-btn-primary' and contains(text(),'Continue')]").click()

You can use either of these XPaths as a correct relative XPath for the Continue button.
Xpath 1:
*//onboarding-next-button//onboarding-mobile-fixed-bottom-container//div[2]//sprout-button//button[contains(text(),'Continue')]
Xpath 2:
*//sprout-button[@class='desktop-button']//button[contains(text(),'Continue')]
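If the XPath is right but the click still fails, the button may simply not be clickable yet when the script reaches it. A minimal sketch using an explicit wait, assuming the second XPath above and the `webdriver` driver instance from the question:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# Wait up to 10 seconds for the Continue button to become clickable, then click it.
wait = WebDriverWait(webdriver, 10)
continue_button = wait.until(EC.element_to_be_clickable(
    (By.XPATH, "*//sprout-button[@class='desktop-button']//button[contains(text(),'Continue')]")))
continue_button.click()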

Give this a go:
webdriver.find_element_by_xpath("//button[#class="spr-btn spr-btn-primary" and contains(text(),'Continue')]").click()

Related

Accessing an element for scraping data

I want to access the highlighted element. This is part of the HTML for the sub-comments section on the 9gag website. I'm using this meme https://9gag.com/gag/a5EAv9O as an example input for the program.
I used the following code to access it, but it doesn't work.
sub_com_html = item.find_element(By.CSS_SELECTOR, '//*/div/section/section[2]').Get_attribute("innerHTML")
Edit:
I'm able to access the section now and print some sub-comments, thanks to @Arundeep Chohan for correcting my silly mistake. But there's an issue: it accesses the sub-comments section, but it repeats the same sub-comments for different main comments. This screenshot is part of the output, with the main comment and sub-comments as a list. You can see that it repeats the same data, which is wrong. It also gives the sub-comments for only a few of the comments and skips the rest. Theoretically it should work fine, but I don't understand what's going wrong here.
This is the whole code I'm working with now. The goal is to scrape all the comments and sub comments of a meme.
import csv
import time
from selenium.webdriver.remote.webelement import WebElement
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException, ElementClickInterceptedException
from selenium.webdriver.support.wait import WebDriverWait
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
import undetected_chromedriver as uc
if __name__ == '__main__':
    options = Options()
    # options.headless = True
    driver = uc.Chrome(service=Service(ChromeDriverManager().install()), options=options)
    driver.maximize_window()
    driver.get("https://9gag.com/gag/a5EAv9O")
    time.sleep(5)
    # click on I accept cookies
    actions = ActionChains(driver)
    try:
        consent_button = driver.find_element(By.XPATH, '//*[@id="qc-cmp2-ui"]/div[2]/div/button[2]')
        actions.move_to_element(consent_button).click().perform()
    except:
        pass
    for i in range(31):
        actions.click()
        actions.send_keys(Keys.ARROW_DOWN).perform()
        time.sleep(4)
    # click on fresh comments section
    fresh_comments = driver.find_element(By.XPATH, '//*[@id="page"]/div[1]/section[2]/section/header/div/button[2]')
    actions.move_to_element(fresh_comments).click(on_element=fresh_comments).perform()
    time.sleep(5)
    # click on load more comments button to load all the comments
    fresh_comments = driver.find_element(By.CSS_SELECTOR, '.comment-list__load-more')
    actions.move_to_element(fresh_comments).click(on_element=fresh_comments).perform()
    miN = 1000
    results = []
    comments = {}
    while miN <= 20000:
        window = 'window.scrollTo(0,' + str(miN) + ')'
        driver.execute_script(window)
        time.sleep(3)
        # Dealing with all comments
        try:
            # Scrape the main comments
            try:
                All_comments = driver.find_elements(By.CSS_SELECTOR, "div.vue-recycle-scroller__item-view")
            except:
                All_comments = driver.find_elements(By.CSS_SELECTOR, "div.vue-recycle-scroller__item-view")
            del_comm_cnt = 1
            for item in All_comments:
                try:
                    html = item.get_attribute("innerHTML")
                    if "comment-list-item__text" in html:
                        comment = item.find_element(By.CSS_SELECTOR, "div.comment-list-item__text").text
                    elif "comment-list-item__deleted-text" in html:
                        comment = item.find_element(By.CSS_SELECTOR, "div.comment-list-item__deleted-text").text
                        comment = comment + str(del_comm_cnt)
                        del_comm_cnt += 1
                    if(comments.get(comment) == None):
                        sub_coms_list = []
                        comments[comment] = ""
                        # get sub comments
                        if "comment-list-item__replies" in html:
                            # item.find_element(By.CSS_SELECTOR, "div.comment-list-item__replies").click()
                            sub_comments = item.find_element(By.CSS_SELECTOR, "div.comment-list-item__replies")
                            actions.move_to_element(sub_comments).click(on_element=sub_comments).perform()
                            sub_com_section = item.find_element(By.XPATH, '//*/div/section/section[2]')
                            sub_com_html = sub_com_section.get_attribute("innerHTML")
                            #sub_coms = sub_com_section.find_elements(By.CSS_SELECTOR, "section.comment-list-item__wrapper comment-list-item__wrapper_reply")
                            sub_coms = sub_com_section.find_elements(By.CSS_SELECTOR, "div.comment-list-item__text")
                            for com in sub_coms:
                                sub_coms_list.append(com.text)
                            comments[comment] = sub_coms_list
                except:
                    pass
        except:
            pass
        miN = miN + 1500
    driver.quit()
    for i in comments:
        print(i, "\n", comments[i], "\n\n")
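A hedged note on the repeated sub-comments: an XPath passed to item.find_element that starts with // (like '//*/div/section/section[2]' above) still searches the whole document rather than just item's subtree, so every iteration can return the same first section on the page. Prefixing a dot makes the search relative to item. A minimal sketch of the idea, assuming the rest of the path is correct:

# The leading "." anchors the XPath to `item` instead of the document root.
sub_com_section = item.find_element(By.XPATH, './/*/div/section/section[2]')
sub_com_html = sub_com_section.get_attribute("innerHTML")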

How do I press load more button while scraping comments on Instagram with Selenium Python

I'm working on a project that can scrape comments off posts on instagram and write them into an excel file.
Here's my code:
from selenium.webdriver.common.by import By
from selenium import webdriver
import time
import sys
import pandas as pd
from pandas import ExcelWriter
import os.path
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
url = [
    "https://www.instagram.com/p/CcVTqRtJ2gj/",
    "https://www.instagram.com/p/CcXpLHepve-/",
]
user_names = []
user_comments = []
driver = webdriver.Chrome(r"C:\chromedriver.exe")
driver.get(url[0])
time.sleep(3)
username = WebDriverWait(driver, 30).until(EC.element_to_be_clickable((By.CSS_SELECTOR, "input[name='username']")))
password = WebDriverWait(driver, 30).until(EC.element_to_be_clickable((By.CSS_SELECTOR,"input[name='password']")))
username.clear()
username.send_keys("username")
password.clear()
password.send_keys("pwd")
Login_button = (
    WebDriverWait(driver, 2)
    .until(EC.element_to_be_clickable((By.CSS_SELECTOR, "button[type='submit']")))
    .click()
)
time.sleep(4)
not_now = (
    WebDriverWait(driver, 30)
    .until(
        EC.element_to_be_clickable((By.XPATH, '//button[contains(text(), "Not Now")]'))
    )
    .click()
)
for n in url:
    try:
        driver.get(n)
        time.sleep(3)
        load_more_comment = driver.find_element_by_xpath("//button[@class='wpO6b ']")
        print("Found {}".format(str(load_more_comment)))
        i = 0
        while load_more_comment.is_displayed() and i < 10:
            load_more_comment.click()
            time.sleep(1.5)
            load_more_comment = driver.find_element_by_xpath(
                "//button[@class='wpO6b ']"
            )
            print("Found {}".format(str(load_more_comment)))
            i += 1
        user_names.pop(0)
        user_comments.pop(0)
    except Exception as e:
        print(e)
        pass
    comment = driver.find_elements_by_class_name("gElp9 ")
    for c in comment:
        container = c.find_element_by_class_name("C4VMK")
        name = container.find_element_by_class_name("_6lAjh ").text
        content = container.find_element_by_class_name("MOdxS ").text
        content = content.replace("\n", " ").strip().rstrip()
        user_names.append(name)
        user_comments.append(content)
        print(content)
    user_names.pop(0)
    user_comments.pop(0)
    # export(user_names, user_comments)
driver.close()
df = pd.DataFrame(list(zip(user_names, user_comments)), columns=["Name", "Comments"])
# df.to_excel("Anime Content Engagement.xlsx")
print(df)
And the load-more-comments part doesn't seem to work.
Since there is more than one button with the same class name, I'm not able to choose the right button to click on. I'm a beginner, so if anyone has a solution to this, it would be great.
You can select by the aria-label text:
driver.find_element_by_css_selector("svg._8-yf5[aria-label='TEXT']")
I believe the text inside changes according to the Instagram language, so set it according to what appears in your browser.
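For what it's worth, a minimal sketch of driving the load-more loop with that kind of selector; the aria-label string 'Load more comments' here is an assumption, so check what your browser actually shows:

import time
from selenium.common.exceptions import NoSuchElementException

# Click the load-more icon until it disappears, capped at 10 rounds.
for _ in range(10):
    try:
        more = driver.find_element_by_css_selector("svg._8-yf5[aria-label='Load more comments']")  # assumed label
    except NoSuchElementException:
        break
    more.click()
    time.sleep(1.5)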

Having Trouble Clicking in Date Field with Selenium

I'm trying to scrape a table from the 1/30/2022 slate. However, I get the 'unable to locate element' error when I attempt to click in the date field and change the date from 2/6 to 1/30. I've tried finding by class name as well. Is there another way to do this, or is there something I'm doing wrong?
from ast import Return
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
import time
path = r'C:\Program Files (x86)\chromedriver.exe'
driver = webdriver.Chrome(path)
driver.get('https://rotogrinders.com/resultsdb/nfl')
time.sleep(5)
driver.maximize_window()
time.sleep(10)
search = driver.find_element_by_xpath('//*[@id="navbar-demo1-mobile"]/div[1]/div/span/div')
search.click()
previous = driver.find_element_by_class_name('react-datepicker__navigation react-datepicker__navigation--previous')
previous.click()
time.sleep(5)
date = driver.find_element_by_class_name('react-datepicker__day react-datepicker__day--030 react-datepicker__day--weekend')
date.click()
You are not able to find it because it is inside an iframe. You have to switch to the iframe window first, and then try to access the element.
Also, I see that the date picker allows keying in the dates, so you could use send_keys to type in the date. That makes the code a little simpler, but you could also click through the date picker UI; it's your choice.
Having said that here is the code:
driver.get("https://rotogrinders.com/resultsdb/nfl")
time.sleep(10)
frame = driver.find_element(By.XPATH, "//iframe")
driver.switch_to.frame(frame)
date_picker = WebDriverWait(driver, 30).until(EC.visibility_of_element_located((By.XPATH, "//div[@class='react-datepicker__input-container']//input")))
date_picker.send_keys("01/16/2022")
time.sleep(10)
Try to change the time.sleep calls to explicit waits if possible (WebDriverWait).
wait=WebDriverWait(driver,60)
driver.get('https://rotogrinders.com/resultsdb/nfl')
wait.until(EC.frame_to_be_available_and_switch_to_it((By.XPATH,"//iframe")))
date = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR,'.react-datepicker__input-container input')))
date.send_keys("01/16/2022")
First wait for the iframe and then proceed to click the search element and then send keys.
Import:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
It might be possible to avoid Selenium here. It's just a matter of pulling out some id's to feed into the direct url.
import requests
import datetime
import pandas as pd
dateStr = input('Enter date (YYYY-MM-DD): ')
dateStr_alpha = datetime.datetime.strptime(dateStr, '%Y-%m-%d').strftime('%Y%m%d')
url = f'https://service.fantasylabs.com/contest-sources/?sport_id=1&date={dateStr}'
jsonData = requests.get(url).json()
groupId = jsonData['contest-sources'][0]['draft_groups'][0]['id']
url = f'https://service.fantasylabs.com/live-contests/?sport=NFL&contest_group_id={groupId}'
jsonData = requests.get(url).json()
tables = {}
for each in jsonData['live_contests']:
    contestId = each['contest_id']
    if each['contest_name'] not in tables.keys():
        tables[each['contest_name']] = {}
    url = f'https://dh5nxc6yx3kwy.cloudfront.net/contests/nfl/{dateStr_alpha}/{contestId}/data/'
    jsonData = requests.get(url).json()
    contestUsers = pd.DataFrame(jsonData['users']).T.reset_index(drop=True)
    tables[each['contest_name']]['users'] = contestUsers
    fieldExposures = pd.DataFrame(jsonData['players']).T
    for k, v in jsonData['exposures'].items():
        exposureDf = pd.DataFrame(v['exposureCounts']).T
        exposureDf.columns = [x + f'_top_{k}%' for x in exposureDf.columns]
        fieldExposures = pd.merge(fieldExposures, exposureDf, how='left', left_index=True, right_index=True)
    fieldExposures = fieldExposures.fillna(0).reset_index(drop=True)
    tables[each['contest_name']]['exposures'] = fieldExposures
    print('****** ' + each['contest_name'] + ' ******')
    print(contestUsers, fieldExposures)
Now just call the table by its contest name:
print(tables['NFL $100K Conference Special [$20K to 1st]'])

Python Selenium: Changing from three loops to one loop repeat the same information

I am extracting Google reviews of a restaurant. I am interested in extracting the reviewer name, the rating given by the reviewer, and the text of the review. I used the following code for the extraction:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.action_chains import ActionChains
import time
driver = webdriver.Chrome('')
base_url = 'https://www.google.com/search?tbs=lf:1,lf_ui:9&tbm=lcl&sxsrf=AOaemvJFjYToqQmQGGnZUovsXC1CObNK1g:1633336974491&q=10+famous+restaurants+in+Dunedin&rflfq=1&num=10&sa=X&ved=2ahUKEwiTsqaxrrDzAhXe4zgGHZPODcoQjGp6BAgKEGo&biw=1280&bih=557&dpr=2#lrd=0xa82eac0dc8bdbb4b:0x4fc9070ad0f2ac70,1,,,&rlfi=hd:;si:5749134142351780976,l,CiAxMCBmYW1vdXMgcmVzdGF1cmFudHMgaW4gRHVuZWRpbiJDUjEvZ2VvL3R5cGUvZXN0YWJsaXNobWVudF9wb2kvcG9wdWxhcl93aXRoX3RvdXJpc3Rz2gENCgcI5Q8QChgFEgIIFkiDlJ7y7YCAgAhaMhAAEAEQAhgCGAQiIDEwIGZhbW91cyByZXN0YXVyYW50cyBpbiBkdW5lZGluKgQIAxACkgESaXRhbGlhbl9yZXN0YXVyYW50mgEkQ2hkRFNVaE5NRzluUzBWSlEwRm5TVU56ZW5WaFVsOUJSUkFCqgEMEAEqCCIEZm9vZCgA,y,2qOYUvKQ1C8;mv:[[-45.8349553,170.6616387],[-45.9156414,170.4803685]]'
driver.get(base_url)
WebDriverWait(driver,10).until(EC.element_to_be_clickable((By.XPATH,"//div[./span[text()='Newest']]"))).click()
total_reviews_text = driver.find_element_by_xpath("//div[@class='review-score-container']//div//div//span//span[@class='z5jxId']").text
num_reviews = int (total_reviews_text.split()[0])
all_reviews = WebDriverWait(driver, 20).until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, 'div.gws-localreviews__google-review')))
time.sleep(2)
total_reviews = len(all_reviews)
while total_reviews < num_reviews:
    driver.execute_script('arguments[0].scrollIntoView(true);', all_reviews[-1])
    WebDriverWait(driver, 5, 0.25).until_not(EC.presence_of_element_located((By.CSS_SELECTOR, 'div[class$="activityIndicator"]')))
    #all_reviews = driver.find_elements_by_css_selector('div.gws-localreviews__google-review')
    time.sleep(5)
    all_reviews = WebDriverWait(driver, 5).until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, 'div.gws-localreviews__google-review')))
    print(total_reviews)
    total_reviews += 5
person_info = driver.find_elements_by_xpath("//div[@id='reviewSort']//div[contains(@class,'google-review')]")
rating_info = driver.find_elements_by_xpath("//div[@class='PuaHbe']")
review_text = driver.find_elements_by_xpath("//div[@class='Jtu6Td']")
for person in person_info:
    name = person.find_element_by_xpath("./div/div/div/a").text
    print(name)
for rating in rating_info:
    rating_txt = person.find_element_by_xpath("./g-review-stars/span").get_attribute('aria-label')
    print(rating_txt)
for text in review_text:
    texts = text.find_element_by_xpath("./span").text
    print(texts)
The above code worked as expected. I want to make a slight change: instead of using three loops to display the name, rating, and review text, I wanted to extract the same information using one loop. So I made the following changes to the above code:
reviews_info = driver.find_elements_by_xpath("//div[@class='jxjCjc']")
for review_info in reviews_info:
    name = review_info.find_element_by_xpath("./div/div/a").text
    rating = review_info.find_element_by_xpath("//div[@class='PuaHbe']//g-review-stars//span").get_attribute('aria-label')
    text = review_info.find_element_by_xpath("//div[@class='Jtu6Td']//span").text
    print(name)
    print(rating)
    print(text)
    print()
The problem with the changed code is that it displays the same information (i.e. rating and text) for all reviewer names. I am not sure where I am making the mistake. Any help fixing the issue would be really appreciated.
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
driver = webdriver.Chrome()
base_url = 'https://www.google.com/search?tbs=lf:1,lf_ui:9&tbm=lcl&sxsrf=AOaemvJFjYToqQmQGGnZUovsXC1CObNK1g:1633336974491&q=10+famous+restaurants+in+Dunedin&rflfq=1&num=10&sa=X&ved=2ahUKEwiTsqaxrrDzAhXe4zgGHZPODcoQjGp6BAgKEGo&biw=1280&bih=557&dpr=2#lrd=0xa82eac0dc8bdbb4b:0x4fc9070ad0f2ac70,1,,,&rlfi=hd:;si:5749134142351780976,l,CiAxMCBmYW1vdXMgcmVzdGF1cmFudHMgaW4gRHVuZWRpbiJDUjEvZ2VvL3R5cGUvZXN0YWJsaXNobWVudF9wb2kvcG9wdWxhcl93aXRoX3RvdXJpc3Rz2gENCgcI5Q8QChgFEgIIFkiDlJ7y7YCAgAhaMhAAEAEQAhgCGAQiIDEwIGZhbW91cyByZXN0YXVyYW50cyBpbiBkdW5lZGluKgQIAxACkgESaXRhbGlhbl9yZXN0YXVyYW50mgEkQ2hkRFNVaE5NRzluUzBWSlEwRm5TVU56ZW5WaFVsOUJSUkFCqgEMEAEqCCIEZm9vZCgA,y,2qOYUvKQ1C8;mv:[[-45.8349553,170.6616387],[-45.9156414,170.4803685]]'
driver.get(base_url)
WebDriverWait(driver,10).until(EC.element_to_be_clickable((By.XPATH,"//div[./span[text()='Newest']]"))).click()
total_reviews_text = driver.find_element_by_xpath("//div[@class='review-score-container']//div//div//span//span[@class='z5jxId']").text
num_reviews = int (total_reviews_text.split()[0])
print("NUm reviews=", num_reviews)
all_reviews = WebDriverWait(driver, 20).until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, 'div.gws-localreviews__google-review')))
time.sleep(2)
total_reviews = len(all_reviews)
print("Total reviews=", total_reviews)
s = "(//div[#id='reviewSort']//div[contains(#class,'google-review')])[0]"
b = '0'
a = 1 # Index of Review button
for i in range(10):
    c = str(a)
    s = s.replace(b, c) # Updating the XPath's index in every loop so that it can focus on a new review every time.
    b = str(a)
    a = a + 1
    WebDriverWait(driver, 5, 0.25).until_not(EC.presence_of_element_located((By.CSS_SELECTOR, 'div[class$="activityIndicator"]')))
    time.sleep(5)
    all_reviews = WebDriverWait(driver, 5).until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, 'div.gws-localreviews__google-review')))
    total_reviews += 1
    Info = driver.find_element_by_xpath(s).text
    print(Info)
    print("<------------------------------------------------------>\n\n")

Selenium button not being clicked but is being highlighted

What I'm trying to do is make a Nike product auto-buyer. The problem is that after selecting the size, it doesn't let me click through Selenium. I even tried to click manually, but nothing pops up. This is my code where I try to click (not the full code):
from selenium import webdriver
from selenium.common.exceptions import JavascriptException
from selenium.webdriver import ChromeOptions
import re
from bs4 import BeautifulSoup
import requests
import json
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import os
user = os.environ['USERNAME']
snkrsurl = "https://www.nike.com/t/air-zoom-pegasus-38-womens-running-shoe-wide-gg8GBK/CW7358-500" #input("Please input your SNKRS url \n")
size = float(input("Please input size \n"))
options = ChromeOptions()
options.add_experimental_option('excludeSwitches',['enable-logging'])
options.add_experimental_option("useAutomationExtension", False)
options.add_experimental_option("detach",True)
options.add_argument("--disable-notifications")
chrome = webdriver.Chrome(options=options)
if "https://" in snkrsurl:
pass
elif "http://" in snkrsurl:
pass
else:
snkrsurl = "http://"+snkrsurl
chrome.get(snkrsurl)
with requests.Session() as session:
    soup = BeautifulSoup(session.get(snkrsurl).text, features="lxml")
    script = soup.find("script", string=re.compile('INITIAL_REDUX_STATE')).string
    redux = json.loads(script[script.find('{'):-1])
    products = redux["Threads"]["products"]
wait = WebDriverWait(chrome, 15)
def step1(i,v):
    for key, product in products.items():
        if float(product["skus"][i]["nikeSize"]) == v:
            print("Found")
            if v.is_integer():
                wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="gen-nav-footer"]/nav/button'))).click()
                wait.until(EC.element_to_be_clickable((By.XPATH, "//*[text()='{}']".format(int(v))))).click()
                chrome.execute_script("window.scroll(0,609)")
                wait.until(EC.element_to_be_clickable((By.XPATH, '//*[text()="Add to Bag"]'))).click()
                break
            else:
                wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="gen-nav-footer"]/nav/button'))).click()
                wait.until(EC.element_to_be_clickable((By.XPATH, "//*[text()='{}']".format(v)))).click()
                e = chrome.find_element_by_css_selector("#floating-atc-wrapper > div > button.ncss-btn-primary-dark.btn-lg.add-to-cart-btn")
                chrome.execute_script("arguments[0].scrollIntoView(true);")
                e.click()
                break
        else:
            pass
for i,v in products.items():
    global length
    length = len(v['skus'])
    break
for i in range(length):
    length -= 1
    step1(length,size)
I use window.scroll to go to that element because if I don't, it throws an error saying the element is not interactable. And yes, checkout is only clickable from real Chrome.
Thanks
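One hedged observation about the snippet above: execute_script only defines arguments[0] when the element is passed as an extra argument, so the scrollIntoView call in the else branch never receives e and would raise a JavascriptException. A minimal sketch of the corrected call:

# Pass the element so arguments[0] refers to it inside the script.
chrome.execute_script("arguments[0].scrollIntoView(true);", e)
e.click()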
