Get text from WebElement - python

categories = driver.find_elements(By.XPATH, '//div[starts-with(@class, "item-1EEezFCx")]')
for category in categories:
    try:
        text = driver.find_element(By.XPATH, '//div[text()="{category.text}"]').click()
        print(text)
        time.sleep(2)
    except ElementNotInteractableException:
        pass
Here categories holds the WebElements whose class names all start with item-1EEezFCx. On each iteration I would like to access the text of the WebElement, print it, and click it. How can I access the text element?
Full code (edited):
import os
import time
import selenium.webdriver as webdriver
from selenium.webdriver.support.ui import WebDriverWait, Select
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver import Firefox
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import TimeoutException, NoSuchElementException, ElementNotInteractableException
from bs4 import BeautifulSoup
import pandas as pd

user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:100.0) Gecko/20100101 Firefox/100.0'
path = "C:\\"
FireFoxDriverPath = os.path.join(path, 'Python39', 'geckodriver.exe')
FireFoxProfile = r'C:\Users\username\AppData\Roaming\Mozilla\Firefox\Profiles\ltk7fdt2.default'
options = Options()
options.set_preference('profile', FireFoxProfile)
service = Service(r'C:\Python39\geckodriver.exe')
driver = Firefox(service=service, options=options)
url = "https://www.tradingview.com/markets/cryptocurrencies/prices-all/"
driver.get(url)

# Step 1. Toggle the active currency
currency = 'USD'
active_currency = driver.find_element(By.XPATH, '//span[contains(@class, "modeTitleActive-bJ0BPoV3")]')
if active_currency.text == currency:
    pass
else:
    driver.find_element(By.XPATH, '//input[@type="checkbox"]').click()

# Step 2. Import tables
xlwriter = pd.ExcelWriter('TradingView Crypto Prices.xlsx')
categories = driver.find_elements(By.XPATH, '//div[starts-with(@class, "item-1EEezFCx")]')

# Load columns one by one
for category in categories:
    try:
        driver.find_element(By.XPATH, category).text()
        time.sleep(2)
    except ElementNotInteractableException:
        pass
    load_more = True
    while load_more:
        try:
            driver.find_element(By.CLASS_NAME, 'tv-load-more__btn').click()
            time.sleep(1)
        except ElementNotInteractableException:
            load_more = False
    df = pd.read_html(driver.page_source)[0]
    df.to_excel(xlwriter, sheet_name=category.text, index=False)

xlwriter.save()
driver.quit()

for category in categories:
    print(category.text)
    category.click()
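A minimal sketch of the loop the question asks for (not part of the original post, and assuming driver is the Firefox driver created above): the text of a WebElement is the .text property, not a method, and you can click the element you already hold instead of locating it again.

from selenium.webdriver.common.by import By
from selenium.common.exceptions import ElementNotInteractableException
import time

categories = driver.find_elements(By.XPATH, '//div[starts-with(@class, "item-1EEezFCx")]')
for category in categories:
    try:
        text = category.text      # .text is a property, not a method
        print(text)
        category.click()          # click the element you already have
        time.sleep(2)
    except ElementNotInteractableException:
        pass
# If the element must be re-located by its visible text, an f-string is needed:
# driver.find_element(By.XPATH, f'//div[text()="{category.text}"]').click()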

Related

Data are overwritten in DataFrame

I am trying to scrape the data, but it gets overwritten and the CSV file ends up with the data of only 2 pages. I think the for loop is overwriting the data. Can you recommend a solution? This is the page link: https://www.askgamblers.com/online-casinos/countries/uk/
from selenium import webdriver
import time
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from webdriver_manager.chrome import ChromeDriverManager
from bs4 import BeautifulSoup
import pandas as pd
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from csv import writer

options = webdriver.ChromeOptions()
options.add_argument("--no-sandbox")
options.add_argument("--disable-gpu")
options.add_argument("--window-size=1920x1080")
options.add_argument("--disable-extensions")
driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()))
wait = WebDriverWait(driver, 20)

for page in range(1, 3):
    URL = 'https://www.askgamblers.com/online-casinos/countries/uk/{page}'.format(page=page)
    driver.get(URL)
    time.sleep(2)
    urls = []
    data = []
    page_links = driver.find_elements(By.XPATH, "//div[@class='card__desc']//a[starts-with(@href, '/online')]")
    for link in page_links:
        href = link.get_attribute("href")
        urls.append(href)
    product = []
    for url in urls:
        wev = {}
        driver.get(url)
        time.sleep(1)
        try:
            title = driver.find_element(By.CSS_SELECTOR, "h1.review-intro__title").text
        except:
            pass
        wev['Title'] = title
        soup = BeautifulSoup(driver.page_source, "lxml")
        pays = soup.select("div#tabPayments")
        for pay in pays:
            try:
                t1 = pay.select_one(".review-details-wrapper:nth-child(1) .review-details__item:nth-child(1) .review-details__text").get_text(' ', strip=True)
            except:
                pass
            wev['deposit_method'] = t1
            try:
                t2 = pay.select_one(".review-details-wrapper:nth-child(1) .review-details__item+ .review-details__item .review-details__text").get_text(' ', strip=True)
            except:
                pass
            wev['curriences'] = t2
            try:
                t3 = pay.select_one(" .review-details-wrapper+ .review-details-wrapper .review-details__item:nth-child(1) .review-details__text").get_text(' ', strip=True)
            except:
                pass
            wev['with_drawl method'] = t3
            try:
                t4 = pay.select_one(" .review-details-wrapper+ .review-details-wrapper .review-details__item:nth-child(2) .review-details__text")
                t4 = [i.replace("\n", "") for i in t4 if i.text]
            except:
                pass
            wev['with_drawl_time'] = t4
        product.append(wev)
    df = pd.DataFrame(product)
    df.to_csv('casino.csv')
All results in one file:
from selenium import webdriver
import time
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from webdriver_manager.chrome import ChromeDriverManager
from bs4 import BeautifulSoup
import pandas as pd
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from csv import writer

options = webdriver.ChromeOptions()
options.add_argument("--no-sandbox")
options.add_argument("--disable-gpu")
options.add_argument("--window-size=1920x1080")
options.add_argument("--disable-extensions")
driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()))
wait = WebDriverWait(driver, 20)

product = []
for page in range(1, 4):
    URL = 'https://www.askgamblers.com/online-casinos/countries/uk/{page}'.format(page=page)
    driver.get(URL)
    time.sleep(2)
    urls = []
    data = []
    page_links = driver.find_elements(By.XPATH, "//div[@class='card__desc']//a[starts-with(@href, '/online')]")
    for link in page_links:
        href = link.get_attribute("href")
        urls.append(href)
    for url in urls:
        wev = {}
        driver.get(url)
        time.sleep(1)
        try:
            title = driver.find_element(By.CSS_SELECTOR, "h1.review-intro__title").text
        except:
            pass
        wev['Title'] = title
        soup = BeautifulSoup(driver.page_source, "lxml")
        pays = soup.select("div#tabPayments")
        for pay in pays:
            try:
                t1 = pay.select_one(".review-details-wrapper:nth-child(1) .review-details__item:nth-child(1) .review-details__text").get_text(' ', strip=True)
            except:
                pass
            wev['deposit_method'] = t1
            try:
                t2 = pay.select_one(".review-details-wrapper:nth-child(1) .review-details__item+ .review-details__item .review-details__text").get_text(' ', strip=True)
            except:
                pass
            wev['curriences'] = t2
            try:
                t3 = pay.select_one(" .review-details-wrapper+ .review-details-wrapper .review-details__item:nth-child(1) .review-details__text").get_text(' ', strip=True)
            except:
                pass
            wev['with_drawl method'] = t3
            try:
                t4 = pay.select_one(" .review-details-wrapper+ .review-details-wrapper .review-details__item:nth-child(2) .review-details__text")
                t4 = [i.replace("\n", "") for i in t4 if i.text]
            except:
                pass
            wev['with_drawl_time'] = t4
        product.append(wev)

df = pd.DataFrame(product)
df.to_csv('casino.csv')
In the first loop it runs only 2 times. Change it to 1,4 as below and it will iterate over [1, 2, 3]:
for page in range(1, 4):
Then the data gets overwritten because the output file name is always the same. Change the file name as below:
df.to_csv(f'casino_{page}.csv')
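An alternative sketch (an assumption, not part of the original answer) if you want a single file instead of one per page: append each page's rows to the same CSV and write the header only on the first page. The pages list below is hypothetical stand-in data for the scraped rows.

import pandas as pd

# Hypothetical per-page results standing in for the rows scraped on each page.
pages = [
    [{'Title': 'Casino A', 'deposit_method': 'Visa'}],
    [{'Title': 'Casino B', 'deposit_method': 'Skrill'}],
    [{'Title': 'Casino C', 'deposit_method': 'PayPal'}],
]

for page, product in enumerate(pages, start=1):
    df = pd.DataFrame(product)
    # mode='a' appends; the header is written only once, on the first page.
    df.to_csv('casino.csv', mode='a', index=False, header=(page == 1))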

Can't click on element. ElementNotInteractable SELENIUM

I'm trying to click on a "next page" button but an "ElementNotInteractable" error appears.
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import pandas as pd
import time

options = Options()
options.headless = False
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36'
options.add_argument('user-agent={0}'.format(user_agent))
options.add_argument('window-size=1920x1080')
website = 'https://www.promoopcion.com/bebidas/termos.html'
path = '/Users/diegotrigal/OneDrive/00000001_HDD_TOSHIBA/diegotrigal/PYHTON/chromedriver'
driver = webdriver.Chrome(path, options=options)
driver.get(website)
driver.maximize_window()
# driver.implicitly_wait(30)
# driver.get_screenshot_as_file("screenshot-3.png")

# pagination
pagination = driver.find_element('xpath', '//div[contains(@class, "products-list")]')
pages = pagination.find_elements_by_tag_name('li')
last_page = 8
current_page = 1
product_name = []
product_sku = []
while current_page <= last_page:
    container = driver.find_element_by_xpath('//div[contains(@class, "products-list")]')
    productos = container.find_elements_by_xpath('.//li')
    for product in productos:
        product_name.append(product.find_element_by_class_name("product-item-name").text)
        product_sku.append(product.find_element_by_class_name("product-sku").text)
    current_page = current_page + 1
    driver.execute_script('window.scrollTo(0,2000)')
    try:
        next_page = driver.find_element('xpath', '//li[contains(@class, "pages-item-next")]')
        next_page.click()
    except:
        pass

df_productos = pd.DataFrame({'Nombre': product_name, 'SKU': product_sku})
df_productos.to_csv('termos.csv', index=False)
driver.quit()
# driver.find_element_by_class_name('product-items')
# driver.find_element_by_class_name('product-item-link')
# driver.find_element_by_class_name('product-sku')
I tried changing next_page's XPath to different values, but it doesn't work.
I also tried the is_displayed() check (after the scroll action is done) and it returns False, while is_enabled() returns True.
With this script you can get what you want; I hope the comments in the code help:
# Needed libs
import time
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium import webdriver

# Initiate the driver and navigate
driver = webdriver.Chrome()
driver.maximize_window()
driver.get('https://www.promoopcion.com/bebidas/termos.html?product_list_limit=24')

# Count how many pages we have by counting the number of pagination elements
number_of_pages = len(WebDriverWait(driver, 10).until(EC.presence_of_all_elements_located((By.XPATH, "(//div[@class='pages'])[2]//a[@class='page']"))))

# For every page, take the titles and then click the next button to go to the next page
for i in range(0, number_of_pages):
    product_titles = WebDriverWait(driver, 10).until(EC.presence_of_all_elements_located((By.XPATH, "//a[@class='product-item-link']")))
    for product_title in product_titles:
        print(product_title.text)
    next_button = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.XPATH, "(//a[@title='Siguiente'])[2]")))
    driver.execute_script("arguments[0].scrollIntoView();", next_button)
    next_button.click()
    time.sleep(2)

# At this point we are on the last page, so we take the products of the last page
product_titles = WebDriverWait(driver, 10).until(EC.presence_of_all_elements_located((By.XPATH, "//a[@class='product-item-link']")))
for product_title in product_titles:
    print(product_title.text)
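If a normal click still fails even though is_enabled() returns True (as noted in the question), one common fallback, sketched here as an assumption rather than as part of the original answer, is to dispatch the click through JavaScript once the element is present:

from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

driver = webdriver.Chrome()
driver.get('https://www.promoopcion.com/bebidas/termos.html?product_list_limit=24')

# Wait until the next-page link exists, then click it inside the page itself,
# which sidesteps the "element not interactable" visibility check.
next_button = WebDriverWait(driver, 10).until(
    EC.presence_of_element_located((By.XPATH, "(//a[@title='Siguiente'])[2]"))
)
driver.execute_script("arguments[0].click();", next_button)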

Python - get into all python dropdowns

I have code that does the following:
Enters the website
Logs in
Clicks on a link
Clicks on a date
and selects an item from the dropdown.
I wanted it to do this search for the first dropdown option, which would be:
wait.until(EC.visibility_of_element_located((By.XPATH, '//*[@id="ctl00_ctl00_Content_Content_ddlVagasTerminalEmpresa"]/option[1]'))).click()
and add it to my CSV file, and then do the same for option 2 and option 3. How can I do this? (See the sketch after the code below.)
Below is my code
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.firefox import options
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.support.ui import Select
import pandas as pd
import json
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from time import sleep

options = Options()
options.headless = False
dia = '{:0>2}'.format(input("Qual o dia do agendamento ? = "))
navegador = webdriver.Firefox(options=options)
wait = WebDriverWait(navegador, 30)
link = 'https://extranet.ecopatio.com.br/'
navegador.get(url=link)
inicio_str_dia = "a[title='"
final_str_dia = " de dezembro']"
diadoagenda = (inicio_str_dia + dia + final_str_dia)
wait.until(EC.element_to_be_clickable((By.ID, "ctl00_ctl00_Content_Content_txtLogin"))).send_keys('*********')
wait.until(EC.element_to_be_clickable((By.ID, "ctl00_ctl00_Content_Content_txtSenha"))).send_keys('*********')
wait.until(EC.element_to_be_clickable((By.ID, "ctl00_ctl00_Content_Content_btnEnviar"))).click()
wait.until(EC.element_to_be_clickable((By.ID, "ctl00_ctl00_Content_Content_TreeView2t8"))).click()
wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, diadoagenda))).click()
wait.until(EC.element_to_be_clickable((By.ID, "ctl00_ctl00_Content_Content_ddlVagasTerminalEmpresa"))).click()
wait.until(EC.element_to_be_clickable((By.ID, "ctl00_ctl00_Content_Content_ddlVagasTerminalEmpresa"))).click()
wait.until(EC.visibility_of_element_located((By.XPATH, '//*[@id="ctl00_ctl00_Content_Content_ddlVagasTerminalEmpresa"]/option[3]'))).click()
wait.until(EC.presence_of_element_located((By.XPATH, '//*[@id="divScroll"]')))
sleep(3)
teste = navegador.find_element(By.XPATH, '//*[@id="divScroll"]').get_attribute('innerHTML')
soup = BeautifulSoup(teste, "html.parser")
Vagas = soup.find_all(title="Vaga disponível.")
temp = []
for i in Vagas:
    on_click = i.get('onclick')
    temp.append(on_click)
texto = str(temp)
b = {'amp;': '', 'Cadastro': 'https://extranet.ecopatio.com.br/agendamento/Cadastro'}
for x, y in b.items():
    texto = texto.replace(x, y)
achado2 = texto.split('\'')[1::6]
#achado2_series = pd.Series(achado2)
df = pd.DataFrame(achado2)
df.to_csv('testa.csv', mode='a', header=False, index=False)
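One way to visit every dropdown option in turn, sketched here as an assumption (it is not part of the original post) and reusing the navegador driver and dropdown ID from the code above, is Selenium's Select helper, which works when the element is a native select:

from selenium.webdriver.support.ui import Select, WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

dropdown_id = "ctl00_ctl00_Content_Content_ddlVagasTerminalEmpresa"
wait = WebDriverWait(navegador, 30)

# Read how many options the dropdown has.
options_count = len(Select(wait.until(
    EC.element_to_be_clickable((By.ID, dropdown_id)))).options)

for index in range(options_count):
    # Re-locate the dropdown on every pass, because selecting an option
    # can trigger a postback that replaces the element.
    select = Select(navegador.find_element(By.ID, dropdown_id))
    select.select_by_index(index)
    wait.until(EC.presence_of_element_located((By.XPATH, '//*[@id="divScroll"]')))
    # At this point, repeat the divScroll scraping from the code above and
    # append the rows for this option to the same CSV (mode='a').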

Scrapes only 1 page; I want to scrape multiple pages with Selenium

I am trying to scrape multiple pages with Selenium, but it only scrapes one page. What mistake am I making, and is there a solution? This is the page link: https://zoekeenadvocaat.advocatenorde.nl/zoeken?q=&type=advocaten&limiet=10&sortering=afstand&filters%5Brechtsgebieden%5D=%5B%5D&filters%5Bspecialisatie%5D=0&filters%5Btoevoegingen%5D=0&locatie%5Badres%5D=Holland&locatie%5Bgeo%5D%5Blat%5D=52.132633&locatie%5Bgeo%5D%5Blng%5D=5.291266&locatie%5Bstraal%5D=56&locatie%5Bhash%5D=67eb2b8d0aab60ec69666532ff9527c9&weergave=lijst&pagina=1
import time
from selenium import webdriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from webdriver_manager.chrome import ChromeDriverManager
import pandas as pd

options = webdriver.ChromeOptions()
options.add_argument("--no-sandbox")
options.add_argument("--disable-gpu")
options.add_argument("--window-size=1920x1080")
options.add_argument("--disable-extensions")
chrome_driver = webdriver.Chrome(
    service=Service(ChromeDriverManager().install()),
    options=options
)

def supplyvan_scraper():
    with chrome_driver as driver:
        driver.implicitly_wait(15)
        URL = 'https://zoekeenadvocaat.advocatenorde.nl/zoeken?qvrtqca=&filters%5Brechtsgebieden%5D=%5B%5D&ypb=&locatie%5Badres%5D=Holland&locatie%5Bgeo%5D=%7B%22lat%22%3A%2252.132633%22%2C%22lng%22%3A%225.291266%22%7D&locatie%5Bstraal%5D=56&filters%5Bspecialisatie%5D=0&filters%5Btoevoegingen%5D=0&locatie%5Bhash%5D='
        driver.get(URL)
        time.sleep(3)
        page = 1
        page_links = [element.get_attribute('href') for element in
                      driver.find_elements(By.XPATH, "//span[@class='h4 no-margin-bottom']//a")]
        data = []
        for link in page_links:
            wev = {}
            driver.get(link)
            time.sleep(2)
            try:
                title = driver.find_element(By.CSS_SELECTOR, '.title h3').text
            except:
                pass
            wev['title'] = title
            try:
                advocaten = driver.find_element(By.CSS_SELECTOR, ".secondary").text
            except:
                pass
            wev['advocaten'] = advocaten
            details = driver.find_elements(By.XPATH, "//section[@class='lawyer-info']")
            for detail in details:
                try:
                    address = detail.find_element_by_xpath("//div[@class='column medium-6']").text.strip()
                except:
                    pass
                wev['address'] = address
                try:
                    email = detail.find_element(By.XPATH, "//div[@class='row'][3]//div[@class='column small-9']//a").get_attribute('href')
                except:
                    pass
                wev['email'] = email
                try:
                    website = detail.find_element(By.XPATH, "//div[@class='row'][4]//div[@class='column small-9']//a").get_attribute('href')
                except:
                    pass
                wev['website'] = website
            data.append(wev)
            if len(driver.find_elements_by_xpath("//a[@class='button next']")) > 0:
                url = "https://zoekeenadvocaat.advocatenorde.nl/zoeken?q=&type=advocaten&limiet=10&sortering=afstand&filters%5Brechtsgebieden%5D=%5B%5D&filters%5Bspecialisatie%5D=0&filters%5Btoevoegingen%5D=0&locatie%5Badres%5D=Holland&locatie%5Bgeo%5D%5Blat%5D=52.132633&locatie%5Bgeo%5D%5Blng%5D=5.291266&locatie%5Bstraal%5D=56&locatie%5Bhash%5D=67eb2b8d0aab60ec69666532ff9527c9&weergave=lijst&pagina={}".format(page)
                driver.get(url)
                page += 1
                if int(page) > 5:
                    break
            else:
                break
        df = pd.DataFrame(data)
        print(df)
You can build the pagination into the starting URL using a for loop, as follows:
import time
from selenium import webdriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from webdriver_manager.chrome import ChromeDriverManager
import pandas as pd

options = webdriver.ChromeOptions()
options.add_argument("--no-sandbox")
options.add_argument("--disable-gpu")
options.add_argument("--window-size=1920x1080")
options.add_argument("--disable-extensions")
chrome_driver = webdriver.Chrome(
    service=Service(ChromeDriverManager().install()),
    options=options)

data = []

def supplyvan_scraper():
    with chrome_driver as driver:
        driver.implicitly_wait(15)
        URL = 'https://zoekeenadvocaat.advocatenorde.nl/zoeken?q=&type=advocaten&limiet=10&sortering=afstand&filters%5Brechtsgebieden%5D=%5B%5D&filters%5Bspecialisatie%5D=0&filters%5Btoevoegingen%5D=0&locatie%5Badres%5D=Holland&locatie%5Bgeo%5D%5Blat%5D=52.132633&locatie%5Bgeo%5D%5Blng%5D=5.291266&locatie%5Bstraal%5D=56&locatie%5Bhash%5D=67eb2b8d0aab60ec69666532ff9527c9&weergave=lijst&pagina={page}'
        for page in range(1, 11):
            driver.get(URL.format(page=page))
            time.sleep(3)
            page_links = [element.get_attribute('href') for element in driver.find_elements(By.XPATH, "//span[@class='h4 no-margin-bottom']//a")]
            for link in page_links:
                wev = {}
                driver.get(link)
                time.sleep(2)
                try:
                    title = driver.find_element(By.CSS_SELECTOR, '.title h3').text
                except:
                    pass
                wev['title'] = title
                try:
                    advocaten = driver.find_element(By.CSS_SELECTOR, ".secondary").text
                except:
                    pass
                wev['advocaten'] = advocaten
                details = driver.find_elements(By.XPATH, "//section[@class='lawyer-info']")
                for detail in details:
                    try:
                        address = detail.find_element_by_xpath("//div[@class='column medium-6']").text.strip()
                    except:
                        pass
                    wev['address'] = address
                    try:
                        email = detail.find_element(By.XPATH, "//div[@class='row'][3]//div[@class='column small-9']//a").get_attribute('href')
                    except:
                        pass
                    wev['email'] = email
                    try:
                        website = detail.find_element(By.XPATH, "//div[@class='row'][4]//div[@class='column small-9']//a").get_attribute('href')
                    except:
                        pass
                    wev['website'] = website
                data.append(wev)

df = pd.DataFrame(data)
print(df)
You can also try:
URL = 'https://zoekeenadvocaat.advocatenorde.nl/zoeken?q=&type=advocaten&limiet=10&sortering=afstand&filters%5Brechtsgebieden%5D=%5B%5D&filters%5Bspecialisatie%5D=0&filters%5Btoevoegingen%5D=0&locatie%5Badres%5D=Holland&locatie%5Bgeo%5D%5Blat%5D=52.132633&locatie%5Bgeo%5D%5Blng%5D=5.291266&locatie%5Bstraal%5D=56&locatie%5Bhash%5D=67eb2b8d0aab60ec69666532ff9527c9&weergave=lijst&pagina={page}'
for page in range(1, 11):
    url = URL.format(page=page)
    driver.get(url)

Selenium button not being clicked but is being highlighted

What I'm trying to do is make a Nike product auto-buyer. The problem is that after selecting the size it doesn't let me click through Selenium; I even tried to click manually but nothing pops up. This is my code where I try to click (not the full code):
from selenium import webdriver
from selenium.common.exceptions import JavascriptException
from selenium.webdriver import ChromeOptions
import re
from bs4 import BeautifulSoup
import requests
import json
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import os

user = os.environ['USERNAME']
snkrsurl = "https://www.nike.com/t/air-zoom-pegasus-38-womens-running-shoe-wide-gg8GBK/CW7358-500" #input("Please input your SNKRS url \n")
size = float(input("Please input size \n"))
options = ChromeOptions()
options.add_experimental_option('excludeSwitches', ['enable-logging'])
options.add_experimental_option("useAutomationExtension", False)
options.add_experimental_option("detach", True)
options.add_argument("--disable-notifications")
chrome = webdriver.Chrome(options=options)

if "https://" in snkrsurl:
    pass
elif "http://" in snkrsurl:
    pass
else:
    snkrsurl = "http://" + snkrsurl

chrome.get(snkrsurl)
with requests.Session() as session:
    soup = BeautifulSoup(session.get(snkrsurl).text, features="lxml")
    script = soup.find("script", string=re.compile('INITIAL_REDUX_STATE')).string
    redux = json.loads(script[script.find('{'):-1])
    products = redux["Threads"]["products"]
wait = WebDriverWait(chrome, 15)

def step1(i, v):
    for key, product in products.items():
        if float(product["skus"][i]["nikeSize"]) == v:
            print("Found")
            if v.is_integer():
                wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="gen-nav-footer"]/nav/button'))).click()
                wait.until(EC.element_to_be_clickable((By.XPATH, "//*[text()='{}']".format(int(v))))).click()
                chrome.execute_script("window.scroll(0,609)")
                wait.until(EC.element_to_be_clickable((By.XPATH, '//*[text()="Add to Bag"]'))).click()
                break
            else:
                wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="gen-nav-footer"]/nav/button'))).click()
                wait.until(EC.element_to_be_clickable((By.XPATH, "//*[text()='{}']".format(v)))).click()
                e = chrome.find_element_by_css_selector("#floating-atc-wrapper > div > button.ncss-btn-primary-dark.btn-lg.add-to-cart-btn")
                chrome.execute_script("arguments[0].scrollIntoView(true);")
                e.click()
                break
        else:
            pass

for i, v in products.items():
    global length
    length = len(v['skus'])
    break

for i in range(length):
    length -= 1
    step1(length, size)
I use window.scroll to go to that element because if I don't, it throws an error saying the element is not interactable. And yes, checkout is only clickable from real Chrome.
Thanks
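One pattern that often helps with this kind of "element not interactable" click, sketched here as an assumption (it is not part of the original post) and reusing the chrome driver and wait from the code above, is to scroll the button itself into view, wait for it to be clickable, and fall back to a JavaScript click:

from selenium.common.exceptions import ElementNotInteractableException, ElementClickInterceptedException
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

# Locate the "Add to Bag" button and scroll the element itself into view,
# rather than scrolling to a fixed pixel offset.
add_to_bag = wait.until(
    EC.presence_of_element_located((By.XPATH, '//*[text()="Add to Bag"]'))
)
chrome.execute_script("arguments[0].scrollIntoView({block: 'center'});", add_to_bag)
try:
    wait.until(EC.element_to_be_clickable((By.XPATH, '//*[text()="Add to Bag"]'))).click()
except (ElementNotInteractableException, ElementClickInterceptedException):
    # Last resort: dispatch the click in the page itself.
    chrome.execute_script("arguments[0].click();", add_to_bag)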
