I am using pytest.mark.parametrize for data-driven testing. When I generate the HTML report, the test case name includes all of the parameter data. My goal is to show only the test case name, such as "test_RSA_Health", and remove the additional details from the "Test" column of the report. Is that possible?
My Code:
conftest
import time
import allure
import pytest
from allure_commons.types import AttachmentType
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from Utilities.filepath import *
@pytest.hookimpl(hookwrapper=True, tryfirst=True)
def pytest_runtest_makereport(item, call):
outcome = yield
rep = outcome.get_result()
setattr(item, "rep_" + rep.when, rep)
test_fn = item.obj
docstring = getattr(test_fn, '__doc__')
if docstring:
rep.nodeid = docstring
return rep
@pytest.fixture(scope="function")
def selenium_driver(request):
chrome_options = Options()
chrome_options.add_argument("--headless")
# chrome_options.add_argument("--window-size=1920,1080")
# chrome_options.add_argument('--start-maximized')
chrome_options.add_argument('--disable-gpu')
chrome_options.add_argument('--no-sandbox')
# chrome_options.add_argument("--disable-extensions")
# chrome_options.add_argument('disable-infobars')
s = Service("C:\\Users\\aprat\\OneDrive\\Desktop\\selenium\\chromedriver98\\chromedriver.exe")
url = "https:test.com"
driver = webdriver.Chrome(service=s, options=chrome_options)
driver.maximize_window()
driver.set_window_size(1200, 600)
driver.get(url)
driver.find_element(By.NAME, "user_name").send_keys("9998887776")
driver.find_element(By.NAME, "password_name").send_keys("qwerty123")
driver.find_element(By.XPATH, "//button[@type='submit']").click()
time.sleep(3)
request.cls.driver = driver
yield driver
driver.close()
@pytest.fixture()
def log_on_failure(request, selenium_driver):
yield
item = request.node
driver = selenium_driver
if item.rep_call.failed:
allure.attach(driver.get_screenshot_as_png(), name="screenshot", attachment_type=AttachmentType.PNG)
test script:
import time
import pytest
from Pages.HomePage import HomePage
from TestCases.BaseTest import BaseTest
from Utilities import dataProvider
class Test_RSA_Health(BaseTest):
#pytest.mark.parametrize("pin,sumvalue,mobileno,selfage,fullname,email,firstname,lastname,dob,income,pan,designation,add1,add2,height,weight,nomfirstname,nomlastname,nomdob", dataProvider.get_data("rsa_health"))
def test_RSA_Health(self,pin,sumvalue,mobileno,selfage,fullname,email,firstname,lastname,dob,income,pan,designation,add1,add2,height,weight,nomfirstname,nomlastname,nomdob):
home = HomePage(self.driver)
healthinsuranepage = home.SelectHealth()
self.VerifyPresence_PinCodeTextBox()
healthinsuranepage.landing_page()
healthinsuranepage.InputPin(pin)
healthinsuranepage.SelectSum(str(sumvalue))
healthinsuranepage.InputMobileNo(mobileno)
insureddetailspage = healthinsuranepage.ClickNext()
self.VerifyPresence_SelfCheckBox()
insureddetailspage.landing_page()
insureddetailspage.SelectMemberSelf()
self.VerifyPresence_SelfAgeTextBox()
insureddetailspage.InputAge(selfage)
time.sleep(2)
quotespage = insureddetailspage.ClickNext()
time.sleep(5)
quotespage.landing_page()
quotespage.ShareQuotes()
time.sleep(3)
quotespage.SelectAllQuotes()
time.sleep(2)
quotespage.ClickNext1()
self.VerifyPresence_NameTextBox()
quotespage.InputName(fullname)
quotespage.InputEmail(email)
quotespage.InputMobileNo(mobileno)
time.sleep(2)
quotespage.ClickSubmit()
time.sleep(2)
self.VerifyPresence_CloseButton()
time.sleep(2)
quotespage.ClickCloseButton()
time.sleep(2)
policydetailspage = quotespage.RSAPlanSelect()
time.sleep(3)
propdetailspage = policydetailspage.ConfirmTenure()
policydetailspage.landing_page()
self.VerifyPresence_FirstNameTextBox()
propdetailspage.landing_page()
propdetailspage.InputFirstName(firstname)
propdetailspage.InputLastName(lastname)
propdetailspage.InputDOB(dob)
propdetailspage.SelectPropGender()
propdetailspage.InputEmailId(email)
propdetailspage.InputContactNo(mobileno)
propdetailspage.InputIncome(income)
propdetailspage.InputPANCard(pan)
propdetailspage.SelectOccupationDropdown()
self.VerifyPresence_SelectOccupationOption()
propdetailspage.SelectOccupation()
propdetailspage.InputDesignation(designation)
propdetailspage.SelectMaritalStatusDropdown()
self.VerifyPresence_MaritalStatusOption()
propdetailspage.SelectMaritalStatus()
propdetailspage.SelectEducationDropdown()
self.VerifyPresence_QualificationOption()
propdetailspage.SelectQualification()
propdetailspage.SelectTPANameDropdown()
self.VerifyPresence_TPANameOption()
propdetailspage.SelectTPA()
propdetailspage.InputAdd1(add1)
propdetailspage.InputAdd2(add2)
selfdetailspage = propdetailspage.ClickNext()
self.VerifyPresence_SelfFirstNameTextBox()
selfdetailspage.landing_page()
selfdetailspage.InputSelfFirstName(firstname)
selfdetailspage.InputSelfLastName(lastname)
selfdetailspage.InputSelfDOB(dob)
selfdetailspage.SelectSelfGender()
selfdetailspage.InputSelfHeight(height)
selfdetailspage.InputSelfWeight(weight)
selfdetailspage.InputSelfDesignation(designation)
selfdetailspage.InputNomineeFName(nomfirstname)
selfdetailspage.InputNomineeLName(nomlastname)
selfdetailspage.InputNomineeDOB(nomdob)
selfdetailspage.SelectNomineeGender()
selfdetailspage.SelectNomineeRltnDropdown()
self.VerifyPresence_NomRelationOption()
selfdetailspage.SelectNomineeRelation()
questionariespage = selfdetailspage.ClickNext()
time.sleep(4)
questionariespage.landing_page()
policyreviewpage = questionariespage.ClickNext()
self.VerifyPresence_NameValidationText()
policyreviewpage.landing_page()
proposer_name = policyreviewpage.GetName()
proposer_email = policyreviewpage.GetEmail()
proposer_mobno = policyreviewpage.GetPhoneNo()
try:
assert proposer_name == fullname
assert proposer_email == email
assert int(proposer_mobno) == mobileno
except Exception as e:
raise e
policyreviewpage.FinalSubmit()
self.VerifyPresence_ShareButton()
policyreviewpage.SharePolicy()
You can pass the whole data set to the test as a single dict parameter, like this:
test_details = [{'pin': 444, 'sumvalue': 444,.....}]
def pytest_generate_tests(metafunc):
if 'test_data' in metafunc.fixturenames:
metafunc.parametrize("test_data", test_details)
def test_RSA_Health(self, test_data):
pin = test_data['pin']
Or like this:
test_details = [{'pin': 444, 'sumvalue': 444,.....}]
#pytest.mark.parametrize("test_data", [(test_details)]
def test_RSA_Health(self,test_data)
pin = test_data['pin']
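For reference, here is a minimal, self-contained sketch of the second variant (the data values and the id label are illustrative, not taken from the original data file). The optional ids argument of parametrize controls the suffix pytest appends to the test name, so the report can show a short label instead of the full parameter dump:

import pytest

test_details = [
    {'pin': 444, 'sumvalue': 500000, 'mobileno': 9998887776},  # illustrative values only
]

@pytest.mark.parametrize("test_data", test_details, ids=["case1"])  # short id instead of the data repr
def test_RSA_Health(test_data):
    pin = test_data['pin']
    sumvalue = test_data['sumvalue']
    assert pin == 444  # placeholder assertion for the sketch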
Related
Hi, I am trying to write a sign-up bot for UEFA.com using Selenium. The requests library is too difficult for me, so I am just automating the sign-up process through the browser even though it is a lot slower.
I am able to get to the final stage where I click on Create an Account, but I am faced with a reCAPTCHA which only appears after clicking Create an Account. After solving the captcha there is no 'Submit' button; the details are submitted automatically for you.
I am able to get the captcha token returned from the 2Captcha solving service and inject it into the innerHTML of the g-recaptcha-response field using JavaScript. However, I do not know how to submit the captcha and the form.
import requests
import time
from selenium.webdriver.support.ui import WebDriverWait
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.by import By
from seleniumwire import webdriver
import pyautogui
from twocaptcha import TwoCaptcha
import random
import os
from random import randint
import sys
firstnames = []
lastnames = []
API_Key = '6LehfZUbAAAAAJhue_6BVqqxLulLiXLP0rEgpdRH'
# Open Names File
with open('firstnames.txt', 'r') as f:
for name in f:
name = name.strip()
firstnames.append(name)
with open('lastnames.txt', 'r') as e:
for name in e:
name = name.strip()
lastnames.append(name)
with open('proxies.txt') as f:
proxy = f.readlines()
proxy_rand = randint(1, 35)
s_proxy = str(proxy[proxy_rand])
p_strip = s_proxy.rstrip()
# Proxy Input and Format
bare_proxy = p_strip.split(':')
username = bare_proxy[2]
password = bare_proxy[3]
ip = bare_proxy[0]
port = bare_proxy[1]
options = {
'proxy': {
'http': f'http://{username}:{password}@{ip}:{port}',
'https': f'https://{username}:{password}@{ip}:{port}',
'no_proxy': 'localhost,127.0.0.1'
}
}
os.environ['PATH'] += 'C:/SeleniumDrivers'
homepage_URL = 'https://www.uefa.com/tickets/'
driver = webdriver.Chrome(seleniumwire_options=options)
driver.get(homepage_URL)
# Accessing Register Page
reject_cookies = driver.find_element(By.ID, 'onetrust-reject-all-handler')
reject_cookies.click()
time.sleep(1)
login_button = driver.find_element(By.CSS_SELECTOR, "a[class='btn btn-secondary tickets__btn js-tracking-card']")
login_button.click()
time.sleep(10)
create_account = driver.find_element(By.XPATH, '/html/body/div[2]/div[2]/div[2]/div/form/div[4]/a')
create_account.click()
time.sleep(10)
# Inputting SignUp Details
letters = 'abcdefghijklmnopqrstuvwxyz'
a = random.choice(letters)
b = random.choice(letters)
c = random.choice(letters)
d = random.choice(letters)
email = driver.find_element(By.XPATH, '/html/body/div[2]/div[2]/div[2]/div/form/div[1]/div[6]/input')
email.send_keys(f'{a}{b}{c}{d}@nottingham.pro')
time.sleep(2)
password = driver.find_element(By.XPATH, '/html/body/div[2]/div[2]/div[2]/div/form/div[1]/div[7]/input')
password.send_keys('19741002Rw!')
time.sleep(2)
first_name = driver.find_element(By.XPATH, '//*[@id="gigya-textbox-130722358975432270"]')
first_range = len(firstnames) - 1
random_first = randint(1, first_range)
f_name = firstnames[random_first]
first_name.send_keys(f'{f_name}')
time.sleep(2)
last_name = driver.find_element(By.XPATH, '/html/body/div[2]/div[2]/div[2]/div/form/div[1]/div[9]/input')
last_range = len(lastnames) - 1
random_last = randint(1, last_range)
l_name = lastnames[random_last]
last_name.send_keys(f'{l_name}')
time.sleep(2)
day_of_birth = driver.find_element(By.XPATH, '/html/body/div[2]/div[2]/div[2]/div/form/div[1]/div[10]/div[1]/input')
day = randint(1, 28)
day_of_birth.send_keys(f'{day}')
time.sleep(2)
month_of_birth = driver.find_element(By.XPATH, '/html/body/div[2]/div[2]/div[2]/div/form/div[1]/div[10]/div[2]/input')
month = randint(1, 12)
month_of_birth.send_keys(f'{month}')
time.sleep(2)
year_of_birth = driver.find_element(By.XPATH, '/html/body/div[2]/div[2]/div[2]/div/form/div[1]/div[10]/div[3]/input')
year = randint(1940, 2000)
year_of_birth.send_keys(f'{year}')
driver.execute_script("window.scrollTo(0, 500)")
time.sleep(2)
pyautogui.moveTo(353, 619)
time.sleep(2)
pyautogui.click()
time.sleep(5)
current_url = driver.current_url
print(current_url)
g_key = '6LehfZUbAAAAAJhue_6BVqqxLulLiXLP0rEgpdRH'
def SolveCaptcha():
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
api_key = os.getenv(g_key, 'a733edea49a8327795d56edc9f06d391')
solver = TwoCaptcha(api_key)
try:
result = solver.recaptcha(
sitekey=g_key,
url=current_url)
except Exception as e:
print(e)
else:
return result
result = SolveCaptcha()
code = result['code']
print(code)
token = f'document.getElementById("g-recaptcha-response").innerHTML="{code}";'
driver.execute_script(token)
time.sleep(10000)
As you can see, by the end of the code I have managed to input the captcha token, but I am not sure how to submit it, as there is no submit button.
I have tried to look for a callback function but can't seem to find it when I inspect the page.
Submit the first form on the page:
driver.execute_script('document.forms[0].submit()')
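If the sign-up form happens not to be the first form on the page, a hedged variant is to submit the form that actually contains the reCAPTCHA response field (closest() is standard DOM; the element id comes from the script above):

# Submit the form that encloses the g-recaptcha-response textarea instead of forms[0]
driver.execute_script(
    'document.getElementById("g-recaptcha-response").closest("form").submit();'
)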
# 뉴스 크롤링.py (news crawler script)
####################################### Search for the '사용후핵연료' (spent nuclear fuel) keyword ##################################################
import sys, os
from bs4 import BeautifulSoup
import requests
from selenium import webdriver
import selenium
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from webdriver_manager.chrome import ChromeDriverManager
from datetime import datetime, timedelta
from pandas import DataFrame
import time
from openpyxl.workbook import Workbook
sleep_sec = 0.5
wb = Workbook()
# Please enter your User-Agent here.
headers = {'User-Agent' : '________________'}
query = 'spent nuclear fuel'
yesterday = (datetime.today() - timedelta(1)).strftime("%Y.%m.%d")
def news_crawling():
service = Service(executable_path=ChromeDriverManager().install())
browser = webdriver.Chrome(service=service)
print('Launching the browser (automated control)\n')
news_url = 'https://www.google.com/search?q={0}&tbm=nws&source-news]'.format(query, yesterday)
browser.get(news_url)
time.sleep(sleep_sec)
print('\nStarting the crawl.')
##### Crawl across pages using dynamic browser control
news_dict = {}
idx = 1
cur_page = 1
news_num = 1000000
while True:
table = browser.find_element("xpath",'.//div[@data-hveid="CBAQAA"]')
li_list = table.find_elements("xpath",'.//li[contains(@class="vJOb1e aIfcHf Hw13jc"]')
area_list = [li.find_element("xpath",'.//div[@class="mCBkyc y355M ynAwRc MBeuO nDgy9d"]') for li in li_list]
for a in area_list[:min(len(area_list), news_num-idx+1)]:
n = a.find_element("xpath",'.//div[#role="heading"]')
n_url = n.get_attribute('href')
try:
img = a.find_element(By.CSS_SELECTOR,'img#dimg_').find_element(By.CSS_SELECTOR, 'img')
img = img.get_attribute('src')
except:
img = " "
news_dict[idx] = {'Title' : n.get_attribute('title'),
'url' : n_url,
'thumbnail': img}
idx += 1
try:
next_btn = browser.find_element(By.CSS_SELECTOR, 'a#pnnext')
next_btn.click()
cur_page +=1
# pages = browser.find_element("xpath",'//div[@class="sc_page_inner"]')
# next_page_url = [p for p in pages.find_elements("xpath",'.//a') if p.text == str(cur_page)][0].get_attribute('href')
pages = browser.find_element("xpath",'//table[@class="fl"]')
next_page_url = [p for p in pages.find_elements("xpath",'.//a') if p.text == str(cur_page)][0].get_attribute('aria-lable')
browser.get(next_page_url)
time.sleep(sleep_sec)
except:
print('\nClosing the browser.\n' + '=' * 100)
time.sleep(0.7)
browser.close()
break
######################################################## Modified up to this point ################################################################
# Export to an Excel file
print('Converting to a DataFrame\n')
news_df = DataFrame(news_dict).T
folder_path = os.getcwd()
xlsx_file_name = '{}_{}.xlsx'.format(query, yesterday)
news_df.to_excel(xlsx_file_name, index=False)
print('Excel saved | path : {}\\{}\n'.format(folder_path, xlsx_file_name))
news_crawling()
This is my code. I used it on a Korean website and it worked well, but after I modified it for Google search, it doesn't work.
I want to search something on Google and then get the news titles into an xlsx file.
I previously used it on a Korean website, so I changed the part below:
table = browser.find_element("xpath",'.//div[@data-hveid="CBAQAA"]')
li_list = table.find_elements("xpath",'.//li[contains(@class="vJOb1e aIfcHf Hw13jc"]')
area_list = [li.find_element("xpath",'.//div[@class="mCBkyc y355M ynAwRc MBeuO nDgy9d"]') for li in li_list]
When I run the code, it only gives me an empty xlsx file.
Can anyone help with this, please? I would really appreciate it.
Here is one possible solution:
from openpyxl import Workbook
from datetime import datetime
from datetime import timedelta
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
def get_url(query: str, min_date: str, max_date: str) -> str:
return f'https://www.google.com/search?q={query}&tbm=nws&source-news&tbs=cdr:1,cd_min:{min_date},cd_max:{max_date}'
options = webdriver.ChromeOptions()
options.add_argument('--headless')
options.add_experimental_option("excludeSwitches", ["enable-automation", "enable-logging"])
service = Service(executable_path="path/to/your/chromedriver.exe")
driver = webdriver.Chrome(service=service, options=options)
wait = WebDriverWait(driver, 5)
yesterday = (datetime.now() - timedelta(1)).strftime("%m.%d.%Y")
driver.get(get_url('spent nuclear fuel', yesterday, yesterday))
url_locator = (By.CSS_SELECTOR, '#rso a')
title_locator = (By.CSS_SELECTOR, 'a div[role="heading"]')
thumbnail_locator = (By.CSS_SELECTOR, '#rso a>div>div:first-child img')
workbook = Workbook()
worksheet = workbook.active
page = 1
while True:
print(f'Current page: {page}')
url_web_elements = wait.until(EC.visibility_of_all_elements_located(url_locator))
title_web_elements = wait.until(EC.presence_of_all_elements_located(title_locator))
thumbnail_web_elements = wait.until(EC.visibility_of_all_elements_located(thumbnail_locator))
titles = [title.text.replace(',', '.') for title in title_web_elements]
urls = [link.get_attribute('href') for link in url_web_elements]
thumbnails = [thumbnail.get_attribute('src') for thumbnail in thumbnail_web_elements]
for data in zip(titles, urls, thumbnails):
news = {
'title' : data[0],
'url' : data[1],
'thumbnail': data[2]
}
worksheet.append(list(news.values()))
try:
page += 1
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, '#pnnext'))).click()
except TimeoutException:
break
workbook.save(f'google_news_{yesterday}.xlsx')
driver.quit()
The output is the xlsx file google_news_11.10.2022.xlsx.
In the get_url function, you can pass a range of dates for which the news will be displayed, for example get_url('spent nuclear fuel', '01.11.2022', '11.11.2022').
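For instance, a short usage sketch (the date strings follow the same MM.DD.YYYY format used for yesterday above):

# Crawl news for a fixed date window instead of only yesterday
driver.get(get_url('spent nuclear fuel', '01.11.2022', '11.11.2022'))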
You can also save data to csv using this solution:
import csv
from datetime import datetime
from datetime import timedelta
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
def get_url(query: str, min_date: str, max_date: str) -> str:
return f'https://www.google.com/search?q={query}&tbm=nws&source-news&tbs=cdr:1,cd_min:{min_date},cd_max:{max_date}'
def save_to_csv(data: list) -> None:
with open(file='google_news.csv', mode='a', encoding="utf-8") as f:
writer = csv.writer(f, lineterminator='\n')
writer.writerow([*data])
options = webdriver.ChromeOptions()
options.add_argument('--headless')
options.add_experimental_option("excludeSwitches", ["enable-automation", "enable-logging"])
service = Service(executable_path="path/to/your/chromedriver.exe")
driver = webdriver.Chrome(service=service, options=options)
wait = WebDriverWait(driver, 5)
yesterday = (datetime.now() - timedelta(1)).strftime("%m.%d.%Y")
driver.get(get_url('spent nuclear fuel', yesterday, yesterday))
url_locator = (By.CSS_SELECTOR, '#rso a')
title_locator = (By.CSS_SELECTOR, 'a div[role="heading"]')
thumbnail_locator = (By.CSS_SELECTOR, '#rso a>div>div:first-child img')
page = 1
while True:
print(f'Current page: {page}')
url_web_elements = wait.until(EC.visibility_of_all_elements_located(url_locator))
title_web_elements = wait.until(EC.presence_of_all_elements_located(title_locator))
thumbnail_web_elements = wait.until(EC.visibility_of_all_elements_located(thumbnail_locator))
titles = [title.text.replace(',', '.') for title in title_web_elements]
urls = [link.get_attribute('href') for link in url_web_elements]
thumbnails = [thumbnail.get_attribute('src') for thumbnail in thumbnail_web_elements]
for data in zip(titles, urls, thumbnails):
news = {
'title' : data[0],
'url' : data[1],
'thumbnail': data[2]
}
save_to_csv(news.values())
try:
page += 1
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, '#pnnext'))).click()
except TimeoutException:
break
driver.quit()
Output is csv file google_news.csv:
COP27: nuclear boss doesn't expect surge in waste recycling,https://news.yahoo.com/cop27-nuclear-boss-doesnt-expect-072631885.html,"data:image/jpeg;base64,..."
UN Nuclear Chief Says Recycling Nuclear Waste 'Difficult ...,https://www.theepochtimes.com/un-nuclear-chief-says-recycling-nuclear-waste-difficult-after-biden-looks-to-fund-reprocessing-projects_4855151.html,"data:image/jpeg;base64,..."
COP27: UN nuclear chief says radioactive waste recycling is 'difficult' technology,https://www.deccanherald.com/international/world-news-politics/cop27-un-nuclear-chief-says-radioactive-waste-recycling-is-difficult-technology-1161036.html,"data:image/jpeg;base64,..."
Tested on Python 3.9.10 with Selenium 4.5.0 and openpyxl 3.0.10.
"problem lines"
for_tariff_loop = driver.find_elements_by_xpath("//span[@class='phx-radio__element']")
radio_label_list = for_tariff_loop[i].find_element_by_css_selector('span[class="phx-radio__label"]')
print(radio_label_list)
time.sleep(1)
Website I'm scraping: https://www.telekom.de/unterwegs/apple/apple-iphone-13-pro/graphit-512gb
I was not able to print the radio button's label for the checked button. I don't know what the mistake is or where I made it. Could anyone help with this? It would be helpful for me to learn. The code for the 'Change tariff' flow is given below:
import xlwt
from selenium import webdriver
import re
import time
from datetime import date
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
class telekommobiles:
def __init__(self):
self.url="https://www.telekom.de/mobilfunk/geraete/smartphone?page=1&pageFilter=promotion"
self.country='DE'
self.currency='GBP'
self.VAT='Included'
self.shipping = 'free shipping within 3-4 weeks'
self.Pre_PromotionPrice ='N/A'
self.color ='N/A'
def telekom(self):
#try:
driver=webdriver.Chrome()
driver.maximize_window()
driver.get(self.url)
today = date.today()
time.sleep(5)
cookies = driver.find_element_by_css_selector('button.cl-btn.cl-btn--accept-all').click()
print("cookies accepted")
links_prod_check = []
prod_models = []
prod_manufacturer =[]
prod_memorys = []
product_colors =[]
product_price_monthly_payments = []
product_price_one_time_payments =[]
product_links = []
containers = driver.find_elements_by_css_selector('div[class="styles_item__12Aw4"]')
i = 1
for container in containers:
p_links =container.find_element_by_tag_name('a').get_attribute('href')
i = i + 1
product_links.append(p_links)
#print(p_links)
for links in product_links:
driver.get(links)
#time.sleep(5)
#print(driver.current_url)
#links_prod_check.append(driver.current_url)
coloroptions = WebDriverWait(driver, 30).until(EC.presence_of_all_elements_located((By.XPATH,"//li[@data-qa='list_ColorVariant']")))
#print(coloroptions)
for i in range(len(coloroptions)):
coloroption = driver.find_elements_by_xpath("//li[@data-qa='list_ColorVariant']")
coloroption[i].click()
#print(coloroption[i])
time.sleep(3)
memoryoptions = WebDriverWait(driver, 30).until(EC.presence_of_all_elements_located((By.XPATH,"//span[@class='phx-radio__element']")))
for i in range(len(memoryoptions)):
memoryoption = driver.find_elements_by_xpath("//span[@class='phx-radio__element']")
try:
memoryoption[i].click()
except:
pass
time.sleep(5)
change_traiff = driver.find_element_by_css_selector('button[class="phx-link phx-list-of-links__link js-mod tracking-added"]').click()
time.sleep(3)
#looping for each section
section_loops = driver.find_elements_by_css_selector('section[class="tariff-catalog--layer"]')
#print(len(section_loops))
for section_loop in section_loops:
#print(section_loop)
time.sleep(5)
#Headings
heading_1 = section_loop.find_element_by_css_selector('h2[class="page-title page-title--lowercase"]').text
print(heading_1)
# looping for each separate boxes
each_box_subcontainers = section_loop.find_elements_by_css_selector('.phx-tariff-box__section')
#print(len(each_box_subcontainers))
for subcontainer in each_box_subcontainers:
#print(subcontainer)
looping_for_tariff = WebDriverWait(driver, 10).until(EC.presence_of_all_elements_located((By.XPATH,"//span[@class='phx-radio__element']")))
#print(looping_for_tariff)
for i in range(len(looping_for_tariff)):
#print(i)
try:
for_tariff_loop = driver.find_elements_by_xpath("//span[@class='phx-radio__element']")
for_tariff_loop[i].click()
time.sleep(3)
except:
pass
for_tariff_loop = driver.find_elements_by_xpath("//span[@class='phx-radio__element']")
radio_label_list = for_tariff_loop[i].find_element_by_css_selector('span[class="phx-radio__label"]')
print(radio_label_list)
time.sleep(1)
change_traiff_close_button = driver.find_element_by_css_selector('span[class="icon-after-yellow-close right close popup-close-tr js-popup-close"]').click()
telekom_de=telekommobiles()
telekom_de.telekom()
You are trying to find an element within an element. When you locate radio_label_list relative to for_tariff_loop[i], the effective XPath becomes:
//span[@class='phx-radio__element']//span[@class="phx-radio__label"]
which does not exist in the DOM.
I tried the last part of the code and was able to print the memory size as shown below. Do try and confirm.
I replaced the CSS selector for radio_label_list with the XPath ./following-sibling::span:
looping_for_tariff = WebDriverWait(driver, 10).until(EC.presence_of_all_elements_located((By.XPATH, "//span[@class='phx-radio__element']")))
# print(looping_for_tariff)
for i in range(len(looping_for_tariff)):
# print(i)
try:
for_tariff_loop = driver.find_elements_by_xpath("//span[@class='phx-radio__element']")
for_tariff_loop[i].click()
time.sleep(3)
except:
pass
for_tariff_loop = driver.find_elements_by_xpath("//span[@class='phx-radio__element']")
radio_label_list = for_tariff_loop[i].find_element_by_xpath("./following-sibling::span").text
print(radio_label_list)
time.sleep(1)
As per the comments, check this code:
driver.get("https://www.telekom.de/unterwegs/apple/apple-iphone-13-pro/graphit-512gb")
wait = WebDriverWait(driver,30)
wait.until(EC.element_to_be_clickable((By.XPATH,"//button[text()='Accept All']"))).click()
wait.until(EC.element_to_be_clickable((By.XPATH,"//ul[contains(#class,'phx-tariff-notification-box-new__element--desktop-tablet')]/li[2]/button"))).click()
length = len(driver.find_elements_by_class_name("phx-tariff-box__section"))
for i in range(length):
print("----------------------------------------------------------------------------------------------------------")
options = driver.find_elements_by_class_name("phx-tariff-box__section")
datas = options[i].find_element_by_xpath(".//div[contains(#class,'phx-tariff-box__volume')]").get_attribute("innerText")
print("data: {}".format(datas))
len_types = len(options[i].find_elements_by_xpath(".//div[#class='phx-tariff-box__radios-inner']//label"))
types = options[i].find_elements_by_xpath(".//div[#class='phx-tariff-box__radios-inner']//label")
if len(types) == 0:
price = options[i].find_element_by_xpath(".//p[#data-qa='block_TariffPrice']").get_attribute("innerText")
print(price)
else:
for j in range(len_types):
types[j].click()
time.sleep(2)
options = driver.find_elements_by_class_name("phx-tariff-box__section")
types = options[i].find_elements_by_xpath(".//div[#class='phx-tariff-box__radios-inner']//label")
try:
types[j].find_element_by_xpath("./input[@checked]")
type = types[j].find_element_by_xpath("./span[2]").get_attribute("innerText")
price = options[i].find_element_by_xpath(".//p[@data-qa='block_TariffPrice']").get_attribute("innerText")
print(f"{type}: {price}")
except:
pass
I'm trying to create a Yahoo account using Python and Selenium, and during sign-up I have to bypass a reCAPTCHA. I'm using the 2Captcha API to automate solving captchas.
My issue is that I can't solve the reCAPTCHA.
Based on my tests, I noticed that Yahoo is using an enterprise reCAPTCHA; I'm not sure if it's v2 or v3.
Here is the API documentation: https://2captcha.com/2captcha-api
Here is my code:
import os
import time
from selenium.webdriver.chrome.options import Options
from selenium import webdriver
import random
from twocaptcha import TwoCaptcha
opt = Options()
opt.add_argument("--disable-infobars")
opt.add_argument("start-maximized")
# Pass the argument 1 to allow and 2 to block
opt.add_experimental_option("excludeSwitches", ["enable-logging"])
opt.add_experimental_option("prefs", {
"profile.default_content_setting_values.media_stream_mic": 2,
"profile.default_content_setting_values.media_stream_camera": 2,
"profile.default_content_setting_values.geolocation": 2,
"profile.default_content_setting_values.notifications": 2
})
executable_path = r'chromedriver'
os.environ["webdriver.chrome.driver"] = executable_path
global driver
driver = webdriver.Chrome(r'chromedriver', options=opt)
time.sleep(5)
driver.get("https://login.yahoo.com/account/create")
# Fname and Lname
time.sleep(6)
driver.find_element_by_xpath("//input[#name='firstName']").send_keys("fname")
time.sleep(3)
driver.find_element_by_xpath("//input[#name='lastName']").send_keys("lname")
# Email
time.sleep(3)
numberid = random.randint(100000, 900000)
driver.find_element_by_xpath("//input[#name='yid']").send_keys("fname" + str(numberid) + "lname")
# Password
time.sleep(3)
driver.find_element_by_xpath("//input[#name='password']").send_keys("TestEPWD.")
######## number region +
FC = '(+212)'
option_el = driver.find_element_by_xpath("//option[contains(text(),'%s')]" % FC)
option_el.click()
driver.find_element_by_xpath("//input[@name='phone']").send_keys('684838340')
# Choose date
Month = random.randint(1, 12)
Months = "//option[#value='{}']".format(Month)
monthselect = driver.find_element_by_xpath(Months)
monthselect.click()
time.sleep(3)
Day = random.randint(1, 27)
driver.find_element_by_xpath("//input[#name='dd']").send_keys(Day)
time.sleep(3)
Year = random.randint(1975, 2000)
driver.find_element_by_xpath("//input[#name='yyyy']").send_keys(Year)
time.sleep(3)
list = ["Man", "Woman"]
item = random.choice(list)
driver.find_element_by_xpath("//input[#name='freeformGender']").send_keys(item)
time.sleep(3)
driver.find_element_by_xpath("//button[#name='signup']").click()
time.sleep(5)
# CAPTCHA PART :
api_key = os.getenv('APIKEY_2CAPTCHA', 'mycaptchaAPI')
solver = TwoCaptcha(api_key)
yy = driver.current_url
try:
result = solver.recaptcha(
sitekey="6LeGXAkbAAAAAAMGHQaxwylqpyvtF2jJMrvJff1h",
url=yy,
enterprise=1,
version='v3',
score=0.2
)
except Exception as e:
print(e)
else:
print('result: ' + str(result))
I am trying to modify my existing Selenium Pytest Page Object Model setup method to point at a Selenium Grid hub at "http://localhost:4444/wd/hub".
However, when I try to run my test class I am getting an "Error forwarding the new session cannot find : Capabilities" exception. Please find the code below:
conftest.py
import options as options
import pytest
from selenium import webdriver
from selenium.webdriver import DesiredCapabilities
from webdriver_manager.chrome import ChromeDriverManager
selenium_grid_url = "http://192.168.1.8:4444/wd/hub"
@pytest.fixture()
def setup():
# if browser=='chrome':
# driver=webdriver.Chrome(ChromeDriverManager().install())
# else:
# driver=webdriver.Ie()
dc = DesiredCapabilities.CHROME
dc['platform'] = "WIN10"
dc['version'] = '89'
options = webdriver.ChromeOptions()
driver = webdriver.Remote(desired_capabilities=dc,
command_executor=selenium_grid_url, options=options)
return driver
#
# def pytest_addoption(parser): # This will get browser value from Command Line
# parser.addoption("--browser")
#
# @pytest.fixture()
# def browser(request): # This will return the Browser value to setup method
# return request.config.getoption("--browser")
##############----------------PyTest HTML Report--------------###########################
# It is hook for Adding Environment info to HTML Report
def pytest_configure(config):
config._metadata['Project Name'] = 'nop Commerce'
config._metadata['Module Name'] = 'Customers'
config._metadata['Tester'] = 'Ishan'
# It is hook for delete/Modify Environment info to HTML Report
@pytest.mark.optionalhook
def pytest_metadata(metadata):
metadata.pop("JAVA_HOME", None)
metadata.pop("Plugins", None)
Page Class : LoginPage.py
import time
from selenium import webdriver
class LoginPage:
textbox_username_id="Email"
textbox_password_id="Password"
button_login_xpath="//input[@class='button-1 login-button']"
link_logout_linktext="Logout"
def __init__(self,driver):
self.driver=driver
def setUserName(self,username):
self.driver.find_element_by_id(self.textbox_username_id).clear()
self.driver.find_element_by_id(self.textbox_username_id).send_keys(username)
def setPassword(self,password):
self.driver.find_element_by_id(self.textbox_password_id).clear()
self.driver.find_element_by_id(self.textbox_password_id).send_keys(password)
time.sleep(10)
def clickLogin(self):
self.driver.find_element_by_xpath(self.button_login_xpath).click()
def clickLogout(self):
self.driver.find_element_by_link_text(self.link_logout_linktext).click()
My pytest test file: test_login.py
import pytest
from selenium import webdriver
from pageObjects.LoginPage import LoginPage
from utilities.readProperties import ReadConfig
from utilities.customLogger import LogGen
class Test_001_Login:
baseURL = ReadConfig.getApplicationURL()
username = ReadConfig.getUsername()
password = ReadConfig.getPassword()
logger= LogGen.loggen()
@pytest.mark.sanity
@pytest.mark.regression
def test_homePageTitle(self, setup):
self.logger.info("*******************Test_001_Login***********************")
self.logger.info("*******************Verifying Home Page Title***********************")
self.driver = setup
self.driver.maximize_window()
self.driver.get(self.baseURL)
act_title = self.driver.title
if act_title == "Your store. Login":
assert True
self.logger.info('*******************Test Passed - HomePageTitle***********************')
self.driver.close()
else:
self.driver.save_screenshot("D:\\Python Programs\\SeleniumPavanSDET\\NopCommerce\Screenshots\\" + "test_homePageTitle.png")
self.driver.close()
self.logger.error('*******************Test Failed - HomePageTitle***********************')
assert False
@pytest.mark.sanity
@pytest.mark.regression
def test_login(self,setup):
self.logger.info('*******************Test_001_Login***********************')
self.logger.info('*******************Verifying Login Test**********************')
self.driver = setup
self.driver.maximize_window()
self.driver.get(self.baseURL)
self.lp = LoginPage(self.driver)
self.lp.setUserName(self.username)
self.lp.setPassword(self.password)
self.lp.clickLogin()
act_title = self.driver.title
if act_title == "Dashboard / nopCommerce administration":
assert True
self.logger.info('*******************Test Passed - TestLogin***********************')
self.driver.close()
else:
self.driver.save_screenshot("D:\\Python Programs\\Selenium\\NopCommerce\Screenshots\\"+"test_login.png")
self.driver.close()
self.logger.error('*******************Test Failed - TestLogin***********************')
assert False
Exception:-
> driver = webdriver.Remote(desired_capabilities=dc,
command_executor=selenium_grid_url, options=options)
E selenium.common.exceptions.WebDriverException: Message: Error forwarding the new session cannot find : Capabilities {browserName: chrome, goog:chromeOptions: {args: [], extensions: []}, platform: WIN10, version: 89}
E Stacktrace:
E at org.openqa.grid.web.servlet.handler.RequestHandler.process (RequestHandler.java:118)
E at org.openqa.grid.web.servlet.DriverServlet.process (DriverServlet.java:85)
E at org.openqa.grid.web.servlet.DriverServlet.doPost (DriverServlet.java:69)
E at javax.servlet.http.HttpServlet.service (HttpServlet.java:707)
E at javax.servlet.http.HttpServlet.service (HttpServlet.java:790)
E at org.seleniumhq.jetty9.servlet.ServletHolder.handle (ServletHolder.java:865)
E at org.seleniumhq.jetty9.servlet.ServletHandler.doHandle (ServletHandler.java:535)
E at org.seleniumhq.jetty9.server.handler.ScopedHandler.handle (ScopedHandler.java:146)
E at org.seleniumhq.jetty9.security.SecurityHandler.handle (SecurityHandler.java:548)
E at org.seleniumhq.jetty9.server.handler.HandlerWrapper.handle (HandlerWrapper.java:132)
E at org.seleniumhq.jetty9.server.handler.ScopedHandler.nextHandle (ScopedHandler.java:257)
E at org.seleniumhq.jetty9.server.session.SessionHandler.doHandle (SessionHandler.java:1595)
E at org.seleniumhq.jetty9.server.handler.ScopedHandler.nextHandle (ScopedHandler.java:255)
E at org.seleniumhq.jetty9.server.handler.ContextHandler.doHandle (ContextHandler.java:1340)
E at org.seleniumhq.jetty9.server.handler.ScopedHandler.nextScope (ScopedHandler.java:203)
E at org.seleniumhq.jetty9.servlet.ServletHandler.doScope (ServletHandler.java:473)
E at org.seleniumhq.jetty9.server.session.SessionHandler.doScope (SessionHandler.java:1564)
E at org.seleniumhq.jetty9.server.handler.ScopedHandler.nextScope (ScopedHandler.java:201)
E at org.seleniumhq.jetty9.server.handler.ContextHandler.doScope (ContextHandler.java:1242)
E at org.seleniumhq.jetty9.server.handler.ScopedHandler.handle (ScopedHandler.java:144)
E at org.seleniumhq.jetty9.server.handler.HandlerWrapper.handle (HandlerWrapper.java:132)
E at org.seleniumhq.jetty9.server.Server.handle (Server.java:503)
E at org.seleniumhq.jetty9.server.HttpChannel.handle (HttpChannel.java:364)
E at org.seleniumhq.jetty9.server.HttpConnection.onFillable (HttpConnection.java:260)
E at org.seleniumhq.jetty9.io.AbstractConnection$ReadCallback.succeeded (AbstractConnection.java:305)
E at org.seleniumhq.jetty9.io.FillInterest.fillable (FillInterest.java:103)
E at org.seleniumhq.jetty9.io.ChannelEndPoint$2.run (ChannelEndPoint.java:118)
E at org.seleniumhq.jetty9.util.thread.strategy.EatWhatYouKill.runTask (EatWhatYouKill.java:333)
E at org.seleniumhq.jetty9.util.thread.strategy.EatWhatYouKill.doProduce (EatWhatYouKill.java:310)
E at org.seleniumhq.jetty9.util.thread.strategy.EatWhatYouKill.tryProduce (EatWhatYouKill.java:168)
E at org.seleniumhq.jetty9.util.thread.strategy.EatWhatYouKill.run (EatWhatYouKill.java:126)
E at org.seleniumhq.jetty9.util.thread.ReservedThreadExecutor$ReservedThread.run (ReservedThreadExecutor.java:366)
E at org.seleniumhq.jetty9.util.thread.QueuedThreadPool.runJob (QueuedThreadPool.java:765)
E at org.seleniumhq.jetty9.util.thread.QueuedThreadPool$2.run (QueuedThreadPool.java:683)
E at java.lang.Thread.run (None:-1)
Not sure why this error is occurring. I have registered my Selenium server as a hub and also registered the respective nodes.
If anybody could let me know why this error is occurring, that would be great. Thank you.
I just replaced that step in mine, and it works fine for me:
selenium_grid_url = "http://localhost:4444/wd/hub"
@pytest.fixture()
def setup():
dc = DesiredCapabilities.CHROME
options = webdriver.ChromeOptions()
driver = webdriver.Remote(desired_capabilities=dc,
command_executor=selenium_grid_url, options=options)
return driver
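As a side note, and only an assumption about your client version: newer Selenium 4 releases drop the desired_capabilities argument from webdriver.Remote, so with an upgraded client the equivalent setup moves everything onto the options object:

from selenium import webdriver

options = webdriver.ChromeOptions()
# Only pin platform/version if the grid actually has a matching node registered
# options.set_capability("platformName", "WIN10")
driver = webdriver.Remote(
    command_executor="http://localhost:4444/wd/hub",
    options=options,
)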
By the way, how do you launch Selenium Grid? As a standalone server, like:
java -jar selenium-server-standalone-3.141.59.jar
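For completeness, a typical Grid 3 standalone setup (jar name and port are illustrative) starts a hub and registers at least one node against it:

# Start the hub (listens on port 4444 by default)
java -jar selenium-server-standalone-3.141.59.jar -role hub
# On each test machine, start a node and register it with the hub
java -jar selenium-server-standalone-3.141.59.jar -role node -hub http://localhost:4444/grid/register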