So basically, I have a list of droplists that I need to interact with.
I know how to interact with the first droplist.
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import Select
import time
PATH = "C:\Program Files (x86)\chromedriver.exe"
options = Options()
options.headless = False
driver = webdriver.Chrome(PATH,options = options)
driver.set_window_size(1920,1080)
driver.get("https://www.compraensanjuan.com")
time.sleep(3)
link = driver.find_element_by_link_text("Mi cuenta")
link.click()
time.sleep(3)
email = driver.find_element_by_name("email")
email.send_keys("yourmail")
password = driver.find_element_by_name("clave")
password.send_keys("Yourpassword")
password.send_keys(Keys.RETURN)
time.sleep(3)
drp = Select(driver.find_element_by_id("acciones"))
drp.select_by_visible_text("Actualizar")
driver.back()
But how do I repeat the same action for all the following droplists?
You may be able to get a list of the options, and then iterate over them. Something like:
options = driver.find_element_by_name('DropdownElement')
options = options.text.split('\n')
for opt in options:
    driver.find_element_by_xpath("//select[@name='DropdownElement']/option[text()='" + opt + "']").click()
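The same approach can be extended to every droplist on the page. A rough sketch (assuming each droplist is a <select> element sharing the name "acciones", which you would need to verify against the actual page):
selects = driver.find_elements_by_name("acciones")
for i in range(len(selects)):
    # re-locate the droplists on every pass in case the page reloads after selecting
    sel = driver.find_elements_by_name("acciones")[i]
    Select(sel).select_by_visible_text("Actualizar")
    time.sleep(1)
    driver.back()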
Using this URL I want to locate the div tags that have the attribute data-asin. When I use //div[@data-asin] in Chrome's inspector it finds 21 elements, but when I try to get these elements via Selenium, both with an explicit wait and with a direct length check, I get 0. My guess is that the Selenium-driven browser cannot see any of these elements in the DOM tree. The code is below:
import pandas as pd
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
# reading url-s from csv file
def readCSV(path_csv):
    df = pd.read_csv(path_csv)
    return df
fileCSV = readCSV(r'C:\Users\Admin\Downloads\urls.csv')
length_of_column_urls = fileCSV['linkamazon'].last_valid_index()

def create_driver():
    chrome_options = Options()
    chrome_options.headless = True
    chrome_options.add_argument("start-maximized")
    # options.add_experimental_option("detach", True)
    chrome_options.add_argument("--no-sandbox")
    chrome_options.add_experimental_option("excludeSwitches", ["enable-automation"])
    chrome_options.add_experimental_option('excludeSwitches', ['enable-logging'])
    chrome_options.add_experimental_option('useAutomationExtension', False)
    chrome_options.add_argument('--disable-blink-features=AutomationControlled')
    webdriver_service = Service(r'C:\Users\Admin\Downloads\chromedriver107v\chromedriver.exe')
    driver = webdriver.Chrome(service=webdriver_service, options=chrome_options)
    return driver

# going to urls 1-by-1
def goToUrl_Se(driver):
    global counter
    counter = 0
    for i in range(0, length_of_column_urls + 1):
        xUrl = fileCSV.iloc[i, 1]
        print(xUrl, i)
        # going to url (amazon) via Selenium WebDriver
        driver.get(xUrl)
        parse_data()
        counter += 1
    driver.quit()

# fetch and parse the data from the url page
def parse_data():
    global asin, title, bookform, priceNewProd, author
    wait = WebDriverWait(driver, timeout=77)
    try:
        x_index = wait.until(EC.visibility_of_all_elements_located((By.TAG_NAME, '//div[@data-asin]')))  ### Attention here
        print(len(x_index))
    except:
        y_index = driver.find_elements(By.TAG_NAME, '//div[@data-asin]')  ### And attention here
        print(len(y_index))
driver=create_driver()
goToUrl_Se(driver)
You have to use By.XPATH, not By.TAG_NAME:
try:
    x_index = wait.until(EC.visibility_of_all_elements_located((By.XPATH, '//div[@data-asin]')))  ### Attention here
    print(len(x_index))
except:
    y_index = driver.find_elements(By.XPATH, '//div[@data-asin]')  ### And attention here
    print(len(y_index))
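As a side note, the same elements can also be located without XPath at all, using a CSS attribute selector; a minimal sketch:
y_index = driver.find_elements(By.CSS_SELECTOR, 'div[data-asin]')
print(len(y_index))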
I want to get the video link from the website https://www.ofw.su/family-feud-july-29-2022, but I can't. This is my code:
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
import time
from datetime import datetime
from random import randint
import random
import string
import os
def get(link):
    CHROMEDRIVER_PATH = 'chromedriver.exe'
    options = webdriver.ChromeOptions()
    options.add_argument("user-data-dir=E:\\profile")
    options.add_argument("--disable-notifications")
    #options.add_argument("--headless")
    options.add_experimental_option('excludeSwitches', ['enable-logging'])
    driver = webdriver.Chrome(executable_path=CHROMEDRIVER_PATH, options=options)
    driver.get(link)
    time.sleep(2)
    url_video = driver.find_element_by_xpath("/html/body/div/div[2]/div[3]/video").get_attribute('src')
    print(url_video)
    return url_video
link = "https://www.ofw.su/family-feud-july-29-2022"
get(link)
I didn't get any links
The element you are trying to access is inside the iframe.
So, in order to access elements inside the iframe you have to switch to that iframe as follows:
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
import time
from datetime import datetime
from random import randint
import random
import string
import os
def get(link):
    CHROMEDRIVER_PATH = 'chromedriver.exe'
    options = webdriver.ChromeOptions()
    options.add_argument("user-data-dir=E:\\profile")
    options.add_argument("--disable-notifications")
    #options.add_argument("--headless")
    options.add_experimental_option('excludeSwitches', ['enable-logging'])
    driver = webdriver.Chrome(executable_path=CHROMEDRIVER_PATH, options=options)
    driver.get(link)
    time.sleep(2)
    iframe = driver.find_element_by_xpath("//iframe[@class='embed-responsive-item']")
    driver.switch_to.frame(iframe)
    url_video = driver.find_element_by_xpath("/html/body/div/div[2]/div[3]/video").get_attribute('src')
    print(url_video)
    return url_video

link = "https://www.ofw.su/family-feud-july-29-2022"
get(link)
When you finish working with the elements inside the iframe, switch back to the regular content with the following code:
driver.switch_to.default_content()
Also, you should use explicit waits instead of hardcoded delays like time.sleep(2), and use relative locators rather than absolute XPaths like /html/body/div/div[2]/div[3]/video.
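For example, a minimal sketch combining both suggestions (the iframe locator is the one used above; it is assumed the frame contains a single <video> tag):
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

wait = WebDriverWait(driver, 10)
# wait for the iframe to be available and switch into it in one step
wait.until(EC.frame_to_be_available_and_switch_to_it(
    (By.XPATH, "//iframe[@class='embed-responsive-item']")))
# wait for the video element by tag name instead of using an absolute XPath
video = wait.until(EC.presence_of_element_located((By.TAG_NAME, "video")))
print(video.get_attribute("src"))
driver.switch_to.default_content()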
Is there a way to click the highlighted checkbox at the top and download all the files by clicking the download button at the bottom? That way we can avoid going through all the files one by one.
existing code:
# import statements
import requests
from bs4 import BeautifulSoup as bs
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
#step-1:to get list of files:
data_url_page = requests.get("https://mft.rrc.texas.gov/link/0e33071d-9891-4009-970c-5aa26da5e31a", verify=False)
files_list = []
soup = bs(data_url_page.content, "lxml")
for table_div in soup.find_all('div', class_='ui-datatable-tablewrapper'):
    files_td = table_div.find_all('td', class_='NameColumn')
    for file_name in files_td:
        if file_name:
            files_list.append(file_name.get_text())
#step-2 : for download all files.
def http_file_downloader(data_url_files, files_list):
    options = Options()
    options.add_argument("--headless")
    options.set_preference("browser.download.folderList", 2)
    options.set_preference("browser.download.manager.showWhenStarting", False)
    options.set_preference("service_log_path", "/home/myuser")
    options.set_preference("browser.download.dir", "a/b/c/dir/")
    options.set_preference("browser.helperApps.neverAsk.saveToDisk", "application/force-download")
    driver = webdriver.Firefox(options=options)
    driver.get(data_url_files)
    try:
        for file_name in files_list:
            elem = driver.find_element(by=By.XPATH, value="//a[text()='" + file_name + "']")
            elem.click()
    finally:
        driver.quit()
# calling above method
http_file_downloader("https://mft.rrc.texas.gov/link/0e33071d-9891-4009-970c-5aa26da5e31a", files_list)
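As for the select-all idea itself, it depends entirely on the page's markup, but a sketch could look like the following. The two locators below are placeholders, not taken from the actual site, and would need to be replaced after inspecting the table header checkbox and the download button:
def download_all_via_checkbox(data_url_files):
    options = Options()
    options.add_argument("--headless")
    driver = webdriver.Firefox(options=options)
    try:
        driver.get(data_url_files)
        wait = WebDriverWait(driver, 30)
        # placeholder locator for the "select all" checkbox in the table header
        select_all = wait.until(EC.element_to_be_clickable(
            (By.XPATH, "//th//div[contains(@class,'ui-chkbox-box')]")))
        select_all.click()
        # placeholder locator for the download button at the bottom of the page
        wait.until(EC.element_to_be_clickable(
            (By.XPATH, "//button[contains(.,'Download')]"))).click()
    finally:
        driver.quit()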
I want to click on the second link in the results of a Google search using Selenium WebDriver.
(I need a method that works for any Google search result.)
example page
This is my code. How can I modify the if statement?
import speech_recognition as sr
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
chrome_options = webdriver.ChromeOptions()
chrome_options.add_experimental_option("detach", True)
global driver
driver = webdriver.Chrome('C:\Windows\chromedriver.exe', options=chrome_options)
chrome_options.add_argument("--start-maximized")
wait = WebDriverWait(driver, 10)
def google(text):
    if "first link" in text:
        RESULTS_LOCATOR = "//div/h3/a"
        WebDriverWait(driver, 10).until(
            EC.visibility_of_element_located((By.XPATH, RESULTS_LOCATOR)))
        page1_results = driver.find_elements(By.XPATH, RESULTS_LOCATOR)
        for item in page1_results:
            print(item.text)
        # driver.find_element_by_class_name().click()
    else:
        ggwp = text.replace(" ", "")
        driver.get("https://www.google.com")
        driver.find_element_by_xpath('//*[@id="tsf"]/div[2]/div[1]/div[1]/div/div[2]/input').send_keys(ggwp)
        driver.find_element_by_xpath('//*[@id="tsf"]/div[2]/div[1]/div[3]/center/input[1]').send_keys(Keys.ENTER)
The second link can be placed in different divs.
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import os
browser = webdriver.Chrome(executable_path=os.path.abspath(os.getcwd()) + "/chromedriver")
link = 'http://www.google.com'
browser.get(link)
# search keys
search = browser.find_element_by_name('q')
search.send_keys("python")
search.send_keys(Keys.RETURN)
# click second link
for i in range(10):
    try:
        browser.find_element_by_xpath('//*[@id="rso"]/div[' + str(i) + ']/div/div[2]/div/div/div[1]/a').click()
        break
    except:
        pass
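Another possible sketch is to collect all organic result links and click the second one directly, assuming Google still renders each result title as an <h3> inside the anchor (that markup changes often, so treat the locator as an assumption):
results = browser.find_elements_by_xpath("//div[@id='search']//a[h3]")
if len(results) >= 2:
    results[1].click()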
I have this code. It opens Chrome, but it doesn't continue with the rest of the script, and I don't really know how to fix the issue. I do NOT want it to open the Selenium WebDriver browser; I want it to open my own local Chrome path, and at the same time I want the script to read elements and print their values.
import names, time, random
from selenium.webdriver.chrome.options import Options
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support import expected_conditions as EC
def AccGen():
    while True:
        # *************Static***************
        prefs = {"profile.managed_default_content_settings.images": 1}
        options = Options()
        # options.add_argument('--disable-gpu')
        # options.add_argument("--disable-extensions")
        # options.add_argument('--disable-notifications')
        options.add_experimental_option("prefs", prefs)
        options.add_argument("--window-size=1600,900")
        browser = webdriver.Chrome(executable_path='C:/Users/Jonathan/AppData/Local/Google/Chrome/Application/Chrome.exe', chrome_options=options)
        browser.implicitly_wait(10)
        # ------------------------------------
        # Access to site
        browser.get(
            "https://accounts.google.com/SignUp?service=mail&continue=https%3A%2F%2Fmail.google.com%2Fmail%2F&ltmpl=default"
        )
        ###################################################################
        firstName = names.get_first_name()
        lastName = names.get_last_name()
        email = '{}.{}{}'.format(firstName, lastName, random.randint(1000, 9999))
        password = '2001jl00'
        ###################################################################
        # Write in random Name
        WebDriverWait(browser, 20).until(
            EC.visibility_of_element_located(
                (By.XPATH, '//*[@id="firstName"]'))).send_keys(firstName)
https://mystb.in/vevivuneku.coffeescript
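If the goal is simply to drive the locally installed Chrome, one option (a sketch only, following the same Selenium 3 style API as the code above) is to keep executable_path pointing at chromedriver and point options.binary_location at the local Chrome binary instead:
options = Options()
# local Chrome binary, path taken from the question's code
options.binary_location = 'C:/Users/Jonathan/AppData/Local/Google/Chrome/Application/Chrome.exe'
# executable_path must be the chromedriver binary, not chrome.exe; adjust this placeholder path
browser = webdriver.Chrome(executable_path='C:/path/to/chromedriver.exe', chrome_options=options)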