Using Selenium WebDriver with Python on Mouser

I'm trying to open the Mouser website and use the search bar to send some data. Here's an example of the code, but I can't get the right CSS selector. Thank you.
import time
from openpyxl import load_workbook
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
driver = webdriver.Chrome(executable_path='C:/Users/amuri/AppData/Local/Microsoft/WindowsApps/PythonSoftwareFoundation.Python.3.9_qbz5n2kfra8p0/site-packages/chromedriver.exe')
driver.implicitly_wait(1)
url ='https://www.mouser.com/'
driver.get(url)
print(driver.title)
wait = WebDriverWait(driver, timeout=1)
wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, "#as-input-066 .form-control")))
elem = driver.find_element_by_css_selector("#as-input-066 .form-control")
elem.click()
elem.send_keys("myString")

Try the following CSS selector:
.form-control.headerSearchBox.search-input.js-search-autosuggest.as-input
The XPath is even shorter:
//input[contains(@id,'as-input')]
Explanation: it matches any input whose id contains as-input.
One more suggestion:
Change
wait = WebDriverWait(driver, timeout=1)
to
wait = WebDriverWait(driver, timeout=15)
A 1-second timeout is too short; it should be at least 10 seconds.
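Putting both suggestions together, here is a minimal sketch of the fixed search (the XPath and the 15-second timeout come from the advice above, and the imports are the ones already in the question; the locator may still need adjusting if Mouser changes its markup):
wait = WebDriverWait(driver, timeout=15)
# locate the search box by the stable part of its id, since the numeric suffix is generated
elem = wait.until(EC.element_to_be_clickable((By.XPATH, "//input[contains(@id, 'as-input')]")))
elem.click()
elem.send_keys("myString")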

Related

selenium cannot send keys to input box

I'm trying to log in automatically with Selenium. I've tried
driver.execute_script
driver.find_element_by_css_selector
driver.find_element_by_xpath
Here is my code:
from selenium import webdriver
from webdriver_manager import chrome
driver = webdriver.Chrome(chrome.ChromeDriverManager().install())
driver.get("https://naco999.com/")
driver.find_element_by_css_selector('#login_id').send_keys("id")
driver.find_element_by_css_selector("#login_pw").send_keys("pw")
But none of these seems to work. How can I fix this?
There are 3 elements on that page matching the #login_id CSS selector.
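You can confirm the duplication yourself with a quick check (a small sketch reusing the same driver; find_elements returns all matches instead of just the first):
# count how many elements match the ambiguous selector
print(len(driver.find_elements_by_css_selector("#login_id")))  # 3 on this page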
Try using this:
driver.find_element_by_css_selector(".header-one #login_id").send_keys("id")
driver.find_element_by_css_selector(".header-one #login_pw").send_keys("pw")
Also, you should add a wait so that the text is sent only once the elements have loaded.
Like this:
from selenium import webdriver
from webdriver_manager import chrome
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
driver = webdriver.Chrome(chrome.ChromeDriverManager().install())
wait = WebDriverWait(driver, 20)
driver.get("https://naco999.com/")
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, ".header-one #login_id"))).send_keys("id")
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, ".header-one #login_pw"))).send_keys("pw")

Selenium python getting stuck post driver.get('URL') for Internet Explorer

My Selenium script gets stuck after driver.get("url") and does not move forward; later it errors out. I am using the code below. After executing, it pauses for a long time. I have tried all the options in the IE browser's advanced settings.
from selenium import webdriver
from bs4 import BeautifulSoup
import time
from tqdm import tqdm
email='XXXXX'
password='XXXXX'
options = webdriver.IeOptions()
options.ignore_protected_mode_settings = True
driver = webdriver.Ie(r'C:\Program Files (x86)\selenium-3.141.0\selenium\webdriver\ie\IEdriverServer.exe', options=options)
driver.get('https://s2fs.axisbank.com/EFTClient/Account/Login.htm')
email_box = driver.find_element_by_name('username')
email_box.send_keys(email)
pass_box = driver.find_element_by_name('password')
pass_box.send_keys(password)
submit_button = driver.find_element_by_id('loginSubmit')
submit_button.click()
time.sleep(3)
File2393= driver.find_element_by_link_text('Checkbox For Item 919020028802393.csv')
File2393.click()
time.sleep(1)
File3303= driver.find_element_by_link_text('Checkbox For Item 920020034873303.csv')
File3303.click()
time.sleep(1)
download = driver.find_element_by_css_selector('.icomoon.icon-download2.toolbar-button')
download.click()
print("File is been downloaded")
You are missing a wait / delay before accessing the first element on the page.
You can simply add a sleep there, like this:
driver.get('https://s2fs.axisbank.com/EFTClient/Account/Login.htm')
time.sleep(10)
email_box = driver.find_element_by_name('username')
email_box.send_keys(email)
But it is better to use explicit waits.
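For example, a minimal sketch of the explicit-wait version (assuming the same page and field name as above):
driver.get('https://s2fs.axisbank.com/EFTClient/Account/Login.htm')
# wait up to 10 seconds for the username field instead of sleeping blindly
email_box = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.NAME, 'username')))
email_box.send_keys(email)
with the imports:
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By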
Well, this URL:
https://s2fs.axisbank.com/EFTClient/Account/Login.htm
is not loading at all in my browser, but if it works for you, then you can try an explicit wait as below:
options = webdriver.IeOptions()
options.ignore_protected_mode_settings = True
driver = webdriver.Ie(r'C:\Program Files (x86)\selenium-3.141.0\selenium\webdriver\ie\IEdriverServer.exe', options=options)
driver.maximize_window()
driver.implicitly_wait(30)
driver.get("https://s2fs.axisbank.com/EFTClient/Account/Login.htm")
wait = WebDriverWait(driver, 10)
email_box = wait.until(EC.element_to_be_clickable((By.NAME, "username")))
email_box.send_keys('email')
These would be the imports:
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
I think the site is not reachable; try using the correct URL to access the page.
To access the element, you can use an explicit wait:
email_box = WebDriverWait(driver, 10).until(EC.visibility_of_element_located((By.XPATH, "element_XPATH")))
email_box.send_keys("UserName")
Imports:
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait

Selenium not printing inner text of div

I am using selenium to try to scrape data from a website (https://www.mergentarchives.com/), and I am attempting to get the innerText from this element:
<div class="x-paging-info" id="ext-gen200">Displaying reports 1 - 15 of 15</div>
This is my code so far:
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import WebDriverWait
driver = webdriver.Firefox()
driver.maximize_window()
search_url = 'https://www.mergentarchives.com/search.php'
driver.get(search_url)
assert 'Mergent' in driver.title
company_name_input = '//*[@id="ext-comp-1009"]'
search_button = '//*[@id="ext-gen287"]'
driver.implicitly_wait(10)
driver.find_element_by_xpath(company_name_input).send_keys('3com corp')
driver.find_element_by_xpath(search_button).click()
driver.implicitly_wait(20)
print(driver.find_element_by_css_selector('#ext-gen200').text)
Basically I am just filling out a search form, which works, and it takes me to a search results page, where the number of results is listed in a div element. When I attempt to print the text of this element, I simply get a blank space; nothing is written and there is no error.
[Finished in 21.1s]
What am I doing wrong?
I think you may need an explicit wait:
wait = WebDriverWait(driver, 10)
info = wait.until(EC.visibility_of_element_located((By.XPATH, "//div[@class='x-paging-info' and @id='ext-gen200']"))).get_attribute('innerHTML')
print(info)
Imports:
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
You may need to add a condition that verifies whether the search results have loaded; once they have, you can use the code below:
print(driver.find_element_by_id('ext-gen200').text)
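One way to express that condition (a hedged sketch; WebDriverWait is already imported in the question, and by default it ignores NoSuchElementException while polling):
# wait until the paging div exists and actually contains text, then read it
WebDriverWait(driver, 20).until(lambda d: d.find_element_by_id('ext-gen200').text.strip() != '')
print(driver.find_element_by_id('ext-gen200').text)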

Python, selenium find_element_by_link_text not working

I am trying to scrape a website where I have to click a link. For this purpose, I am using the Selenium library with the Chrome driver.
import time
from selenium import webdriver
url = 'https://sjobs.brassring.com/TGnewUI/Search/Home/Home?partnerid=25222&siteid=5011&noback=1&fromSM=true#Applications'
browser = webdriver.Chrome()
browser.get(url)
time.sleep(3)
link = browser.find_element_by_link_text("Don't have an account yet?")
link.click()
But it is not working. Any ideas why? Is there a workaround?
You can get it done in several ways; here is one of them. I've used the driver.execute_script() command to force the click. You should not rely on hardcoded delays, as they are very inconsistent.
Modified script:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait as wait
from selenium.webdriver.support import expected_conditions as EC
url = 'https://sjobs.brassring.com/TGnewUI/Search/Home/Home?partnerid=25222&siteid=5011&noback=1&fromSM=true#Applications'
driver = webdriver.Chrome()
driver.get(url)
item = wait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR, "a[ng-click='newAccntScreen()']")))
driver.execute_script("arguments[0].click();",item)
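If you would rather let Selenium perform a native click instead of forcing it through JavaScript, an element_to_be_clickable wait with the same locator usually works as well (a sketch under the same imports as the modified script above):
item = wait(driver, 10).until(EC.element_to_be_clickable((By.CSS_SELECTOR, "a[ng-click='newAccntScreen()']")))
item.click()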

Python, Selenium, and Beautiful Soup for URL

I am trying to write a script using Selenium to access Pastebin, perform a search, and print out the URL results as text. I need the visible URL results and nothing else.
<div class="gs-bidi-start-align gs-visibleUrl gs-visibleUrl-long" dir="ltr" style="word-break:break-all;">pastebin.com/VYQTSbzY</div>
Current script is:
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from bs4 import BeautifulSoup
browser = webdriver.Firefox()
browser.get('http://www.pastebin.com')
search = browser.find_element_by_name('q')
search.send_keys("test")
search.send_keys(Keys.RETURN)
soup = BeautifulSoup(browser.page_source, 'html.parser')
for link in soup.find_all('a'):
    print(link.get('href', None), link.get_text())
You don't actually need BeautifulSoup; Selenium itself is very powerful at locating elements:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
browser = webdriver.Firefox()
browser.get('http://www.pastebin.com')
search = browser.find_element_by_name('q')
search.send_keys("test")
search.send_keys(Keys.RETURN)
# wait for results to appear
wait = WebDriverWait(browser, 10)
results = wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.gsc-resultsbox-visible")))
# grab results
for link in results.find_elements_by_css_selector("a.gs-title"):
    print(link.get_attribute("href"))
browser.close()
Prints:
http://pastebin.com/VYQTSbzY
http://pastebin.com/VYQTSbzY
http://pastebin.com/VAAQCjkj
...
http://pastebin.com/fVUejyRK
http://pastebin.com/fVUejyRK
Note the use of an explicit wait, which lets the script wait for the search results to appear.
