How to clear the search area when using Selenium for web scraping - Python

from selenium import webdriver
from bs4 import BeautifulSoup
from selenium.webdriver.common.keys import Keys
import datetime
import pandas as pd
import requests
driver = webdriver.Chrome('./chromedriver')
driver.set_window_size(800,600)
driver.implicitly_wait(5)
driver.get('https://news.google.com/?hl=ko&gl=KR&ceid=KR%3Ako')
driver.implicitly_wait(2)
words = ['atom','mechanic','clock']
for word in words:
    if datetime.datetime.today() == 0:
        word = word + 'when:3d'
    else:
        word = word + 'when:1d'
    elem = driver.find_element_by_xpath('//*[@id="gb"]/div[2]/div[2]/div/form/div[1]/div/div/div/div/div[1]/input[2]')
    elem.send_keys(Keys.CONTROL + 'a' + Keys.NULL, word.decode('utf-8'))
    elem.send_keys(Keys.RETURN)
Above is the entire script for web scraping. The rest is working fine, but this part, elem.send_keys(Keys.CONTROL + 'a' + Keys.NULL, word.decode('utf-8')), doesn't do anything. Any tips on how to fix this so the search area is cleared before each new word?

To clear the search area when using Selenium, you have to induce WebDriverWait for the element_to_be_clickable() and you can use the following Locator Strategy:
Using XPATH:
element = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, "//*[@id='gb']/div[2]/div[2]/div/form/div[1]/div/div/div/div/div[1]/input[2]")))
element.click()
element.clear()
Note: you have to add the following imports:
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
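Putting it together with the original loop, a minimal sketch (assuming the same search-box XPath still matches; untested against the live Google News page):
for word in words:
    elem = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, "//*[@id='gb']/div[2]/div[2]/div/form/div[1]/div/div/div/div/div[1]/input[2]")))
    elem.click()
    elem.clear()                 # empty the search box left over from the previous word
    elem.send_keys(word)         # Python 3 str, no .decode('utf-8') needed
    elem.send_keys(Keys.RETURN)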

Related

Selenium not printing inner text of div

I am using selenium to try to scrape data from a website (https://www.mergentarchives.com/), and I am attempting to get the innerText from this element:
<div class="x-paging-info" id="ext-gen200">Displaying reports 1 - 15 of 15</div>
This is my code so far:
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import WebDriverWait
driver = webdriver.Firefox()
driver.maximize_window()
search_url = 'https://www.mergentarchives.com/search.php'
driver.get(search_url)
assert 'Mergent' in driver.title
company_name_input = '//*[@id="ext-comp-1009"]'
search_button = '//*[@id="ext-gen287"]'
driver.implicitly_wait(10)
driver.find_element_by_xpath(company_name_input).send_keys('3com corp')
driver.find_element_by_xpath(search_button).click()
driver.implicitly_wait(20)
print(driver.find_element_by_css_selector('#ext-gen200').text)
Basically I am just filling out a search form, which works, and it takes me to a search results page where the number of results is listed in a div element. When I attempt to print the text of this element, I simply get a blank space; nothing is written and there is no error.
[Finished in 21.1s]
What am I doing wrong?
I think you may need an explicit wait:
wait = WebDriverWait(driver, 10)
info = wait.until(EC.visibility_of_element_located((By.XPATH, "//div[@class = 'x-paging-info' and @id='ext-gen200']"))).get_attribute('innerHTML')
print(info)
Imports:
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
You may need to add a check that verifies whether the search results have loaded, and once they have, you can use the code below:
print(driver.find_element_by_id('ext-gen200').text)
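For example, a minimal sketch of such a check (assuming the paging div's text becoming non-empty is the signal that results have rendered):
wait = WebDriverWait(driver, 20)
# keep polling until the paging info actually contains text
wait.until(lambda d: d.find_element_by_id('ext-gen200').text.strip() != '')
print(driver.find_element_by_id('ext-gen200').text)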

python using selenium webdriver mouser

I'm trying to open the Mouser website and use the search bar to send some data. Here's an example of the code, but I can't get the right CSS selector. Thank you.
import time
from openpyxl import load_workbook
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
driver = webdriver.Chrome(executable_path='C:/Users/amuri/AppData/Local/Microsoft/WindowsApps/PythonSoftwareFoundation.Python.3.9_qbz5n2kfra8p0/site-packages/chromedriver.exe')
driver.implicitly_wait(1)
url ='https://www.mouser.com/'
driver.get(url)
print(driver.title)
wait = WebDriverWait(driver, timeout=1)
wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, "#as-input-066 .form-control")))
elem = driver.find_element_by_css_selector("#as-input-066 .form-control")
elem.click()
elem.send_keys("myString")
Try the following CSS:
.form-control.headerSearchBox.search-input.js-search-autosuggest.as-input
The XPath is even shorter:
//input[contains(@id,'as-input')]
Explanation: it matches any input whose id contains as-input.
One more suggestion:
Change
wait = WebDriverWait(driver, timeout=1)
to
wait = WebDriverWait(driver, timeout=15)
A 1-second timeout is too small; it should be at least 10.
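Put together, a minimal sketch (using the contains-based XPath from above; untested against the live Mouser page):
wait = WebDriverWait(driver, timeout=15)
elem = wait.until(EC.element_to_be_clickable((By.XPATH, "//input[contains(@id, 'as-input')]")))
elem.click()
elem.send_keys("myString")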

Getting an empty list when scraping with Selenium

I am trying to create a python function that can scrape the article titles of a search result on Popular Science's website.
I have written this code, which has worked for a similar science-related website, but when I run it specifically for Popular Science, it returns an empty list.
Code:
from selenium import webdriver
import pandas as pd
def scraper(text):
    driver = webdriver.Chrome(executable_path='chromedriver.exe')
    wired_dict = []
    driver.get("https://www.popsci.com/search-results/" + text + "/")
    search = driver.find_elements_by_class_name("siq-partner-result")
    for words in search:
        wired_dict.append(words.text)
    return wired_dict

print(scraper("science"))
You can use driver.implicitly_wait(10) to wait while the page loads.
from selenium import webdriver
def scrapper(text):
    driver = webdriver.Chrome('./chromedriver')
    driver.get(f"https://www.popsci.com/search-results/{text}/")
    driver.implicitly_wait(10)
    search = driver.find_elements_by_class_name("siq-partner-result")
    wired_dict = [word.text for word in search]
    print(wired_dict)

scrapper('sample')
This page takes a while to load. You are using driver.find_elements_by_class_name before the page has finished loading, so it's not finding those elements.
You can test this theory with import time and a time.sleep(5) just before the search code, for example:
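# diagnostic only, not a fix
import time
time.sleep(5)  # crude pause: give the page time to finish loading
search = driver.find_elements_by_class_name("siq-partner-result")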
The best solution is to use WebDriverWait() to keep checking until the elements have loaded:
from selenium import webdriver
import pandas as pd
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
def scraper(text):
    driver = webdriver.Chrome(executable_path='chromedriver.exe')
    wired_dict = []
    driver.get("https://www.popsci.com/search-results/" + text + "/")
    delay = 3
    WebDriverWait(driver, delay).until(EC.presence_of_element_located((By.CLASS_NAME, 'siq-partner-result')))
    search = driver.find_elements_by_class_name("siq-partner-result")
    for words in search:
        wired_dict.append(words.text)
    return wired_dict
You can use WebDriverWait for the desired element to be visible and then try to find the elements.
Using XPATH:
WebDriverWait(driver, 30).until(EC.visibility_of_element_located((By.XPATH, "//*[@class='siq-partner-result']")))
search = driver.find_elements_by_class_name("siq-partner-result")
Note: you have to add the following imports:
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC

How to use multiple try/except in Selenium Python

This code, when given a list of cities, searches on Google, extracts the data, and then converts it into a DataFrame.
In some cases I have to use different XPaths to extract the data; there are three XPaths in total.
Trying to do this:
if 1 doesn't work, go to 2;
if 2 doesn't work, go to 3;
if 3 doesn't work, use driver.quit().
I tried this code using NoSuchElementException:
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from bs4 import BeautifulSoup
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
from selenium.common.exceptions import NoSuchElementException
import pandas as pd

df_output = pd.DataFrame(columns=["City", "pincode"])
url = "https://www.google.com/"
chromedriver = ('/home/me/chromedriver/chromedriver.exe')
driver = webdriver.Chrome(chromedriver)
driver.implicitly_wait(30)
driver.get(url)
search = driver.find_element_by_name('q')
mlist1 = ['polasa']
for i in mlist1:
    try:
        search.send_keys(i, ' pincode')
        search.send_keys(Keys.RETURN)
        WebDriverWait(driver, 10).until(expected_conditions.visibility_of_element_located((By.XPATH, '//div[@class="IAznY"]//div[@class="title"]')))
        elmts = driver.find_elements_by_xpath('//div[@class="IAznY"]//div[@class="title"]')
        df_output = df_output.append(pd.DataFrame(columns=["City", "pincode"], data=[[i, elmts[0].text]]))
        driver.quit()
    except NoSuchElementException:
        try:
            elements = driver.find_element_by_xpath("//div[@class='Z0LcW']")
            df_output = df_output.append(pd.DataFrame(columns=["City", "pincode"], data=[[i, elements.text]]))
            driver.quit()
        except NoSuchElementException:
            try:
                elements = driver.find_element_by_xpath("//div[@class='Z0LcW AZCkJd']")
                df_output = df_output.append(pd.DataFrame(columns=["City", "pincode"], data=[[i, elements.text]]))
                driver.quit()
            except:
                driver.quit()
This code works, using one of the 3 XPaths; I need to combine the 3 XPaths into a single script.
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from bs4 import BeautifulSoup
import re
import pandas as pd
import os
import html5lib
import json
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions

url = "https://www.google.com/"
chromedriver = ('/home/me/chromedriver/chromedriver.exe')
driver = webdriver.Chrome(chromedriver)
driver.implicitly_wait(30)
driver.get(url)
search = driver.find_element_by_name('q')
search.send_keys('polasa', ' pincode')
search.send_keys(Keys.RETURN)
elements = driver.find_element_by_xpath("//div[@class='Z0LcW']")
elements.text
You don't really need three nested try/excepts. You can do this without throwing exceptions by locating elements (plural) given a locator and then checking the length of the collection returned. If the length is 0, no elements were found.
The locators you are using don't require XPath, so you can instead use a CSS selector and combine all three with an OR, avoiding the three separate checks. (Note: you can do the same thing with XPath, but the result is messier and harder to read.)
Here are your 3 locators combined into one using OR (the comma) in CSS selector syntax:
div.IAznY div.title, div.Z0LcW, div.Z0LcW.AZCkJd
...and the updated code using the combined locator, without the nested try/except:
...
locator = (By.CSS_SELECTOR, 'div.IAznY div.title, div.Z0LcW, div.Z0LcW.AZCkJd')
for i in mlist1:
    search.send_keys(i, ' pincode')
    search.send_keys(Keys.RETURN)
    WebDriverWait(driver, 10).until(expected_conditions.visibility_of_element_located(locator))
    elements = driver.find_elements(*locator)  # find_elements takes the (By, selector) pair unpacked
    df_output = df_output.append(pd.DataFrame(columns=["City", "pincode"], data=[[i, elements[0].text]]))
driver.quit()
NOTE: I used your original locators and wasn't getting any results with any of the three. Are you sure they are correct?
Also note... I pulled the driver.quit() out of the loop. I'm not sure whether you intended it to be inside or not, but as written, if the try succeeds on the first iteration, the browser quits. You only have one item, so you probably haven't noticed this yet, but it would have been confusing once you added another item to the iteration.
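If you also want the explicit "nothing found" branch that the length check gives you, a minimal sketch (same combined locator as above; the print is just illustrative):
elements = driver.find_elements(*locator)
if len(elements) == 0:
    # none of the three result layouts matched this query
    print('no pincode found for', i)
else:
    df_output = df_output.append(pd.DataFrame(columns=["City", "pincode"], data=[[i, elements[0].text]]))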

NoSuchElementException using Selenium in Python

from selenium import webdriver
from selenium.webdriver.common.keys import Keys
chrome_path=r"C:\Users\Priyanshu\Downloads\Compressed\chromedriver_win32\chromedriver.exe"
driver=webdriver.Chrome(chrome_path)
driver.get("https://www.flipkart.com/?")
search = driver.find_element_by_name('q')
search.send_keys("laptop")
search.send_keys(Keys.RETURN)
driver.find_element_by_xpath(""" //*[#id="container"]/div/div[2]/div[2]/div/div[2]/div[2]/div/section/ul/li[2]""").click()
I am getting NoSuchElementException on the last line of code.
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
chrome_path=r"C:\Users\Priyanshu\Downloads\Compressed\chromedriver_win32\chromedriver.exe"
driver=webdriver.Chrome(chrome_path)
driver.get("https://www.flipkart.com/search?q=laptop&otracker=start&as-show=off&as=off")
driver.find_element_by_xpath(""" //*[#id="container"]/div/div[2]/div[2]/div/div[2]/div[2]/div/section/ul/li[2]""").click()
If I do it like this, it works fine.
The element is not immediately available, wait for it to be present first:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
wait = WebDriverWait(driver, 10)
search = wait.until(EC.presence_of_element_located((By.NAME, 'q')))
search.send_keys("laptop")
search.send_keys(Keys.RETURN)
element = wait.until(EC.presence_of_element_located((By.XPATH, '//*[@id="container"]/div/div[2]/div[2]/div/div[2]/div[2]/div/section/ul/li[2]')))
element.click()
Note that, assuming you want to get to the "Popularity" menu header, why not simplify the XPath expression and use the element's text:
//li[. = "Popularity"]
