Selenium and BeautifulSoup scraper only scrapes first page of results - python

I have built a scraper, which works just fine in terms of handling data, but for some reason, it only scrapes the first page of the desired search.
I have two functions: one finds the elements I want on the page, and the other looks for a NEXT link and clicks it if it exists; otherwise the scraper just scrapes that page and moves on. I am using the following:
from __future__ import print_function
import fileinput
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from bs4 import BeautifulSoup
import re
import sys

reload(sys)
sys.setdefaultencoding('utf8')

letters = ["x"]
for letter in letters:
    try:
        driver = webdriver.Chrome()
        #driver.set_window_size(1120, 550)
        driver.get("http://sam.gov")
        driver.find_element_by_css_selector("a.button[title='Search Records']").click()
    except:
        print("Failed for " + letter)
        pass
    else:
        driver.find_element_by_id('q').send_keys(letter)
        driver.find_element_by_id('RegSearchButton').click()

    def findRecords():
        bsObj = BeautifulSoup(driver.page_source, "html.parser")
        tableList = bsObj.find_all("table", {"class": "width100 menu_header_top_emr"})
        tdList = bsObj.find_all("td", {"class": "menu_header width100"})
        for table, td in zip(tableList, tdList):
            a = table.find_all("span", {"class": "results_body_text"})
            b = td.find_all("span", {"class": "results_body_text"})
            hf = open("sam.csv", 'a')
            hf.write(', '.join(tag.get_text().strip() for tag in a + b) + '\n')

    def crawl():
        if driver.find_element_by_id('anch_16'):
            print("Found next button")
            findRecords()
            driver.find_element_by_id('anch_16').click()
            print("Going to next page")
        else:
            print("Scraping last page for " + letter)
            findRecords()
            print("Done scraping letter " + letter + "\nNow cleaning results file...")
            seen = set()  # set for fast O(1) amortized lookup
            for line in fileinput.FileInput('sam.csv', inplace=1):
                if line in seen:
                    continue  # skip duplicate
                seen.add(line)
                print(line)
            print("Scraping and cleaning done for " + letter)

    crawl()
    driver.quit()
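For comparison, here is a minimal sketch (not the asker's code) of how the same pagination could be driven by a loop, so every page gets scraped until the NEXT link disappears. It reuses the names from the question (driver, findRecords, letter) and assumes the NEXT link keeps the id anch_16 on every page:

from selenium.common.exceptions import NoSuchElementException

def crawl_all_pages():
    # Scrape the current results page, then keep clicking NEXT until it is gone.
    while True:
        findRecords()
        try:
            next_link = driver.find_element_by_id('anch_16')  # assumed to stay stable across pages
        except NoSuchElementException:
            print("Scraping last page for " + letter)
            break
        print("Going to next page")
        next_link.click()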

Related

Accessing an element for scraping data

I want to access the highlighted element. This is part of the HTML used to access the sub-comments section on the 9gag website. I'm using this meme https://9gag.com/gag/a5EAv9O as an example input for the program.
I used the following code to access it, but it doesn't work:
sub_com_html = item.find_element(By.CSS_SELECTOR, '//*/div/section/section[2]').Get_attribute("innerHTML")
Edit:
I'm able to access the section now and print some sub-comments, thanks to @Arundeep Chohan for correcting my silly mistake. But there's an issue: it's accessing the sub-comments section, but it's repeating the same sub-comments for different main comments. This screenshot is part of the output, with the main comment and sub-comments as a list. You can see that it's repeating the same data, which is wrong. It's also giving the sub-comments for only a few of the comments and skipping the rest. Theoretically it should work fine, but I don't understand what's going wrong here.
This is the whole code I'm working with now. The goal is to scrape all the comments and sub comments of a meme.
import csv
from selenium.webdriver.remote.webelement import WebElement
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
import time
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException, ElementClickInterceptedException
from selenium.webdriver.support.wait import WebDriverWait
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
import undetected_chromedriver as uc

if __name__ == '__main__':
    options = Options()
    # options.headless = True
    driver = uc.Chrome(service=Service(ChromeDriverManager().install()), options=options)
    driver.maximize_window()
    driver.get("https://9gag.com/gag/a5EAv9O")
    time.sleep(5)

    # click on I accept cookies
    actions = ActionChains(driver)
    try:
        consent_button = driver.find_element(By.XPATH, '//*[@id="qc-cmp2-ui"]/div[2]/div/button[2]')
        actions.move_to_element(consent_button).click().perform()
    except:
        pass

    for i in range(31):
        actions.click()
        actions.send_keys(Keys.ARROW_DOWN).perform()
    time.sleep(4)

    # click on fresh comments section
    fresh_comments = driver.find_element(By.XPATH, '//*[@id="page"]/div[1]/section[2]/section/header/div/button[2]')
    actions.move_to_element(fresh_comments).click(on_element=fresh_comments).perform()
    time.sleep(5)

    # click on load more comments button to load all the comments
    fresh_comments = driver.find_element(By.CSS_SELECTOR, '.comment-list__load-more')
    actions.move_to_element(fresh_comments).click(on_element=fresh_comments).perform()

    miN = 1000
    results = []
    comments = {}
    while miN <= 20000:
        window = 'window.scrollTo(0,' + str(miN) + ')'
        driver.execute_script(window)
        time.sleep(3)
        # Dealing with all comments
        try:
            # Scrape the main comments
            try:
                All_comments = driver.find_elements(By.CSS_SELECTOR, "div.vue-recycle-scroller__item-view")
            except:
                All_comments = driver.find_elements(By.CSS_SELECTOR, "div.vue-recycle-scroller__item-view")
            del_comm_cnt = 1
            for item in All_comments:
                try:
                    html = item.get_attribute("innerHTML")
                    if "comment-list-item__text" in html:
                        comment = item.find_element(By.CSS_SELECTOR, "div.comment-list-item__text").text
                    elif "comment-list-item__deleted-text" in html:
                        comment = item.find_element(By.CSS_SELECTOR, "div.comment-list-item__deleted-text").text
                        comment = comment + str(del_comm_cnt)
                        del_comm_cnt += 1
                    if comments.get(comment) == None:
                        sub_coms_list = []
                        comments[comment] = ""
                        # get sub comments
                        if "comment-list-item__replies" in html:
                            # item.find_element(By.CSS_SELECTOR, "div.comment-list-item__replies").click()
                            sub_comments = item.find_element(By.CSS_SELECTOR, "div.comment-list-item__replies")
                            actions.move_to_element(sub_comments).click(on_element=sub_comments).perform()
                            sub_com_section = item.find_element(By.XPATH, '//*/div/section/section[2]')
                            sub_com_html = sub_com_section.get_attribute("innerHTML")
                            #sub_coms = sub_com_section.find_elements(By.CSS_SELECTOR, "section.comment-list-item__wrapper comment-list-item__wrapper_reply")
                            sub_coms = sub_com_section.find_elements(By.CSS_SELECTOR, "div.comment-list-item__text")
                            for com in sub_coms:
                                sub_coms_list.append(com.text)
                            comments[comment] = sub_coms_list
                except:
                    pass
        except:
            pass
        miN = miN + 1500
    driver.quit()

    for i in comments:
        print(i, "\n", comments[i], "\n\n")
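One likely culprit for the repeated replies (an observation about the locator, not a fix tested against the live page): an XPath that starts with // searches the whole document even when it is called on an element, so every item can return the same first match. Prefixing the expression with a dot scopes the search to the current item:

# Searches the whole page, so each item may yield the same replies section.
sub_com_section = item.find_element(By.XPATH, '//*/div/section/section[2]')

# Scoped to the current item: note the leading dot.
sub_com_section = item.find_element(By.XPATH, './/*/div/section/section[2]')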

Python Scraping from website

I've tried to write a web scraper for https://www.waug.com/area/?idx=15:
#!/usr/bin/env python3
#_*_coding:utf8_*_
import requests
from bs4 import BeautifulSoup

url = requests.get('https://www.abcd.com/area/?abc=15')
html = url.text
soup = BeautifulSoup(html, 'html.parser')
count = 1
names = soup.select('#good_{} > div > div.class_name > div > div'.format(count))
prices = soup.select('#good_{} > div > div.class_name > div.class_name'.format(count))

for name in names:
    while count < 45:
        print(name.text)
        count = count + 1

for price in prices:
    while count < 45:
        print(price.text)
        count = count + 1
The output is only the first item's name, printed 45 times, and no prices. How can I get every item's name and price? I want each item's name and price on the same line. (I've changed the URL and some of the class names just in case.)
In order to be sure I get the right price for the right title, I'd grab each whole "item-good" block.
Then a for loop over those blocks makes sure that the title I am getting matches its price.
Here's an example of how to parse a website with BeautifulSoup:
#!/usr/bin/env python3
#_*_coding:utf8_*_
import requests
from bs4 import BeautifulSoup

url = requests.get('https://www.waug.com/area/?idx=15')
html = url.text
soup = BeautifulSoup(html, 'html.parser')

items = soup.findAll("div", {"class": "item-good"})
for item in items:
    item_title = item.find("div", {"class": "good-title-text"})
    item_price = item.find("div", {"class": "price-selling"})
    print(item_title.text + " " + item_price.text)
    # If you get encoding errors delete the row above and uncomment the one below
    #print(item_title.text.encode("utf-8") + " " + item_price.text.encode("utf-8"))
As the OP pointed out, this is not enough, because there is a "more" button that has to be clicked on the page in order to retrieve all the results.
This can be done using Selenium Webdriver.
=== IMPORTANT NOTE ===
In order to make this work you'll also need to copy the "chromedriver" executable into your script folder.
You can download it from this Google website.
Here's the script:
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys

browser = webdriver.Chrome()
browser.get('https://www.waug.com/area/?idx=15')

for number in range(10):
    try:
        WebDriverWait(browser, 60).until(EC.presence_of_element_located((By.ID, "more_good")))
        more_button = browser.find_element_by_id('more_good')
        more_button.click()
        time.sleep(10)
    except:
        print("Scrolling is now complete!")

source = browser.page_source
# This source variable should be used as input for BeautifulSoup
print(source)
Now it is time to merge the two solutions explained above in order to get the final requested result.
Please keep in mind that this is just a quick'n'dirty hack that needs proper error handling and polishing, but it should be enough to get you started:
#!/usr/bin/env python3
#_*_coding:utf8_*_
from bs4 import BeautifulSoup
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys

browser = webdriver.Chrome()
browser.get('https://www.waug.com/area/?idx=15')

def is_page_load_complete():
    close_button = browser.find_element_by_id('close_good')
    return close_button.is_displayed()

while True:
    WebDriverWait(browser, 60).until(EC.presence_of_element_located((By.ID, "more_good")))
    time.sleep(10)
    more_button = browser.find_element_by_id('more_good')
    if more_button.is_displayed():
        more_button.click()
    else:
        if is_page_load_complete():
            break

source = browser.page_source
soup = BeautifulSoup(source, 'html.parser')

items = soup.findAll("div", {"class": "item-good"})
for item in items:
    item_title = item.find("div", {"class": "good-title-text"})
    item_price = item.find("div", {"class": "price-selling"})
    print(item_title.text + " " + item_price.text)
    # If you get encoding errors comment the row above and uncomment the one below
    #print(item_title.text.encode("utf-8") + " " + item_price.text.encode("utf-8"))

print("Total items found: " + str(len(items)))

Selenium Python: element not clickable at point (X, Y), another element would receive the click (the element is neither a button nor a link)

This is a link to an e-commerce website which I would like to crawl. I am looking for a way to click on the Most Helpful, Positive, Negative, Most Recent and By Certified Buyers sections and scrape the values. Heads up: it's not a button, so ActionChains and JavaScript clicks are not working on it.
I want to move from one section to another either by clicking or by any other method. I tried the JavaScript executor and ActionChains, but I am unable to get it to work.
For this, my XPath is:
path = '//div[@class="o5jqS-"]/div[X]//div[contains(@class,"_3MuAT6")]'
which actually returns an element. The "X" value is replaced in a loop with 1 to 5, 1 signifying "Most Helpful" and 5 signifying "By Certified Buyers".
My code is below:
for j in range(0, 5):
    new_xpath = xpath_hash["FirstPageReviews"]["TitleOfReviewType"].replace("[X]", "[" + str(j + 1) + "]")
    new_xpath1 = xpath_hash["FirstPageReviews"]["TitleElement"].replace("[X]", "[" + str(j + 1) + "]")
    title_element = driver.find_element_by_xpath(new_xpath1)
    driver.execute_script("arguments[0].click();", title_element)
    #ActionChains(driver).move_to_element(title_element).click().perform()
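A common workaround for "element not clickable at point" errors (a hedged sketch, not verified against Flipkart's current markup) is to wait until the tab is clickable and scroll it into view before clicking. It reuses the XPath pattern from the question with X substituted:

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

wait = WebDriverWait(driver, 10)
for j in range(5):
    tab_xpath = '//div[@class="o5jqS-"]/div[{}]//div[contains(@class,"_3MuAT6")]'.format(j + 1)
    tab = wait.until(EC.element_to_be_clickable((By.XPATH, tab_xpath)))
    driver.execute_script("arguments[0].scrollIntoView(true);", tab)  # bring it out from under overlays
    tab.click()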
You could use my code, which is based on the page object pattern; I have tried it and it worked:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import Select

class SeleniumBaseClass(object):
    def __init__(self, driver):
        self.driver = driver

    def open(self, URL):
        self.driver.get(URL)

    def driverURLChange(self, URL):
        print("change URL" + URL)
        self.driver.get(URL)

    def currentUrl(self):
        print("URL " + self.driver.current_url)
        return self.driver.current_url

    def locateElement(self, loc):
        try:
            print(loc)
            element = WebDriverWait(self.driver, 10).until(EC.visibility_of_element_located(loc))
            return element
        except:
            print("cannot find {0} element".format(loc))
            return None

    def waitForElementInvisible(self, loc):
        # load-spinner
        try:
            element = WebDriverWait(self.driver, 10).until(EC.invisibility_of_element_located(loc))
            return True
        except:
            print("cannot invisibility_of_element {0} element".format(loc))
            return False

    def send_key_with_Element(self, loc, value):
        self.locateElement(loc).clear()
        self.locateElement(loc).send_keys(value)

    def click_with_Element(self, loc):
        self.locateElement(loc).click()

    def clickElementsBySendKey(self, loc, value):
        self.locateElement(loc).send_keys(value)

customdriver = SeleniumBaseClass(webdriver.Chrome())
customdriver.open("https://www.flipkart.com/sony-mdr-zx110-wired-headphones/p/itmehuh6zm9s7kgz?pid=ACCDZRSEYPFHAT76&srno=s_1_1&otracker=search&lid=LSTACCDZRSEYPFHAT76TANM1F&qH=a684a6245806d98f")

HelpfulTab = (By.XPATH, "//div[contains(text(),'Most Helpful')]")
PositiveTab = (By.XPATH, "//div[contains(text(),'Positive')]")
customdriver.click_with_Element(PositiveTab)
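Building on the snippet above, all five tabs could be cycled through with the same helper. This is only a sketch; the label strings are taken from the question and may not match the live page exactly:

tab_labels = ["Most Helpful", "Positive", "Negative", "Most Recent", "By Certified Buyers"]
for label in tab_labels:
    tab_locator = (By.XPATH, "//div[contains(text(),'{}')]".format(label))
    customdriver.click_with_Element(tab_locator)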

How to put multiple values in a loop and write the results to CSV - python selenium

Actually I want to get the values from here. Getting the product hyperlinks works fine. I want to get the product information, price, etc. from the above links in the same for loop. How do I put the resulting data into a CSV file? Please help me.
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
import time

chrome_path = r"C:\Users\Venkatesh\AppData\Local\Programs\Python\Python35\chromedriver.exe"
driver = webdriver.Chrome(chrome_path)
driver.get("https://www.flipkart.com/mobiles")
search = driver.find_element_by_xpath("""//*[@id="container"]/div/div[2]/div/div[2]/div/div/div[1]/section/div[3]/div/div/a""").click()

delay = 20  # seconds
try:
    WebDriverWait(driver, delay).until(EC.presence_of_element_located((By.XPATH, "//*[@id='container']/div/div[2]/div[2]/div/div[2]/div/div[3]/div[1]/div/div[1]/a/div[2]/div[1]/div[1]")))
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
    print("Page is ready")
except TimeoutException:
    print("Loading took too much time")

time.sleep(10)
for post in driver.find_elements_by_class_name("_1UoZlX"):
    print(post.get_attribute("href"))
time.sleep(2)
driver.quit()
Output:
Page is ready
https://www.flipkart.com/moto-g5-plus-fine-gold-32-gb/p/itmes2zjvwfncxxr?pid=MOBEQHMGED7F9CZ2&srno=b_1_1&otracker=browse
&lid=LSTMOBEQHMGED7F9CZ2KHTBI8
https://www.flipkart.com/moto-g5-plus-lunar-grey-32-gb/p/itmes2zjvwfncxxr?pid=MOBEQHMGMAUXS5BF&srno=b_1_2&otracker=brows
e&lid=LSTMOBEQHMGMAUXS5BFVCF0ZO
https://www.flipkart.com/moto-e3-power-black-16-gb/p/itmekgt2fbywqgcv?pid=MOBEKGT2HGDGADFW&srno=b_1_3&otracker=browse&li
d=LSTMOBEKGT2HGDGADFWP5NHBY
https://www.flipkart.com/micromax-bolt-q381-plus-coffee-16-gb/p/itmeskgycnfghsat?pid=MOBESAMDG2GNUBC5&srno=b_1_4&otracke
r=browse&lid=LSTMOBESAMDG2GNUBC5KRPH8Q
https://www.flipkart.com/lenovo-k6-power-grey-dark-grey-32-gb/p/itmezenfhm4mvptw?pid=MOBEZENFZBPW8UMF&srno=b_1_5&otracke
r=browse&lid=LSTMOBEZENFZBPW8UMF7P8NY0
https://www.flipkart.com/lenovo-k6-power-gold-32-gb/p/itmezenfhm4mvptw?pid=MOBEZEMYH7FQBGBQ&srno=b_1_6&otracker=browse&l
id=LSTMOBEZEMYH7FQBGBQRHVU0S
https://www.flipkart.com/lenovo-k6-power-silver-32-gb/p/itmezenfhm4mvptw?pid=MOBEZEMX6CZHCJVY&srno=b_1_7&otracker=browse
&lid=LSTMOBEZEMX6CZHCJVYOIBM0E
https://www.flipkart.com/lenovo-vibe-k5-note-grey-64-gb/p/itmepcfqfdx9bdxs?pid=MOBEPCFQRJ6KFYZS&srno=b_1_8&otracker=brow
se&lid=LSTMOBEPCFQRJ6KFYZSI4DRRB
https://www.flipkart.com/lenovo-vibe-k5-note-gold-64-gb/p/itmepcfqfdx9bdxs?pid=MOBEPCFQ3ZSYTRUZ&srno=b_1_9&otracker=brow
se&lid=LSTMOBEPCFQ3ZSYTRUZGFSZCU
https://www.flipkart.com/samsung-galaxy-nxt-gold-32-gb/p/itmemzd4gepexjya?pid=MOBEMZD4KHRF5VZX&srno=b_1_10&otracker=brow
se&lid=LSTMOBEMZD4KHRF5VZX7FNU5S
https://www.flipkart.com/moto-e3-power-white-16-gb/p/itmekgt23fgwdgkg?pid=MOBEKGT2SVHPAHTM&srno=b_1_11&otracker=browse&l
id=LSTMOBEKGT2SVHPAHTMJA8RQ1
https://www.flipkart.com/lenovo-k6-power-silver-32-gb/p/itmezenfghddrfmc?pid=MOBEZENFKXZ4HSCG&srno=b_1_12&otracker=brows
e&lid=LSTMOBEZENFKXZ4HSCGC1OOAM
https://www.flipkart.com/lenovo-k6-power-gold-32-gb/p/itmezenfghddrfmc?pid=MOBEZENFSZGTQGWF&srno=b_1_13&otracker=browse&
lid=LSTMOBEZENFSZGTQGWFUR1LY1
https://www.flipkart.com/lenovo-k6-power-dark-gray-32-gb/p/itmezenfghddrfmc?pid=MOBEZENFG8BPDPSU&srno=b_1_14&otracker=br
owse&lid=LSTMOBEZENFG8BPDPSUUANLO6
https://www.flipkart.com/lava-arc-blue/p/itmezgyfszhmwfzt?pid=MOBEF6D24ZT6YHFJ&srno=b_1_15&otracker=browse&lid=LSTMOBEF6
D24ZT6YHFJZ6N7XC
https://www.flipkart.com/lenovo-vibe-k5-plus-3-gb-silver-16-gb/p/itmektn3t9rg9hnn?pid=MOBEKEF8ATFZZ8GN&srno=b_1_16&otrac
ker=browse&lid=LSTMOBEKEF8ATFZZ8GNY7WZBU
https://www.flipkart.com/lenovo-vibe-k5-plus-3-gb-gold-16-gb/p/itmektn3t9rg9hnn?pid=MOBEKEF8JYGKZCTF&srno=b_1_17&otracke
r=browse&lid=LSTMOBEKEF8JYGKZCTFUTCYS4
https://www.flipkart.com/lenovo-vibe-k5-plus-3-gb-dark-grey-16-gb/p/itmektn3t9rg9hnn?pid=MOBEKEF86VVUE8G2&srno=b_1_18&ot
racker=browse&lid=LSTMOBEKEF86VVUE8G2YCW5OP
https://www.flipkart.com/samsung-galaxy-nxt-black-32-gb/p/itmemzd4byrufyu7?pid=MOBEMZD4G83T5HKZ&srno=b_1_19&otracker=bro
wse&lid=LSTMOBEMZD4G83T5HKZVMFKK6
https://www.flipkart.com/samsung-galaxy-on8-gold-16-gb/p/itmemvarkqg5dyay?pid=MOBEMJR2NDM4EAHQ&srno=b_1_20&otracker=brow
se&lid=LSTMOBEMJR2NDM4EAHQ8BMJIN
https://www.flipkart.com/samsung-galaxy-on7-black-8-gb/p/itmedhx3jgmu2gps?pid=MOBECCA5SMRSKCNY&srno=b_1_21&otracker=brow
se&lid=LSTMOBECCA5SMRSKCNYWC8DYC
https://www.flipkart.com/samsung-galaxy-on7-gold-8-gb/p/itmedhx3jgmu2gps?pid=MOBECCA5Y5HBYR3Q&srno=b_1_22&otracker=brows
e&lid=LSTMOBECCA5Y5HBYR3QPDPGLJ
https://www.flipkart.com/samsung-galaxy-on5-gold-8-gb/p/itmedhx3uy3qsfks?pid=MOBECCA5FHQD43KA&srno=b_1_23&otracker=brows
e&lid=LSTMOBECCA5FHQD43KAFXOZYB
https://www.flipkart.com/lenovo-p2-gold-32-gb/p/itmeq5ygvgq9vyfn?pid=MOBEZFHHURMWYSFN&srno=b_1_24&otracker=browse&lid=LS
TMOBEZFHHURMWYSFNBBG6L0
https://www.flipkart.com/asus-zenfone-max-black-32-gb/p/itmege3d5pjpmknc?pid=MOBEGE3DYZM3ZYWB&srno=b_1_25&otracker=brows
e&lid=LSTMOBEGE3DYZM3ZYWBPCOZHP
https://www.flipkart.com/lenovo-vibe-k5-note-grey-32-gb/p/itmejj6kmhh2khk9?pid=MOBEJJ6KYARZGWJC&srno=b_1_26&otracker=bro
wse&lid=LSTMOBEJJ6KYARZGWJCCV4LRX
https://www.flipkart.com/swipe-elite-sense-4g-volte/p/itmeh6yfycypxfdz?pid=MOBEH6YFZYZZNCZK&srno=b_1_27&otracker=browse&
lid=LSTMOBEH6YFZYZZNCZKWVY6ES
https://www.flipkart.com/swipe-elite-sense-4g-volte/p/itmeh6yfycypxfdz?pid=MOBEH6YFZRTEMDBG&srno=b_1_28&otracker=browse&
lid=LSTMOBEH6YFZRTEMDBGYJNCJI
https://www.flipkart.com/xolo-era-1x-4g-volte-black-gun-metal-8-gb/p/itmerhq8uhtehukg?pid=MOBEHMEKGCZCGMB8&srno=b_1_29&o
tracker=browse&lid=LSTMOBEHMEKGCZCGMB8DCWHIY
https://www.flipkart.com/swipe-konnect-grand-black-8-gb/p/itmeqcgxvkyfzsgj?pid=MOBEQCGXN6HTZE2C&srno=b_1_30&otracker=bro
wse&lid=LSTMOBEQCGXN6HTZE2CXUT5W1
https://www.flipkart.com/lenovo-vibe-k5-note-gold-32-gb/p/itmejj6kczvxej4g?pid=MOBEJJ6K5A3GQ9SU&srno=b_1_31&otracker=bro
wse&lid=LSTMOBEJJ6K5A3GQ9SUZERSAR
https://www.flipkart.com/lyf-water-f1-black-32-gb/p/itmezh76z9jqsa8z?pid=MOBEZH76AFWSZVNH&srno=b_1_32&otracker=browse&li
d=LSTMOBEZH76AFWSZVNHOOBURN
https://www.flipkart.com/samsung-galaxy-j5-6-new-2016-edition-black-16-gb/p/itmegmrnzqjcpfg9?pid=MOBEG4XWHJDWMQDF&srno=b
_1_33&otracker=browse&lid=LSTMOBEG4XWHJDWMQDFZIWO93
https://www.flipkart.com/samsung-galaxy-j5-6-new-2016-edition-white-16-gb/p/itmegmrnzqjcpfg9?pid=MOBEG4XWJG7F9A6Z&srno=b
_1_34&otracker=browse&lid=LSTMOBEG4XWJG7F9A6ZHJOVBG
https://www.flipkart.com/samsung-galaxy-j5-6-new-2016-edition-gold-16-gb/p/itmegmrnzqjcpfg9?pid=MOBEG4XWFTBRMMBY&srno=b_
1_35&otracker=browse&lid=LSTMOBEG4XWFTBRMMBYZPYEGS
https://www.flipkart.com/moto-m-grey-64-gb/p/itmenqavgcezzk2y?pid=MOBENQATHQTKG7AV&srno=b_1_36&otracker=browse&lid=LSTMO
BENQATHQTKG7AVGFQI4N
https://www.flipkart.com/moto-m-gold-64-gb/p/itmenqavgcezzk2y?pid=MOBENQAVANRMEGAP&srno=b_1_37&otracker=browse&lid=LSTMO
BENQAVANRMEGAPHWU47I
https://www.flipkart.com/moto-m-silver-64-gb/p/itmenqavgcezzk2y?pid=MOBENQAVFTG6FPXX&srno=b_1_38&otracker=browse&lid=LST
MOBENQAVFTG6FPXXHZBIGV
https://www.flipkart.com/apple-iphone-6-silver-16-gb/p/itme8dvfeuxxbm4r?pid=MOBEYHZ2NUZGCHKN&srno=b_1_39&otracker=browse
&lid=LSTMOBEYHZ2NUZGCHKN7PMDIN
https://www.flipkart.com/samsung-galaxy-on8-black-16-gb/p/itmemvarprh8hegn?pid=MOBEMJRFZXZBESQW&srno=b_1_40&otracker=bro
wse&lid=LSTMOBEMJRFZXZBESQWCFHWJ0
https://www.flipkart.com/panasonic-eluga-tapp-silver-grey-16-gb/p/itmezf54ey3gf8ne?pid=MOBENRHGWZWKEGGF&srno=b_1_41&otra
cker=browse&lid=LSTMOBENRHGWZWKEGGFMJELY2
https://www.flipkart.com/panasonic-eluga-tapp-champagne-gold-16-gb/p/itmezf54ey3gf8ne?pid=MOBENRHGEQEJHSZM&srno=b_1_42&o
tracker=browse&lid=LSTMOBENRHGEQEJHSZMD8R5FE
https://www.flipkart.com/apple-iphone-6s-rose-gold-32-gb/p/itmen2yymnfcrxsz?pid=MOBEN2XYK8WFEGM8&srno=b_1_43&otracker=br
owse&lid=LSTMOBEN2XYK8WFEGM8QJW5XA
https://www.flipkart.com/lenovo-p2-grey-graphite-grey-32-gb/p/itmeq5ygvgq9vyfn?pid=MOBEZFHH2JYGXSNF&srno=b_1_44&otracker
=browse&lid=LSTMOBEZFHH2JYGXSNFNWKEAD
https://www.flipkart.com/forme-n1/p/itmeff8s2hdrfhyg?pid=MOBEFF8SHZPYKCRY&srno=b_1_45&otracker=browse&lid=LSTMOBEFF8SHZP
YKCRYEKQPPR
https://www.flipkart.com/forme-n1/p/itmeff8s2hdrfhyg?pid=MOBEFF8SSZNHCUND&srno=b_1_46&otracker=browse&lid=LSTMOBEFF8SSZN
HCUNDRC6GLT
https://www.flipkart.com/samsung-galaxy-on5-black-8-gb/p/itmekszmsqgpgygy?pid=MOBECCA5BJUVUGNP&srno=b_1_47&otracker=brow
se&lid=LSTMOBECCA5BJUVUGNPRKEGMG
https://www.flipkart.com/lenovo-p2-grey-graphite-grey-32-gb/p/itmeq5ygebzgqgfb?pid=MOBEZFHHVD8KXE7G&srno=b_1_48&otracker
=browse&lid=LSTMOBEZFHHVD8KXE7GB0OS6I
https://www.flipkart.com/lenovo-p2-gold-32-gb/p/itmeq5ygebzgqgfb?pid=MOBEZFHHGE2RXQUY&srno=b_1_49&otracker=browse&lid=LS
TMOBEZFHHGE2RXQUY2XDB97
https://www.flipkart.com/samsung-galaxy-j7-gold-16-gb/p/itmeafbfjhsydbpw?pid=MOBE93GWSMGZHFSK&srno=b_1_50&otracker=brows
e&lid=LSTMOBE93GWSMGZHFSKT6OZOB
https://www.flipkart.com/samsung-z2-gold-8-gb/p/itmenkygvprd5dwt?pid=MOBENKYGHFUHT6BH&srno=b_1_51&otracker=browse&lid=LS
TMOBENKYGHFUHT6BHVSHMDE
https://www.flipkart.com/leeco-le-2-grey-32-gb/p/itmejeucxaxmnk8k?pid=MOBEJFTH4C9Z2YZR&srno=b_1_52&otracker=browse&lid=L
STMOBEJFTH4C9Z2YZRVVL0EL
https://www.flipkart.com/lyf-water-10-black-16-gb/p/itmemj7d8qfkfu4r?pid=MOBEMJ7C7YMDMVDQ&srno=b_1_53&otracker=browse&li
d=LSTMOBEMJ7C7YMDMVDQPCFALX
https://www.flipkart.com/micromax-canvas-nitro-2-grey-silver-16-gb/p/itme7nhzw56hv2ga?pid=MOBE7NHZP7GHZ7SG&srno=b_1_54&o
tracker=browse&lid=LSTMOBE7NHZP7GHZ7SGCYGNI3
https://www.flipkart.com/moto-g-turbo-white-16-gb/p/itmecc4uhbue7ve6?pid=MOBECC4UQTJ5QZFR&srno=b_1_55&otracker=browse&li
d=LSTMOBECC4UQTJ5QZFR9CAUPO
https://www.flipkart.com/moto-g-turbo-black-16-gb/p/itmecc4uhbue7ve6?pid=MOBECC4UZTSGKWWZ&srno=b_1_56&otracker=browse&li
d=LSTMOBECC4UZTSGKWWZOQKAIZ
https://www.flipkart.com/apple-iphone-6-space-grey-16-gb/p/itme8dvfeuxxbm4r?pid=MOBEYHZ2YAXZMF2J&srno=b_1_57&otracker=br
owse&lid=LSTMOBEYHZ2YAXZMF2JEVWVNC
https://www.flipkart.com/yu-yunicorn-rush-silver-32-gb/p/itmenffyjfp8ubyg?pid=MOBEJ3MFUQAF8XJS&srno=b_1_58&otracker=brow
se&lid=LSTMOBEJ3MFUQAF8XJSBPC8L4
https://www.flipkart.com/yu-yunicorn-gold-rush-32-gb/p/itmenffyjfp8ubyg?pid=MOBEJ3MF23Q9MGMH&srno=b_1_59&otracker=browse
&lid=LSTMOBEJ3MF23Q9MGMHZ49MG2
https://www.flipkart.com/micromax-canvas-nitro-2-white-gold-16-gb/p/itme7nhzw56hv2ga?pid=MOBE8TJBHGQYHNPT&srno=b_1_60&ot
racker=browse&lid=LSTMOBE8TJBHGQYHNPTVL3HS0
I used openpyxl to create a file for each run, with the filename plus a timestamp. The links that are fetched are eventually written to that file.
I couldn't find the exact links that were given, so I chose my own links which are similar in kind. This code uses different links per se, but the solution scales up to the same thing for your case, @venkatesh.
One more thing: try to keep XPaths as relative as possible, and classes with gibberish like _13oc-S will not hold up well, as they tend to change dynamically with each DOM refresh or browser instance.
from webdriver_manager.chrome import ChromeDriverManager
from selenium import webdriver
from selenium.webdriver.common.by import By
import time
import openpyxl

current_time = time.strftime('%Y%m%d%H%M%S')
xlpath = "linktracker" + current_time + ".csv"

driver = webdriver.Chrome(ChromeDriverManager().install())
driver.get("https://www.flipkart.com/mobiles")
driver.maximize_window()

# Searches for a certain brand of phones (POCO). Inefficient way of locator finding though
search = driver.find_element(By.XPATH, "(//*[@alt='Shop Now'])[2]").click()
time.sleep(10)  # bad practice, but used for now. WebDriverWait should be used instead.

each_element = "//a[@rel='noopener noreferrer']"  # locates each desired element on the search page (each phone block)
posts = driver.find_elements(By.XPATH, each_element)
print(len(posts))

ls = []
for post in range(len(posts) - 1):  # len-1 because the last item is a footer and not a desired link in my view
    # concatenates the subscript to the element xpath, e.g. (//*[@element='ele'])[1] ... (//*[@element='ele'])[n]
    each_post = driver.find_element(By.XPATH, '(' + each_element + ')' + '[' + str(post + 1) + ']')
    each_link = each_post.get_attribute("href")
    ls.append(each_link)

wb = openpyxl.Workbook()  # creates a workbook
sheet = wb.active

c = 0
# loop through the created list and write the values to the workbook
for i in ls:
    sheet.cell(row=c + 1, column=1).value = i
    c += 1  # increment the row for each iteration of i

wb.save(xlpath)  # save the workbook with the name given in the xlpath variable above
driver.quit()
Result in csv - image
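A side note on the file format: openpyxl always writes an xlsx workbook, even when the filename ends in .csv, so the output above is really an Excel file with a misleading extension. If a plain CSV is wanted, the standard csv module can write the collected links instead; a minimal sketch reusing the ls list and xlpath name from the code above (the header row is illustrative, not part of the original output):

import csv

with open(xlpath, "w", newline="", encoding="utf-8") as f:
    writer = csv.writer(f)
    writer.writerow(["product_url"])  # illustrative header
    for link in ls:
        writer.writerow([link])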

Selenium find all elements by xpath

I used Selenium to scrape a scrolling website and wrote the code below:
import requests
from bs4 import BeautifulSoup
import csv
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
import unittest
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
import time
import re

output_file = open("Kijubi.csv", "w", newline='')

class Crawling(unittest.TestCase):
    def setUp(self):
        self.driver = webdriver.Firefox()
        self.driver.set_window_size(1024, 768)
        self.base_url = "http://www.viatorcom.de/"
        self.accept_next_alert = True

    def test_sel(self):
        driver = self.driver
        delay = 3
        driver.get(self.base_url + "de/7132/Seoul/d973-allthingstodo")
        for i in range(1, 1):
            driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            time.sleep(2)
        html_source = driver.page_source
        data = html_source.encode("utf-8")
My next step was to crawl specific information from the website like the price.
Hence, I added the following code:
all_spans = driver.find_elements_by_xpath("/html/body/div[5]/div/div[3]/div[2]/div[2]/div[1]/div[1]/div")
print(all_spans)

for price in all_spans:
    Header = driver.find_elements_by_xpath("/html/body/div[5]/div/div[3]/div[2]/div[2]/div[1]/div[1]/div/div[2]/div[2]/span[2]")
    for span in Header:
        print(span.text)
But I get just one price instead of all of them. Could you give me feedback on what I could improve in my code? Thanks :)
EDIT
Thanks to you guys I managed to get it running. Here is the additional code:
elements = driver.find_elements_by_xpath("//div[@id='productList']/div/div")
innerElements = 15
outerElements = len(elements) / innerElements
print(innerElements, "\t", outerElements, "\t", len(elements))

for j in range(1, int(outerElements)):
    for i in range(1, int(innerElements)):
        headline = driver.find_element_by_xpath("//div[@id='productList']/div[" + str(j) + "]/div[" + str(i) + "]/div/div[2]/h2/a").text
        price = driver.find_element_by_xpath("//div[@id='productList']/div[" + str(j) + "]/div[" + str(i) + "]/div/div[2]/div[2]/span[2]").text
        deeplink = driver.find_element_by_xpath("//div[@id='productList']/div[" + str(j) + "]/div[" + str(i) + "]/div/div[2]/h2/a").get_attribute("href")
        print("Header: " + headline + " | " + "Price: " + price + " | " + "Deeplink: " + deeplink)
Now my last issue is that I still do not get the last 20 prices back, the ones that have an English description. I only get back the prices that have a German description. The English ones are not fetched although they share the same HTML structure.
E.g. the locator for one of the English items:
headline = driver.find_element_by_xpath("//div[@id='productList']/div[6]/div[1]/div/div[2]/h2/a")
Do you guys know what I have to modify? Any feedback is appreciated:)
To grab all prices on that page you should use an XPath like this:
Header = driver.find_elements_by_xpath("//span[contains(concat(' ', normalize-space(@class), ' '), 'price-amount')]")
which means: find all span elements with class price-amount (why so complex - see here).
But a simpler way to find the same elements is with a CSS locator:
.price-amount
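Put together, either locator should return every price element on the page (a sketch, assuming the class is still price-amount):

# XPath variant from above
prices = driver.find_elements_by_xpath("//span[contains(concat(' ', normalize-space(@class), ' '), 'price-amount')]")

# CSS variant
prices = driver.find_elements_by_css_selector("span.price-amount")

for price in prices:
    print(price.text)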
