Unable to print foodpanda product links using Selenium (Python)

I want to extract the links from the a href tags, but my code prints no results for https://www.foodpanda.pk/restaurants/new?lat=24.9414896&lng=67.1676002&vertical=restaurants
from selenium import webdriver

driver = webdriver.Chrome('F:/chromedriver')
driver.get("https://www.foodpanda.pk/restaurants/new?lat=24.9414896&lng=67.1676002&vertical=restaurants")
# response = scrapy.Selector(text=driver.page_source)

# One query for the vendor cards; reuse its length instead of querying twice
vendors = driver.find_elements_by_css_selector("ul.vendor-list li")
for i in range(len(vendors)):
    try:
        name = driver.find_elements_by_css_selector(".headline .name")[i].text
        time = driver.find_elements_by_css_selector(".badge-info")[i].text.strip()
        rating = driver.find_elements_by_css_selector(".rating")[i].text
        dealtag = driver.find_elements_by_css_selector(".multi-tag")[i].text
        link = driver.find_elements_by_css_selector(".vendor [href]")[i].text
        print(name, link, time, rating, dealtag)
    except:
        pass

Please read through the code below; it works fine on my machine.
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait

driver = webdriver.Chrome()
wait = WebDriverWait(driver, 30)
driver.get('https://www.foodpanda.pk/restaurants/new?lat=24.9414896&lng=67.1676002&vertical=restaurants')

vendor_list = driver.find_elements_by_xpath("//figure[@class='vendor-tile item']/ancestor::li")
for vendor in vendor_list:
    print("-------------------")
    print("Restaurant Name :- " + vendor.find_element_by_xpath(".//span[@class='name fn']").text)
    print("Badge :- " + vendor.find_element_by_xpath(".//span[@class='badge-info']").text[:2] +
          vendor.find_element_by_xpath(".//span[@class='badge-info']/span").text)
    try:
        print("Rating :- " + vendor.find_element_by_xpath(".//span[@class='rating']").text)
    except:
        print("No Rating Available")
    try:
        print("Multi Tag :- " + vendor.find_element_by_xpath(".//span[@class='multi-tag']").text)
    except:
        print("No Tag Info")
    print("Vendor URL :- " + vendor.find_element_by_xpath(".//a").get_attribute("href"))

There are no elements with the exact class name vendor on that page.
You should use something like //*[contains(@class,'vendor')]//a[@href]
I used XPath since I prefer working with it, but you can also use a similar css_selector.
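For completeness, here is roughly how that locator could be combined with get_attribute. Note that the original code read .text from the anchors, which returns the link's visible text rather than its URL (a quick sketch, not verified against the live page):

links = driver.find_elements_by_xpath("//*[contains(@class,'vendor')]//a[@href]")
for a in links:
    # get_attribute("href") returns the URL; .text would only return the anchor's visible text
    print(a.get_attribute("href"))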

Related

Getting this error when trying to iterate using Selenium: "stale element reference: element is not attached to the page document"

I am writing a Python script. First, it visits this website. Then it clicks on the arrow on the right side and goes to a new web page to collect some data. Finally, it goes back to the previous page and does the same thing with the next item.
Web page : https://register.fca.org.uk/s/search?q=capital&type=Companies
This is the code.
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.support.wait import WebDriverWait
import time

url = 'https://register.fca.org.uk/s/search?q=capital&type=Companies'
service = Service('path to chromedriver')
service.start()
driver = webdriver.Remote(service.service_url)
driver.get(url)
time.sleep(12)
divs = driver.find_elements_by_xpath('//div[@class="result-card_main"]')
for d in divs:
    RN = ''
    companyName = ''
    companyName = d.find_element_by_tag_name('h2').text
    RNData = d.find_element_by_xpath('.//div[@class="result-card_figure-offset"]').text
    RN = RNData.split(':')[1].strip()
    d.click()
    time.sleep(12)
    phoneNumber = ''
    phoneNumberData = driver.find_elements_by_xpath('//*[@id="who-is-this-details-content"]/div[1]/div[2]/div[2]/div/div/div[2]')
    phoneNumber = phoneNumberData[0].text.split('\n')[1]
    print(RN)
    print(companyName)
    print(phoneNumber)
    driver.execute_script("history.back();")
It gives me this error:
selenium.common.exceptions.StaleElementReferenceException: Message: stale element reference: element is not attached to the page document
How can I solve this problem?
Here's a quick and dirty way to avoid that error; change your code like this:
url = 'https://register.fca.org.uk/s/search?q=capital&type=Companies'
driver.get(url)
time.sleep(12)
divs = driver.find_elements_by_xpath('//div[@class="result-card_main"]')
for i in range(len(divs)):
    time.sleep(4)
    # Re-locate the cards on every iteration: navigating back reloads the page,
    # which detaches the previously found elements and makes them stale
    d = driver.find_elements_by_xpath('//div[@class="result-card_main"]')
    RN = ''
    companyName = ''
    companyName = d[i].find_element_by_tag_name('h2').text
    RNData = d[i].find_element_by_xpath('.//div[@class="result-card_figure-offset"]').text
    RN = RNData.split(':')[1].strip()
    d[i].click()
    time.sleep(12)
    phoneNumber = ''
    phoneNumberData = driver.find_elements_by_xpath('//*[@id="who-is-this-details-content"]/div[1]/div[2]/div[2]/div/div/div[2]')
    phoneNumber = phoneNumberData[0].text.split('\n')[1]
    print(RN)
    print(companyName)
    print(phoneNumber)
    driver.execute_script("window.history.go(-1)")

Scraping with Selenium: can't click on clickable text

I am trying to scrape some data from Yahoo Finance; for each stock, I want to get the historical data. Taking the Apple stock as an example, I go to https://finance.yahoo.com/quote/AAPL/history?p=AAPL and choose "Max" from "Time Period".
I believe the script I wrote so far is getting the date element, but somehow clicking on it so I can choose "Max" is not working.
Here is my whole script:
# using linux here
import os

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException, WebDriverException

project_path = os.getcwd()
driver_path = project_path + "/" + "chromedriver"
yahoo_finance = "https://finance.yahoo.com/quote/"
driver = webdriver.Chrome(driver_path)

def get_data(symbol='AAPL'):
    stock_history_link = yahoo_finance + symbol + '/history?p=' + symbol
    driver.get(stock_history_link)
    date_picker = '//div[contains(@class, "D(ib)") and contains(@class, "Pos(r)") and contains(@class, "Cur(p)")' \
                  ' and contains(@class, "O(n):f")]'
    try:
        print("I am inside")
        date_picker_2 = "//div[@class='Pos(r) D(ib) O(n):f Cur(p)']"
        date_picker_element = driver.find_element_by_xpath(date_picker_2)
        print("date_picker_element: ", date_picker_element)
        date_picker_element.click()
        try:
            print("I will be waiting for the date")
            my_dropdown = WebDriverWait(driver, 100).until(
                EC.presence_of_element_located((By.ID, 'dropdown-menu'))
            )
            print(my_dropdown)
            print("I am not waiting anymore")
        except TimeoutException as e:
            print("wait timed out")
            print(e)
    except WebDriverException:
        print("Something went wrong while trying to pick the max date")

if __name__ == '__main__':
    try:
        get_data()
    except:
        pass
    # finally:
    #     driver.quit()
To click the Max button, just open the dropdown and target it.
driver.get("https://finance.yahoo.com/quote/AAPL/history?p=AAPL")
wait = WebDriverWait(driver, 10)
wait.until(EC.element_to_be_clickable((By.XPATH, "//span[#class='C($linkColor) Fz(14px)']"))).click()
wait.until(EC.element_to_be_clickable((By.XPATH, "//button[#data-value='MAX']"))).click()
Element:
<button class="Py(5px) W(45px) Fz(s) C($tertiaryColor) Cur(p) Bd Bdc($seperatorColor) Bgc($lv4BgColor) Bdc($linkColor):h Bdrs(3px)" data-value="MAX"><span>Max</span></button>
Imports:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
You have the wrong XPath for date_picker_2:
date_picker_2 = '//*[@id="Col1-1-HistoricalDataTable-Proxy"]/section/div[1]/div[1]/div[1]/div/div/div/span'
Using requests:
import requests
import datetime
end = int(datetime.datetime.strptime(datetime.date.today().isoformat(), "%Y-%m-%d").timestamp())
url = f"https://finance.yahoo.com/quote/AAPL/history?period1=345427200&period2={end}&interval=1d&filter=history&frequency=1d&includeAdjustedClose=true"
requests.get(url)
Gets you to the same end page.
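Incidentally, period1 and period2 in that URL are plain Unix timestamps. The strptime round-trip above produces midnight today; the same value can be computed a little more directly (equivalent result, just fewer steps):

import datetime

# Midnight today (local time) as Unix epoch seconds -- the same value the
# strptime round-trip in the snippet above produces
end = int(datetime.datetime.combine(datetime.date.today(), datetime.time.min).timestamp())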

How do I export data from multiple pages into a csv file?

I am working on a scraping project, and am in the final stages. Right now, my code can navigate to the first profile, scrape the data from that profile, print that data, then move on to the next profile, and repeat the process. Now, I want to put the data I collect into a csv file instead of printing it. I am not sure how to do this, so I am looking for guidance/updates to my current code. Thank you for your help!
My current code:
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from time import sleep
from selenium.common.exceptions import NoSuchElementException

driver = webdriver.Chrome("/Users/nzalle/Downloads/chromedriver")
driver.get("https://directory.bcsp.org/")
count = int(input("Number of Profiles to Scrape: "))
body = driver.find_element_by_xpath("//body")
profile_count = driver.find_elements_by_xpath("//div[@align='right']/a")

while len(profile_count) < count:  # Get links up to "count"
    body.send_keys(Keys.END)
    sleep(1)
    profile_count = driver.find_elements_by_xpath("//div[@align='right']/a")

for link in profile_count:  # Calling up links
    temp = link.get_attribute('href')
    driver.execute_script("window.open('');")  # open new tab
    driver.switch_to.window(driver.window_handles[1])  # focus new tab
    driver.get(temp)
    # scrape code
    Name = driver.find_element_by_xpath('/html/body/table/tbody/tr/td/table/tbody/tr/td[5]/div/table[1]/tbody/tr/td[1]/div[2]/div').text
    IssuedBy = "Board of Certified Safety Professionals"
    CertificationorDesignaationNumber = driver.find_element_by_xpath('/html/body/table/tbody/tr/td/table/tbody/tr/td[5]/div/table[1]/tbody/tr/td[3]/table/tbody/tr[1]/td[3]/div[2]').text
    CertfiedorDesignatedSince = driver.find_element_by_xpath('/html/body/table/tbody/tr/td/table/tbody/tr/td[5]/div/table[1]/tbody/tr/td[3]/table/tbody/tr[3]/td[1]/div[2]').text
    try:
        AccreditedBy = driver.find_element_by_xpath('/html/body/table/tbody/tr/td/table/tbody/tr/td[5]/div/table[1]/tbody/tr/td[3]/table/tbody/tr[5]/td[3]/div[2]/a').text
    except NoSuchElementException:
        AccreditedBy = "N/A"
    try:
        Expires = driver.find_element_by_xpath('/html/body/table/tbody/tr/td/table/tbody/tr/td[5]/div/table[1]/tbody/tr/td[3]/table/tbody/tr[5]/td[1]/div[2]').text
    except NoSuchElementException:
        Expires = "N/A"
    Data = (Name + " , " + IssuedBy + " , " + CertificationorDesignaationNumber + " , " + CertfiedorDesignatedSince + " , " + AccreditedBy + " , " + Expires)
    print(Data)
    driver.close()
    driver.switch_to.window(driver.window_handles[0])

driver.close()
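No answer is recorded here, but the change being asked for is small: open a csv.writer before the loops and write a row where the code currently prints. A rough sketch using the standard library (the file name and header labels are my own choices):

import csv

# Open the output file once, before the scraping loops (the file name is arbitrary)
csv_file = open("profiles.csv", "w", newline="")
writer = csv.writer(csv_file)
writer.writerow(["Name", "Issued By", "Certification/Designation Number",
                 "Certified/Designated Since", "Accredited By", "Expires"])

# Then, inside the for-link loop, replace print(Data) with:
# writer.writerow([Name, IssuedBy, CertificationorDesignaationNumber,
#                  CertfiedorDesignatedSince, AccreditedBy, Expires])

# And close the file after the final driver.close():
# csv_file.close()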

Python Scraping from website

I've tried to write a web scraper for https://www.waug.com/area/?idx=15:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup

url = requests.get('https://www.abcd.com/area/?abc=15')
html = url.text
soup = BeautifulSoup(html, 'html.parser')
count = 1
names = soup.select('#good_{} > div > div.class_name > div > div'.format(count))
prices = soup.select('#good_{} > div > div.class_name > div.class_name'.format(count))
for name in names:
    while count < 45:
        print(name.text)
        count = count + 1
for price in prices:
    while count < 45:
        print(price.text)
        count = count + 1
The output is only the first item name, 45 times, and no prices. How can I get all the item names and prices? I want each item's name and price on the same line. (I've changed the URL and some of the class names, just in case.)
In order to be sure to get the right name with the right price, I'd grab the whole "item-good" class.
Then a for loop makes sure that each title I get matches its price.
Here's an example of how to parse the page with BeautifulSoup:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup

url = requests.get('https://www.waug.com/area/?idx=15')
html = url.text
soup = BeautifulSoup(html, 'html.parser')
items = soup.findAll("div", {"class": "item-good"})
for item in items:
    item_title = item.find("div", {"class": "good-title-text"})
    item_price = item.find("div", {"class": "price-selling"})
    print(item_title.text + " " + item_price.text)
    # If you get encoding errors, delete the row above and uncomment the one below
    # print(item_title.text.encode("utf-8") + " " + item_price.text.encode("utf-8"))
As the OP pointed out, this is not enough, because there is a "more" button to push on the web page in order to retrieve all the results.
This can be done using Selenium WebDriver.
=== IMPORTANT NOTE ===
In order to make this work you'll also need to copy the "chromedriver" file into your script folder.
You can download it from Google's chromedriver site.
Here's the script:
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys

browser = webdriver.Chrome()
browser.get('https://www.waug.com/area/?idx=15')
for number in range(10):
    try:
        WebDriverWait(browser, 60).until(EC.presence_of_element_located((By.ID, "more_good")))
        more_button = browser.find_element_by_id('more_good')
        more_button.click()
        time.sleep(10)
    except:
        print("Scrolling is now complete!")
source = browser.page_source
# This source variable should be used as input for BeautifulSoup
print(source)
Now it is time to merge the two solutions explained above in order to get the final requested result.
Please keep in mind that this is just a quick'n'dirty hack and needs proper error handling and polishing, but it should be enough to get you started:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys

browser = webdriver.Chrome()
browser.get('https://www.waug.com/area/?idx=15')

def is_page_load_complete():
    close_button = browser.find_element_by_id('close_good')
    return close_button.is_displayed()

while True:
    WebDriverWait(browser, 60).until(EC.presence_of_element_located((By.ID, "more_good")))
    time.sleep(10)
    more_button = browser.find_element_by_id('more_good')
    if more_button.is_displayed():
        more_button.click()
    else:
        if is_page_load_complete():
            break

source = browser.page_source
soup = BeautifulSoup(source, 'html.parser')
items = soup.findAll("div", {"class": "item-good"})
for item in items:
    item_title = item.find("div", {"class": "good-title-text"})
    item_price = item.find("div", {"class": "price-selling"})
    print(item_title.text + " " + item_price.text)
    # If you get encoding errors, comment the row above and uncomment the one below
    # print(item_title.text.encode("utf-8") + " " + item_price.text.encode("utf-8"))

print("Total items found: " + str(len(items)))

Selenium find all elements by xpath

I used Selenium to scrape a scrolling website and wrote the code below:
import csv
import re
import time
import unittest

import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

output_file = open("Kijubi.csv", "w", newline='')

class Crawling(unittest.TestCase):
    def setUp(self):
        self.driver = webdriver.Firefox()
        self.driver.set_window_size(1024, 768)
        self.base_url = "http://www.viatorcom.de/"
        self.accept_next_alert = True

    def test_sel(self):
        driver = self.driver
        delay = 3
        driver.get(self.base_url + "de/7132/Seoul/d973-allthingstodo")
        for i in range(10):  # scroll to the bottom repeatedly so more items load
            driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            time.sleep(2)
        html_source = driver.page_source
        data = html_source.encode("utf-8")
My next step was to crawl specific information from the website like the price.
Hence, I added the following code:
all_spans = driver.find_elements_by_xpath("/html/body/div[5]/div/div[3]/div[2]/div[2]/div[1]/div[1]/div")
print(all_spans)
for price in all_spans:
    Header = driver.find_elements_by_xpath("/html/body/div[5]/div/div[3]/div[2]/div[2]/div[1]/div[1]/div/div[2]/div[2]/span[2]")
    for span in Header:
        print(span.text)
But I get just one price instead of all of them. Could you give me feedback on how to improve my code? Thanks :)
EDIT
Thanks to you guys, I managed to get it running. Here is the additional code:
elements = driver.find_elements_by_xpath("//div[@id='productList']/div/div")
innerElements = 15
outerElements = len(elements) / innerElements
print(innerElements, "\t", outerElements, "\t", len(elements))
for j in range(1, int(outerElements)):
    for i in range(1, int(innerElements)):
        headline = driver.find_element_by_xpath("//div[@id='productList']/div[" + str(j) + "]/div[" + str(i) + "]/div/div[2]/h2/a").text
        price = driver.find_element_by_xpath("//div[@id='productList']/div[" + str(j) + "]/div[" + str(i) + "]/div/div[2]/div[2]/span[2]").text
        deeplink = driver.find_element_by_xpath("//div[@id='productList']/div[" + str(j) + "]/div[" + str(i) + "]/div/div[2]/h2/a").get_attribute("href")
        print("Header: " + headline + " | " + "Price: " + price + " | " + "Deeplink: " + deeplink)
Now my last issue is that I still do not get back the last 20 prices, which have an English description. I only get back the prices with a German description; the English ones do not get fetched, although they share the same HTML structure.
E.g. the HTML structure for the English items:
headline = driver.find_element_by_xpath("//div[@id='productList']/div[6]/div[1]/div/div[2]/h2/a")
Do you guys know what I have to modify? Any feedback is appreciated :)
To grab all prices on that page you should use an XPath like this:
Header = driver.find_elements_by_xpath("//span[contains(concat(' ', normalize-space(@class), ' '), 'price-amount')]")
which means: find all span elements whose class list contains price-amount (why so complex? see here).
But a simpler way to find the same elements is with a CSS locator:
.price-amount
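For reference, a minimal sketch of the CSS variant (assuming the same price-amount class as the XPath above):

prices = driver.find_elements_by_css_selector(".price-amount")
for p in prices:
    # Matches every element whose class list contains price-amount
    print(p.text)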
