I'm trying to scrape Amazon prices with PhantomJS and Python. I want to parse the page with BeautifulSoup to get the new and used prices for books. The problem is that when I pass in the source of the request I make with PhantomJS, the prices are just 0,00. The code is this simple test.
I'm new to web scraping, but I don't understand whether Amazon has measures in place to prevent price scraping or whether I'm doing something wrong, because with other, simpler pages I can get the data I want.
P.S. I'm in a country that isn't supported by the Amazon API, which is why the scraper is necessary.
import re
import urlparse
from selenium import webdriver
from bs4 import BeautifulSoup
from time import sleep
link = 'http://www.amazon.com/gp/offer-listing/1119998956/ref=dp_olp_new?ie=UTF8&condition=new'#'http://www.amazon.com/gp/product/1119998956'
class AmazonScraper(object):
def __init__(self):
self.driver = webdriver.PhantomJS()
self.driver.set_window_size(1120, 550)
def scrape_prices(self):
self.driver.get(link)
s = BeautifulSoup(self.driver.page_source)
return s
def scrape(self):
source = self.scrape_prices()
print source
self.driver.quit()
if __name__ == '__main__':
scraper = AmazonScraper()
scraper.scrape()
First of all, following @Nick Bailey's comment, study the Terms of Use and make sure there are no violations on your side.
To solve it, you need to tweak PhantomJS desired capabilities:
caps = webdriver.DesiredCapabilities.PHANTOMJS
caps["phantomjs.page.settings.userAgent"] = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/53 (KHTML, like Gecko) Chrome/15.0.87"
self.driver = webdriver.PhantomJS(desired_capabilities=caps)
self.driver.maximize_window()
And, to make it bullet-proof, you can make a Custom Expected Condition and wait for the price to become non-zero:
from selenium.common.exceptions import StaleElementReferenceException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
class wait_for_price(object):
def __init__(self, locator):
self.locator = locator
def __call__(self, driver):
try :
element_text = EC._find_element(driver, self.locator).text.strip()
return element_text != "0,00"
except StaleElementReferenceException:
return False
Usage:
def scrape_prices(self):
self.driver.get(link)
WebDriverWait(self.driver, 200).until(wait_for_price((By.CLASS_NAME, "olpOfferPrice")))
s = BeautifulSoup(self.driver.page_source)
return s
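With that wait in place, pulling the actual prices out of the soup is straightforward. Here is a minimal sketch, assuming the offer prices live in elements carrying the same olpOfferPrice class the wait targets:
def parse_prices(self, soup):
    # Collect the text of every offer price on the listing page;
    # "olpOfferPrice" is the class the explicit wait above checks.
    return [tag.get_text(strip=True) for tag in soup.find_all(class_="olpOfferPrice")]
Each entry is the raw price string, which you can clean up and convert as needed.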
Good answer on setting the PhantomJS user agent to that of a normal browser. Since you said that your country is blocked by Amazon, I would imagine that you also need to set a proxy.
Here is an example of how to start PhantomJS in Python with a Firefox user agent and a proxy.
from selenium.webdriver import *
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
service_args = [ '--proxy=1.1.1.1:port', '--proxy-auth=username:pass' ]
dcap = dict( DesiredCapabilities.PHANTOMJS )
dcap["phantomjs.page.settings.userAgent"] = "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:36.0) Gecko/20100101 Firefox/36.0"
driver = PhantomJS( desired_capabilities = dcap, service_args=service_args )
where 1.1.1.1 is your proxy IP and port is the proxy port. The username and password are only necessary if your proxy requires authentication.
Another framework to try is Scrapy. It is simpler than Selenium, which is meant to simulate browser interactions. Scrapy gives you classes for easily parsing data with CSS selectors or XPath, and a pipeline for storing that data in whatever format you'd like, for example writing it to a MongoDB database.
Often you can write a fully built spider and deploy it to Scrapy Cloud in under 10 lines of code.
Check out this YouTube video on how to use Scrapy for scraping Amazon reviews as a use case.
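For a rough idea of what that looks like, here is a minimal spider sketch. The start URL is the offer-listing page from the question, but the CSS selectors are hypothetical placeholders, not Amazon's actual markup:
import scrapy

class OfferSpider(scrapy.Spider):
    name = "offers"
    start_urls = ["http://www.amazon.com/gp/offer-listing/1119998956/"]

    def parse(self, response):
        # Hypothetical selectors -- adjust them to the page's real structure.
        for offer in response.css("div.offer"):
            yield {
                "price": offer.css("span.price::text").extract_first(),
                "condition": offer.css("span.condition::text").extract_first(),
            }
You can run it with an output feed, e.g. scrapy runspider offers.py -o offers.json.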
Related
I have written a script in Python using Selenium to fetch the business summary (which is within a p tag) located at the bottom right corner of a webpage under the header Company Profile. The webpage is heavily dynamic, so I thought to use a browser simulator. I have created a CSS selector which is able to parse the summary if I copy the HTML elements directly from that webpage and try it locally. For some reason, when I use the same selector within my script below, it doesn't do the trick; it throws a TimeoutException instead. How can I fetch the summary?
This is my try:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
link = "https://in.finance.yahoo.com/quote/AAPL?p=AAPL"
def get_information(driver, url):
driver.get(url)
item = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, "[id$='-QuoteModule'] p[class^='businessSummary']")))
driver.execute_script("arguments[0].scrollIntoView();", item)
print(item.text)
if __name__ == "__main__":
driver = webdriver.Chrome()
wait = WebDriverWait(driver, 20)
try:
get_information(driver,link)
finally:
driver.quit()
It seems that there is no Business Summary block initially; it is generated after you scroll the page down. Try the solution below:
from selenium.webdriver.common.keys import Keys
def get_information(driver, url):
driver.get(url)
driver.find_element_by_tag_name("body").send_keys(Keys.END)
item = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, "[id$='-QuoteModule'] p[class^='businessSummary']")))
print(item.text)
You have to scroll the page down twice before the element becomes present:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.keys import Keys  # needed for Keys.END below
import time
link = "https://in.finance.yahoo.com/quote/AAPL?p=AAPL"
def get_information(driver, url):
driver.get(url)
driver.find_element_by_tag_name("body").send_keys(Keys.END) # scroll page
time.sleep(1) # small pause between
driver.find_element_by_tag_name("body").send_keys(Keys.END) # one more time
item = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, "[id$='-QuoteModule'] p[class^='businessSummary']")))
driver.execute_script("arguments[0].scrollIntoView();", item)
print(item.text)
if __name__ == "__main__":
driver = webdriver.Chrome()
wait = WebDriverWait(driver, 20)
try:
get_information(driver,link)
finally:
driver.quit()
If you scroll only once, it doesn't work properly for some reason (at least for me). I think it depends on the window dimensions: in a smaller window you have to scroll more than in a bigger one.
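A more robust variant, sketched below with the same imports as the snippet above, is to keep scrolling in a loop until the element shows up, so the number of scrolls a particular window height needs no longer matters:
def get_information(driver, url):
    driver.get(url)
    selector = "[id$='-QuoteModule'] p[class^='businessSummary']"
    short_wait = WebDriverWait(driver, 5)
    for _ in range(10):  # bounded so we never scroll forever
        driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        try:
            item = short_wait.until(
                EC.presence_of_element_located((By.CSS_SELECTOR, selector)))
            print(item.text)
            return
        except TimeoutException:
            continue  # not rendered yet, scroll again
    raise TimeoutException("business summary did not appear after scrolling")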
Here is a much simpler approach using requests and working with the JSON data that is already in the page. I would also recommend always using requests when possible; it may take some extra work, but the end result is a lot more reliable and cleaner. You could also take my example a lot further and parse the JSON to work with it directly (you need to clean up the text to be valid JSON). In my example I just use split, which was faster to write, but it could lead to problems down the road when you do something more complex.
import requests
from lxml import html
url = 'https://in.finance.yahoo.com/quote/AAPL?p=AAPL'
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}
r = requests.get(url, headers=headers)
tree = html.fromstring(r.text)
data = [e.text_content() for e in tree.iter('script') if 'root.App.main = ' in e.text_content()][0]
data = data.split('longBusinessSummary":"')[1]
data = data.split('","city')[0]
print(data)
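If you want to go the full-JSON route instead of split, here is a sketch. The regular expression assumes the data sits in the usual root.App.main = {...}; assignment inside the script tag, and the helper searches the parsed blob for the key so you don't have to hard-code its exact path:
import json
import re

def find_key(obj, key):
    # Recursively look for the first occurrence of `key` in nested dicts/lists.
    if isinstance(obj, dict):
        if key in obj:
            return obj[key]
        obj = list(obj.values())
    if isinstance(obj, list):
        for value in obj:
            found = find_key(value, key)
            if found is not None:
                return found
    return None

raw = [e.text_content() for e in tree.iter('script') if 'root.App.main = ' in e.text_content()][0]
blob = re.search(r'root\.App\.main\s*=\s*(\{.*\})\s*;\s*\}\s*\(this\)', raw, re.DOTALL).group(1)
payload = json.loads(blob)
print(find_key(payload, 'longBusinessSummary'))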
I need to scrape Google suggestions from the search input. Right now I use the Selenium + PhantomJS webdriver.
search_input = selenium.find_element_by_xpath(".//input[@id='lst-ib']")
search_input.send_keys('phantomjs har python')
time.sleep(1.5)
from lxml.html import fromstring
etree = fromstring(selenium.page_source)
output = []
for suggestion in etree.xpath(".//ul[@role='listbox']/li//div[@class='sbqs_c']"):
output.append(" ".join([s.strip() for s in suggestion.xpath(".//text()") if s.strip()]))
But in Firebug I see an XHR request like this, and the response is a simple text file with the data that I need. Then I look at the log:
selenium.get_log("har")
I can't see this request there. How can I catch it? I need this URL as a template for the requests library, so I can use it with other search words. Or would it be possible to run JS that initiates this request with other arguments (not taken from the input field)?
You can solve it with Python+Selenium+PhantomJS only.
Here is the list of things I've done to make it work:
pretend to be a browser with a head by changing the PhantomJS's User-Agent through Desired Capabilities
use Explicit Waits
ask for the direct https://www.google.com/?gws_rd=ssl#q=phantomjs+har+python url
Working solution:
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium import webdriver
desired_capabilities = webdriver.DesiredCapabilities.PHANTOMJS
desired_capabilities["phantomjs.page.customHeaders.User-Agent"] = "Mozilla/5.0 (Linux; U; Android 2.3.3; en-us; LG-LU3000 Build/GRI40) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1"
driver = webdriver.PhantomJS(desired_capabilities=desired_capabilities)
driver.get("https://www.google.com/?gws_rd=ssl#q=phantomjs+har+python")
wait = WebDriverWait(driver, 10)
# focus the input and trigger the suggestion list to be shown
search_input = wait.until(EC.visibility_of_element_located((By.NAME, "q")))
search_input.send_keys(Keys.ARROW_DOWN)
search_input.click()
# wait for the suggestion box to appear
wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, "ul[role=listbox]")))
# parse suggestions
print "List of suggestions: "
for suggestion in driver.find_elements_by_css_selector("ul[role=listbox] li[dir]"):
print suggestion.text
Prints:
List of suggestions:
python phantomjs screenshot
python phantomjs ghostdriver
python phantomjs proxy
unable to start phantomjs with ghostdriver
Trying to screen scrape a web site without having to launch an actual browser instance in a python script (using Selenium). I can do this with Chrome or Firefox - I've tried it and it works - but I want to use PhantomJS so it's headless.
The code looks like this:
import sys
import traceback
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
dcap = dict(DesiredCapabilities.PHANTOMJS)
dcap["phantomjs.page.settings.userAgent"] = (
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/53 "
"(KHTML, like Gecko) Chrome/15.0.87"
)
try:
# Choose our browser
browser = webdriver.PhantomJS(desired_capabilities=dcap)
#browser = webdriver.PhantomJS()
#browser = webdriver.Firefox()
#browser = webdriver.Chrome(executable_path="/usr/local/bin/chromedriver")
# Go to the login page
browser.get("https://www.whatever.com")
# For debug, see what we got back
html_source = browser.page_source
with open('out.html', 'w') as f:
f.write(html_source)
# PROCESS THE PAGE (code removed)
except Exception, e:
browser.save_screenshot('screenshot.png')
traceback.print_exc(file=sys.stdout)
finally:
browser.close()
The output is merely:
<html><head></head><body></body></html>
But when I use the Chrome or Firefox options, it works fine. I thought maybe the web site was returning junk based on the user agent, so I tried faking that out. No difference.
What am I missing?
UPDATED: I will try to keep the snippet below updated until it works. What's below is what I'm currently trying.
import sys
import traceback
import time
import re
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.support import expected_conditions as EC
dcap = dict(DesiredCapabilities.PHANTOMJS)
dcap["phantomjs.page.settings.userAgent"] = (
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/53 (KHTML, like Gecko) Chrome/15.0.87")
try:
# Set up our browser
browser = webdriver.PhantomJS(desired_capabilities=dcap, service_args=['--ignore-ssl-errors=true'])
#browser = webdriver.Chrome(executable_path="/usr/local/bin/chromedriver")
# Go to the login page
print "getting web page..."
browser.get("https://www.website.com")
# Need to wait for the page to load
timeout = 10
print "waiting %s seconds..." % timeout
wait = WebDriverWait(browser, timeout)
element = wait.until(EC.element_to_be_clickable((By.ID,'the_id')))
print "done waiting. Response:"
# Rest of code snipped. Fails as "wait" above.
I was facing the same problem, and no amount of code to make the driver wait was helping.
The problem is the SSL encryption on HTTPS websites; ignoring SSL errors will do the trick.
Call the PhantomJS driver as:
driver = webdriver.PhantomJS(service_args=['--ignore-ssl-errors=true', '--ssl-protocol=TLSv1'])
This solved the problem for me.
You need to wait for the page to load. Usually, it is done by using an Explicit Wait to wait for a key element to be present or visible on a page. For instance:
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
# ...
browser.get("https://www.whatever.com")
wait = WebDriverWait(browser, 10)
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.content")))
html_source = browser.page_source
# ...
Here, we'll wait up to 10 seconds for a div element with class="content" to become visible before getting the page source.
Additionally, you may need to ignore SSL errors:
browser = webdriver.PhantomJS(desired_capabilities=dcap, service_args=['--ignore-ssl-errors=true'])
However, I'm pretty sure this is related to the redirect issues in PhantomJS. There is an open ticket in the PhantomJS bug tracker:
PhantomJS does not follow some redirects
This worked for me:
driver = webdriver.PhantomJS(service_args=['--ignore-ssl-errors=true', '--ssl-protocol=TLSv1'])
I am scraping an e-commerce website with Selenium, because the pages are loaded by JavaScript.
Here's the workflow:
1. Instantiate a web driver in virtual display mode, while sending a random user agent. Using a random user agent decreases your chances of detection just a little bit. It will not reduce the chances of being blocked by IP.
2. For each query term, say "pajamas", create the search URL for that website and open the URL.
3. Get the corresponding text elements via XPath, say the top 10 product IDs, their prices, the product titles, etc.
4. Store them in a file that I will process further.
I have upwards of 38,000 such URLs for which I need to fetch the elements after page load.
I used multiprocessing, and I quickly realized that the process was failing because, after a while, the website blocked me, so the page load did not happen.
How can I spoof my IP in Python, and will it work with Selenium driving the browser rather than urllib/urlopen?
Aside from the actual fetching via the XPaths, here's the basic code; more specifically, see init_driver:
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
import argparse
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import codecs, urllib, os
import multiprocessing as mp
from my_custom_path import scraping_conf_updated as sf
from fake_useragent import UserAgent
from pyvirtualdisplay import Display  # provides the virtual display used in init_driver
def set_cookies(COOKIES, exp, driver):
for key, val in COOKIES[exp].items():
driver.add_cookie({'name': key, 'value': val, 'path': '/', 'secure': False, 'expiry': None})
return driver
def check_cookies(driver, exp):
print "printing cookie name & value"
for cookie in driver.get_cookies():
if cookie['name'] in COOKIES[exp].keys():
print cookie['name'], "-->", cookie['value']
def wait_for(driver):
if conf_key['WAIT_FOR_ID'] != '':
WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.ID, conf_key['WAIT_FOR_ID'])))
elif conf_key['WAIT_FOR_CLASS'] != '':
WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.CLASS_NAME, conf_key['WAIT_FOR_CLASS'])))
return driver
def init_driver(base_url, url, exp):
display = Display(visible=0, size=(1024, 768))
display.start()
profile = webdriver.FirefoxProfile()
ua = UserAgent(cache=False)
profile.set_preference("general.useragent.override",ua.random)
driver=webdriver.Firefox(profile)
if len(conf_key['COOKIES'][exp]) != 0:
driver.get(base_url)
driver.delete_all_cookies()
driver = set_cookies(COOKIES, exp, driver)
check_cookies(driver, exp)
driver.get(url)
driver.set_page_load_timeout(300)
if len(conf_key['POP_UP']['XPATH']) > 0:
driver = identify_and_close_popup(driver)
driver = wait_for(driver)
return driver
Use a VPN provider or an HTTP or SOCKS proxy to change your apparent originating IP address as seen by your target website.
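As a sketch of what that can look like with the Firefox profile the question already builds, here is one way to pick a proxy per driver. The proxy pool below is hypothetical; plug in proxies you actually control:
import random
from selenium import webdriver

# Hypothetical proxy pool; each worker/driver picks one entry at random.
PROXY_POOL = [("203.0.113.10", 3128), ("203.0.113.11", 3128)]

def make_proxied_profile():
    host, port = random.choice(PROXY_POOL)
    profile = webdriver.FirefoxProfile()
    profile.set_preference("network.proxy.type", 1)  # 1 = manual proxy configuration
    profile.set_preference("network.proxy.http", host)
    profile.set_preference("network.proxy.http_port", port)
    profile.set_preference("network.proxy.ssl", host)
    profile.set_preference("network.proxy.ssl_port", port)
    return profile

driver = webdriver.Firefox(make_proxied_profile())
This combines naturally with the random user agent already set in init_driver.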
I am attempting to use Selenium/BeautifulSoup to unit test a web page. I am getting an error though that I haven't been able to Google.
selenium.common.exceptions.WebDriverException: Message: ''
I am using a Portable version of Firefox and a proxy.
import urllib2
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
import time
import sys
def getItemDivs(url):
profile = webdriver.FirefoxProfile()
profile.set_preference("general.useragent.override","Mozilla/5.0 (Windows NT 6.1; WOW64; rv:12.0) Gecko/20100101 Firefox/21.0")
profile.set_preference("network.proxy.http", "proxy.example.com")
ffbin = webdriver.firefox.firefox_binary.FirefoxBinary('C:\\FirefoxPortable\\App\\Firefox\\firefox.exe')
# IT FAILS ON THE NEXT LINE
driver=webdriver.Firefox(profile, firefox_binary=ffbin)
driver.implicitly_wait(30)
# THIS LINE CONTAINS A VALID COOKIE, BUT IT HAS BEEN REMOVED FOR THIS QUESTION.
driver.add_cookie(<<mycookie>>)
base_url = url
verificationErrors = []
accept_next_alert = True
driver.get(base_url)
scrap1 = driver.page_source
soup = BeautifulSoup(scrap1)
This question is similar to this one; however, in that question they had a successful first request. I haven't had any success.
What can cause this type of exception but leave the message empty?
The problem was that I didn't set the network.proxy.port. Adding this line solved the problem:
profile.set_preference("network.proxy.port", "80")