Better way to take a screenshot of a URL in Python

Problem Description
I am currently working on a project which requires me to browse to a URL and take a screenshot of the webpage.
After looking through various resources I found 3 ways to do so. I will describe all 3 methods I am currently using.
Method 1: PhantomJS
from selenium import webdriver
import time
import sys

print('Without Headless')
_start = time.time()
br = webdriver.PhantomJS()
br.get('http://' + sys.argv[1])
br.save_screenshot('screenshot-phantom.png')
br.quit()
_end = time.time()
print('Total time for non-headless {}'.format(_end - _start))
Method 2: Headless Browser
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import time
import sys

print('Headless')
_start = time.time()
options = Options()
options.add_argument("--headless")  # Runs Chrome in headless mode
options.add_argument('--no-sandbox')  # Bypass OS security model
options.add_argument('start-maximized')
options.add_argument('disable-infobars')
options.add_argument("--disable-extensions")
driver = webdriver.Chrome(chrome_options=options, executable_path='/usr/bin/chromedriver')
driver.get('http://' + sys.argv[1])
driver.save_screenshot('screenshot-headless.png')
driver.quit()
_end = time.time()
print('Total time for headless {}'.format(_end - _start))
Method 3: PyQt4
import logging
import os
import sys
import time

from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.QtWebKit import *

_logger = logging.getLogger(__name__)

class Screenshot(QWebView):
    def __init__(self):
        self.app = QApplication(sys.argv)
        QWebView.__init__(self)
        self._loaded = False
        self.loadFinished.connect(self._loadFinished)

    def capture(self, url, output_file):
        _logger.info('Received url {}'.format(url))
        _start = time.time()
        try:
            # prepend a scheme if the url does not already have http/https
            if not (url.startswith('http://') or url.startswith('https://')):
                url = 'http://' + url
            self.load(QUrl(url))
            self.wait_load(url)
            # set viewport to webpage size
            frame = self.page().mainFrame()
            self.page().setViewportSize(frame.contentsSize())
            # render image
            image = QImage(self.page().viewportSize(), QImage.Format_ARGB32)
            painter = QPainter(image)
            frame.render(painter)
            painter.end()
            _logger.info('Saving screenshot {} for {}'.format(output_file, url))
            image.save(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data', output_file))
        except Exception as e:
            _logger.error('Error in capturing screenshot {} - {}'.format(url, e))
        _end = time.time()
        _logger.info('Time taken for processing url {} - {}'.format(url, _end - _start))

    def wait_load(self, url, delay=1, retry_count=60):
        # process app events until the page is loaded
        while not self._loaded and retry_count:
            _logger.info('wait_load for url {} retry_count {}'.format(url, retry_count))
            self.app.processEvents()
            time.sleep(delay)
            retry_count -= 1
        _logger.info('wait_load for url {} expired'.format(url))
        self._loaded = False

    def _loadFinished(self, result):
        self._loaded = True
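For reference, a minimal usage sketch for the class above (my addition, not part of the original script; it assumes logging is configured and that a data/ directory exists next to the script, since capture() saves there):
import logging
logging.basicConfig(level=logging.INFO)

s = Screenshot()
s.capture('www.google.com', 'screenshot-pyqt.png')  # saved under ./data/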
Issue Faced:
While using these 3 methods, all of them get stuck with one error or another. One such issue is described in this question on Stack Overflow.
So, out of these 3 ways to take a screenshot of a webpage in Python, which is efficient and will work in a large-scale deployment?

Taken from https://gist.github.com/fabtho/13e4a2e7cfbfde671b8fa81bbe9359fb and rewritten in Python 3.
This method will technically work, but it will not look good: many websites have cookie-acceptance pop-ups that will appear in every screenshot, so depending on the website you may wish to remove these with Selenium before beginning the screenshotting process (see the sketch after the code below).
from selenium import webdriver
from PIL import Image
from io import BytesIO

verbose = 1
browser = webdriver.Chrome(executable_path='C:/yourpath/chromedriver.exe')
browser.get('http://stackoverflow.com/questions/37906704/taking-a-whole-page-screenshot-with-selenium-marionette-in-python')

# from here http://stackoverflow.com/questions/1145850/how-to-get-height-of-entire-document-with-javascript
js = 'return Math.max( document.body.scrollHeight, document.body.offsetHeight, document.documentElement.clientHeight, document.documentElement.scrollHeight, document.documentElement.offsetHeight);'
scrollheight = browser.execute_script(js)
if verbose > 0:
    print(scrollheight)

slices = []
offset = 0
while offset < scrollheight:
    if verbose > 0:
        print(offset)
    browser.execute_script("window.scrollTo(0, %s);" % offset)
    img = Image.open(BytesIO(browser.get_screenshot_as_png()))
    offset += img.size[1]
    slices.append(img)
    if verbose > 0:
        browser.get_screenshot_as_file('%s/screen_%s.png' % ('/tmp', offset))
        print(scrollheight)

screenshot = Image.new('RGB', (slices[0].size[0], offset))
offset = 0
for img in slices:
    screenshot.paste(img, (0, offset))
    offset += img.size[1]
screenshot.save('screenshot.png')
browser.quit()
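For the cookie pop-ups mentioned above, a hedged sketch of dismissing a consent banner before the scroll-and-stitch loop; the CSS selector is hypothetical, so inspect the target site for the real one:
from selenium.webdriver.common.by import By

try:
    # hypothetical selector; replace it with the site's actual consent button
    browser.find_element(By.CSS_SELECTOR, "button[aria-label='Accept cookies']").click()
except Exception:
    pass  # no banner found; continue with the screenshots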

Related

Web scraping Google Maps with Selenium uses too much data

I am scraping travel times from Google Maps. The code below scrapes travel times between 1 million random points in Tehran, and it works perfectly fine. I also use multiprocessing to fetch travel times simultaneously. The results are fully replicable; feel free to run the code in a terminal (but not in an interactive session like Spyder, as the multiprocessing won't work). This is what the scraped value looks like on Google Maps (in this case, 22 min is the travel time):
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from multiprocessing import Process, Pipe, Pool, Value
import time
from multiprocessing.pool import ThreadPool
import threading
import gc
threadLocal = threading.local()
class Driver:
    def __init__(self):
        options = webdriver.ChromeOptions()
        options.add_argument("--headless")
        options.add_experimental_option('excludeSwitches', ['enable-logging'])
        self.driver = webdriver.Chrome(options=options)

    def __del__(self):
        self.driver.quit()  # clean up driver when we are cleaned up
        print('The driver has been "quitted".')

    @classmethod
    def create_driver(cls):
        the_driver = getattr(threadLocal, 'the_driver', None)
        if the_driver is None:
            print('Creating new driver.')
            the_driver = cls()
            threadLocal.the_driver = the_driver
        driver = the_driver.driver
        the_driver = None
        return driver
success = Value('i', 0)
error = Value('i', 0)

def f(x):
    global success
    global error
    with success.get_lock():
        success.value += 1
        print("Number of successes", success.value)
    with error.get_lock():
        error.value += 1
        print("counter.value:", error.value)
def scraper(url):
    """
    This now scrapes a single URL.
    """
    global success
    global error
    try:
        driver = Driver.create_driver()
        driver.get(url)
        time.sleep(1)
        trip_times = driver.find_element(By.XPATH, "//div[contains(@aria-labelledby, 'section-directions-trip-title')]//span[@jstcache='198']")
        print("got data from: ", url)
        print(trip_times.text)
        with success.get_lock():
            success.value += 1
            print("Number of successful scrapes: ", success.value)
    except Exception as e:
        # print(f"Error: {e}")
        with error.get_lock():
            error.value += 1
            print("Number of errors", error.value)
import random

min_x = 35.617487
max_x = 35.783375
min_y = 51.132557
max_y = 51.492329

urls = []
for i in range(1000000):
    x = random.uniform(min_x, max_x)
    y = random.uniform(min_y, max_y)
    url = f'https://www.google.com/maps/dir/{x},{y}/35.8069533,51.4261312/@35.700769,51.5571612,21z'
    urls.append(url)

number_of_processes = min(2, len(urls))
start_time = time.time()
with ThreadPool(processes=number_of_processes) as pool:
    result_array = pool.map(scraper, urls)
    # Must ensure drivers are quit before threads are destroyed:
    del threadLocal
    # This should ensure that the __del__ method is run on class Driver:
    gc.collect()
    pool.close()
    pool.join()
print(result_array)
print("total time: ", round((time.time() - start_time) / 60, 1), "number of urls: ", len(urls))
But after having it run for only 24 hours, it has already used around 80 GB of data! Is there a way to make this more efficient in terms of data usage?
I suspect this excessive data usage is because Selenium has to load each URL completely every time before it can access the HTML and get the target node. Can I change anything in my code to prevent that and still get the travel time?
Please note that using the Google Maps API is not an option, because the limit is too small for my application and the service is not provided in my country.
You can use a Page Load Strategy.
A Selenium WebDriver has 3 page load strategies:
normal - waits for all resources to download.
eager - DOM access is ready, but other resources like images may still be loading.
none - does not block WebDriver at all.
options.page_load_strategy = "none"  # ["normal", "eager", "none"]
It might help you (obviously it doesn't perform miracles, but it's better than nothing).
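A minimal sketch of how this could fit the scraper above; the explicit wait is an assumption on my part, since with the "none" strategy you must wait for the element you need yourself:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

options = webdriver.ChromeOptions()
options.add_argument("--headless")
options.page_load_strategy = "none"  # do not block on the full page load
driver = webdriver.Chrome(options=options)
driver.get(url)  # one of the Google Maps direction URLs built above
# wait only for the element that carries the travel time
trip_time = WebDriverWait(driver, 10).until(
    EC.presence_of_element_located((By.XPATH, "//div[contains(@aria-labelledby, 'section-directions-trip-title')]"))
)
print(trip_time.text)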

Complete and Submit form in python

I am trying to create a Python script that completes the form on this page http://segas.gr/index.php/el/2015-best-athlete by selecting the radio button with the label "Γιώργος Μηλιαράς (ΣΑΚΑ) 800 μ./1,500 μ./3,000 μ", aka id="male_kids_3".
Here is my code:
import urllib
import urllib2
import webbrowser
url = "http://segas.gr/index.php/el/2015-best-athlete"
data = urllib.urlencode({'male_kids_3': 'checked'})
results = urllib2.urlopen(url, data)
with open("results.html", "w") as f:
f.write(results.read())
webbrowser.open("results.html")
I found a solution using selenium:
from selenium import webdriver
import time
from selenium.webdriver.common.keys import Keys

def malakia():
    # Get url
    browser.get("http://segas.gr/index.php/el/2015-best-athlete")
    miliaras = browser.find_element_by_id("male_kids_3")
    miliaras.click()
    validate = browser.find_element_by_name("input_submit_4")
    validate.click()

if __name__ == "__main__":  # Because we are bad-ass and we know python
    # Let's make some magiKKK
    times = int(input("How many times do you want to vote for G #babas??\n"))
    # create browser object
    browser = webdriver.Chrome()
    for i in range(times):
        malakia()

Scraping with Beautifulsoup-Python

I want to scrape the name of the hotel on TripAdvisor from each review page of the hotel.
I wrote some Python code which is very simple, and I don't think it is wrong.
But every time it stops at a different point (page; for example, the first time it stopped on page 150, the second time on page 330).
I am 100% sure that my code is correct. Is there any possibility that TripAdvisor blocks me every time?
I updated the code and I use Selenium too, but the problem still remains.
The updated code is the following:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import NoSuchElementException
from bs4 import BeautifulSoup
import os
import urllib.request
import time
import re
file2 = open(os.path.expanduser(r"~/Desktop/TripAdviser Reviews2.csv"), "wb")
file2.write(b"hotel,Address,HelpCount,HotelCount,Reviewer" + b"\n")
Checker = "REVIEWS"

# example option: add 'incognito' command line arg to options
option = webdriver.ChromeOptions()
option.add_argument("--incognito")

# create new instance of chrome in incognito mode
browser = webdriver.Chrome(executable_path='/Users/thimios/AppData/Local/Google/chromedriver.exe', chrome_options=option)

# go to website of interest
for i in range(10, 50, 10):
    Websites = ["https://www.tripadvisor.ca/Hotel_Review-g190479-d3587956-Reviews-or" + str(i) + "-The_Thief-Oslo_Eastern_Norway.html#REVIEWS"]
    print(Websites)
    for theurl in Websites:
        thepage = browser.get(theurl)
        thepage1 = urllib.request.urlopen(theurl)
        soup = BeautifulSoup(thepage1, "html.parser")

        # wait up to 10 seconds for the page to load
        timeout = 5
        try:
            WebDriverWait(browser, timeout).until(EC.visibility_of_element_located((By.XPATH, '//*[@id="HEADING"]')))
        except TimeoutException:
            print("Timed out waiting for page to load")
            browser.quit()

        # Extract the helpful votes, hotel reviews
        helpcountarray = ""
        hotelreviewsarray = ""
        for profile in soup.findAll(attrs={"class": "memberBadging g10n"}):
            image = profile.text.replace("\n", "|||||").strip()
            if image.find("helpful vote") > 0:
                counter = re.findall('\d+', image.split("helpful vote", 1)[0].strip()[-4:])
                if len(helpcountarray) == 0:
                    helpcountarray = [counter]
                else:
                    helpcountarray.append(counter)
            elif image.find("helpful vote") < 0:
                if len(helpcountarray) == 0:
                    helpcountarray = ["0"]
                else:
                    helpcountarray.append("0")
            print(helpcountarray)
            if image.find("hotel reviews") > 0:
                counter = re.findall('\d+', image.split("hotel reviews", 1)[0].strip()[-4:])
                if len(hotelreviewsarray) == 0:
                    hotelreviewsarray = counter
                else:
                    hotelreviewsarray.append(counter)
            elif image.find("hotel reviews") < 0:
                if len(hotelreviewsarray) == 0:
                    hotelreviewsarray = ['0']
                else:
                    hotelreviewsarray.append("0")
            print(hotelreviewsarray)

        hotel_element = browser.find_elements_by_xpath('//*[@id="HEADING"]')
        Address_element = browser.find_elements_by_xpath('//*[@id="HEADING_GROUP"]/div/div[3]/address/div/div[1]')
        for i in range(0, 10):
            print(i)
            for x in hotel_element:
                hotel = x.text
                print(hotel)
            for y in Address_element:
                Address = y.text.replace(',', '').replace('\n', '').strip()
                print(Address)
            HelpCount = helpcountarray[i]
            HelpCount = " ".join(str(w) for w in HelpCount)
            print(HelpCount)
            HotelCount = hotelreviewsarray[i]
            HotelCount = " ".join(str(w) for w in HotelCount)
            print(HotelCount)
            Reviewer = soup.findAll(attrs={"class": "username mo"})[i].text.replace(',', ' ').replace('”', '').replace('“', '').replace('"', '').strip()
            print(Reviewer)
            Record2 = hotel + "," + Address + "," + HelpCount + "," + HotelCount + "," + Reviewer
            if Checker == "REVIEWS":
                file2.write(bytes(Record2, encoding="ascii", errors='ignore') + b"\n")

file2.close()
I read somewhere that I should add a header, something like:
headers={'user-agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'}
in order for the website to allow me to scrape it. Is that true?
Thanks for your help
Yes, there is such a possibility.
Websites use various techniques to prevent web scraping, such as detecting and disallowing bots from crawling (viewing) their pages.
The default User-Agent typically identifies the request as coming from automated Python software, so you will want to change it to a browser-like User-Agent.
Even so, I do not believe you were blocked by TripAdvisor.
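A hedged sketch of sending such a header with requests (reusing the example User-Agent string from the question):
import requests

headers = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'}
response = requests.get('https://www.tripadvisor.ca/', headers=headers)
print(response.status_code)  # 200 if the request was accepted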
Try to slow down the downloading by
import time
...
time.sleep(1)
Better yet, slow it down realistically, using backoff, so the target website doesn't think you're a bot...
import time
import requests

for term in ["web scraping", "web crawling", "scrape this site"]:
    t0 = time.time()
    r = requests.get("http://example.com/search", params=dict(
        query=term
    ))
    response_delay = time.time() - t0
    time.sleep(10 * response_delay)  # wait 10x longer than it took them to respond
source:
https://blog.hartleybrody.com/web-scraping-cheat-sheet/#delays-and-backing-off

Take screenshot of full page with Selenium Python with chromedriver

After trying out various approaches... I have stumbled upon this page to take a full-page screenshot with chromedriver, selenium and python.
The original code is here. (and I copy the code in this posting below)
It uses PIL and it works great! However, there is one issue: it captures fixed headers, repeats them for the whole page, and misses some parts of the page during the page change. Sample URL to take a screenshot of:
http://www.w3schools.com/js/default.asp
How can the repeated headers be avoided with this code? Or is there any better option which uses Python only? (I don't know Java and do not want to use Java.)
Please see the screenshot of the current result and sample code below.
test.py
"""
This script uses a simplified version of the one here:
https://snipt.net/restrada/python-selenium-workaround-for-full-page-screenshot-using-chromedriver-2x/
It contains the *crucial* correction added in the comments by Jason Coutu.
"""
import sys
from selenium import webdriver
import unittest
import util
class Test(unittest.TestCase):
""" Demonstration: Get Chrome to generate fullscreen screenshot """
def setUp(self):
self.driver = webdriver.Chrome()
def tearDown(self):
self.driver.quit()
def test_fullpage_screenshot(self):
''' Generate document-height screenshot '''
#url = "http://effbot.org/imagingbook/introduction.htm"
url = "http://www.w3schools.com/js/default.asp"
self.driver.get(url)
util.fullpage_screenshot(self.driver, "test.png")
if __name__ == "__main__":
unittest.main(argv=[sys.argv[0]])
util.py
import os
import time

from PIL import Image

def fullpage_screenshot(driver, file):
    print("Starting chrome full page screenshot workaround ...")
    total_width = driver.execute_script("return document.body.offsetWidth")
    total_height = driver.execute_script("return document.body.parentNode.scrollHeight")
    viewport_width = driver.execute_script("return document.body.clientWidth")
    viewport_height = driver.execute_script("return window.innerHeight")
    print("Total: ({0}, {1}), Viewport: ({2},{3})".format(total_width, total_height, viewport_width, viewport_height))

    rectangles = []
    i = 0
    while i < total_height:
        ii = 0
        top_height = i + viewport_height
        if top_height > total_height:
            top_height = total_height
        while ii < total_width:
            top_width = ii + viewport_width
            if top_width > total_width:
                top_width = total_width
            print("Appending rectangle ({0},{1},{2},{3})".format(ii, i, top_width, top_height))
            rectangles.append((ii, i, top_width, top_height))
            ii = ii + viewport_width
        i = i + viewport_height

    stitched_image = Image.new('RGB', (total_width, total_height))
    previous = None
    part = 0
    for rectangle in rectangles:
        if previous is not None:
            driver.execute_script("window.scrollTo({0}, {1})".format(rectangle[0], rectangle[1]))
            print("Scrolled To ({0},{1})".format(rectangle[0], rectangle[1]))
            time.sleep(0.2)
        file_name = "part_{0}.png".format(part)
        print("Capturing {0} ...".format(file_name))
        driver.get_screenshot_as_file(file_name)
        screenshot = Image.open(file_name)
        if rectangle[1] + viewport_height > total_height:
            offset = (rectangle[0], total_height - viewport_height)
        else:
            offset = (rectangle[0], rectangle[1])
        print("Adding to stitched image with offset ({0}, {1})".format(offset[0], offset[1]))
        stitched_image.paste(screenshot, offset)
        del screenshot
        os.remove(file_name)
        part = part + 1
        previous = rectangle

    stitched_image.save(file)
    print("Finishing chrome full page screenshot workaround...")
    return True
This answer improves upon prior answers by am05mhz and Javed Karim.
It assumes headless mode, and that a window-size option was not initially set. Before calling this function, ensure the page has loaded fully or sufficiently.
It attempts to set both the width and the height to what is necessary. The screenshot of the entire page can sometimes include a needless vertical scrollbar. One way to generally avoid the scrollbar is to take a screenshot of the body element instead. After saving a screenshot, it reverts the size to what it was originally, failing which the size for the next screenshot may not be set correctly.
Ultimately this technique may still not work perfectly well for some examples.
from selenium import webdriver

def save_screenshot(driver: webdriver.Chrome, path: str = '/tmp/screenshot.png') -> None:
    # Ref: https://stackoverflow.com/a/52572919/
    original_size = driver.get_window_size()
    required_width = driver.execute_script('return document.body.parentNode.scrollWidth')
    required_height = driver.execute_script('return document.body.parentNode.scrollHeight')
    driver.set_window_size(required_width, required_height)
    # driver.save_screenshot(path)  # has scrollbar
    driver.find_element_by_tag_name('body').screenshot(path)  # avoids scrollbar
    driver.set_window_size(original_size['width'], original_size['height'])
If using Python older than 3.6, remove the type annotations from the function definition.
Screenshots are limited to the viewport, but you can get around this by capturing the body element, as the webdriver will capture the entire element even if it is larger than the viewport. This saves you from having to deal with scrolling and stitching images; however, you might see problems with the footer position (like in the screenshot below).
Tested on Windows 8 and Mac High Sierra with Chrome Driver.
from selenium import webdriver
url = 'https://stackoverflow.com/'
path = '/path/to/save/in/scrape.png'
driver = webdriver.Chrome()
driver.get(url)
el = driver.find_element_by_tag_name('body')
el.screenshot(path)
driver.quit()
Returns: (full size: https://i.stack.imgur.com/ppDiI.png)
How it works: set the browser height as long as you can...
# coding=utf-8
import time
from selenium import webdriver
from selenium.webdriver.chrome.options import Options

def test_fullpage_screenshot():
    chrome_options = Options()
    chrome_options.add_argument('--headless')
    chrome_options.add_argument('--start-maximized')
    driver = webdriver.Chrome(chrome_options=chrome_options)
    driver.get("yoururlxxx")
    time.sleep(2)

    # the element with the longest height on the page
    ele = driver.find_element("xpath", '//div[@class="react-grid-layout layout"]')
    total_height = ele.size["height"] + 1000
    driver.set_window_size(1920, total_height)  # the trick
    time.sleep(2)
    driver.save_screenshot("screenshot1.png")
    driver.quit()

if __name__ == "__main__":
    test_fullpage_screenshot()
from selenium import webdriver
driver = webdriver.Firefox()
driver.get('https://developer.mozilla.org/')
element = driver.find_element_by_tag_name('body')
element_png = element.screenshot_as_png
with open("test2.png", "wb") as file:
file.write(element_png)
This works for me. It saves the entire page as screenshot.
For more information you can read up the api docs:
http://selenium-python.readthedocs.io/api.html
The key is to turn on the headless mode!
No stitching required and no need for loading the page twice.
Full working code:
from selenium import webdriver

URL = 'http://www.w3schools.com/js/default.asp'

options = webdriver.ChromeOptions()
options.headless = True
driver = webdriver.Chrome(options=options)
driver.get(URL)

S = lambda X: driver.execute_script('return document.body.parentNode.scroll' + X)
driver.set_window_size(S('Width'), S('Height'))  # May need manual adjustment
driver.find_element_by_tag_name('body').screenshot('web_screenshot.png')
driver.quit()
This is practically the same code as posted by @Acumenus, with slight improvements.
Summary of my findings
I decided to post this anyway because I did not find an explanation about what is happening when the headless mode is turned off (the browser is displayed) for screenshot taking purposes.
As I tested (with the Chrome WebDriver), if headless mode is turned on, the screenshot is saved as desired. However, if headless mode is turned off, the saved screenshot has approximately the correct width and height, but the outcome varies case by case. Usually, the upper part of the page that is visible on the screen is saved, but the rest of the image is just plain white. There was also a case when trying to save this Stack Overflow thread using the above link; even the upper part was not saved, which interestingly was now transparent, while the rest was still white. The last case I noticed only once, with the given W3Schools link; there were no white parts, but the upper part of the page repeated until the end, including the header.
I hope this will help for many of those who for some reason are not getting the expected result as I did not see anyone explicitly explaining about the requirement of headless mode with this simple approach.
Only when I discovered the solution to this problem myself did I find a post by @vc2279 mentioning that the window of a headless browser can be set to any size (which seems to be true for the opposite case too). However, the solution in my post improves upon that in that it does not require repeated browser/driver opening or page reloading.
Further suggestions
If for some pages it does not work for you, I suggest trying to add time.sleep(seconds) before getting the size of the page. Another case would be if the page requires scrolling to the bottom to load further content, which can be solved by the scheight method from this post:
scheight = .1
while scheight < 9.9:
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight/%s);" % scheight)
    scheight += .01
Also, note that for some pages the content may not be in any of the top-level HTML tags like <html> or <body>, for example, YouTube uses <ytd-app> tag.
As a last note, I found one page that "returned" a screenshot that still had the horizontal scrollbar; the size of the window needed manual adjustment, i.e., the image width needed to be increased by 18 pixels, like so: S('Width')+18.
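To illustrate the custom-tag case, a hedged sketch along the lines of the code above (the <ytd-app> tag name comes from the YouTube note; whether it captures everything depends on the page):
from selenium import webdriver

options = webdriver.ChromeOptions()
options.headless = True
driver = webdriver.Chrome(options=options)
driver.get('https://www.youtube.com/')
S = lambda X: driver.execute_script('return document.body.parentNode.scroll' + X)
driver.set_window_size(S('Width'), S('Height'))
# screenshot the custom top-level element instead of <body>
driver.find_element_by_tag_name('ytd-app').screenshot('youtube_screenshot.png')
driver.quit()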
After learning the approach of @Moshisho, my full standalone working script is below (with sleep 0.2 added after each scroll and reposition).
import os
import time

from selenium import webdriver
from PIL import Image

def fullpage_screenshot(driver, file):
    print("Starting chrome full page screenshot workaround ...")
    total_width = driver.execute_script("return document.body.offsetWidth")
    total_height = driver.execute_script("return document.body.parentNode.scrollHeight")
    viewport_width = driver.execute_script("return document.body.clientWidth")
    viewport_height = driver.execute_script("return window.innerHeight")
    print("Total: ({0}, {1}), Viewport: ({2},{3})".format(total_width, total_height, viewport_width, viewport_height))

    rectangles = []
    i = 0
    while i < total_height:
        ii = 0
        top_height = i + viewport_height
        if top_height > total_height:
            top_height = total_height
        while ii < total_width:
            top_width = ii + viewport_width
            if top_width > total_width:
                top_width = total_width
            print("Appending rectangle ({0},{1},{2},{3})".format(ii, i, top_width, top_height))
            rectangles.append((ii, i, top_width, top_height))
            ii = ii + viewport_width
        i = i + viewport_height

    stitched_image = Image.new('RGB', (total_width, total_height))
    previous = None
    part = 0
    for rectangle in rectangles:
        if previous is not None:
            driver.execute_script("window.scrollTo({0}, {1})".format(rectangle[0], rectangle[1]))
            time.sleep(0.2)
            # pin the fixed header to the top of the document so it is not repeated
            driver.execute_script("document.getElementById('topnav').setAttribute('style', 'position: absolute; top: 0px;');")
            time.sleep(0.2)
            print("Scrolled To ({0},{1})".format(rectangle[0], rectangle[1]))
            time.sleep(0.2)
        file_name = "part_{0}.png".format(part)
        print("Capturing {0} ...".format(file_name))
        driver.get_screenshot_as_file(file_name)
        screenshot = Image.open(file_name)
        if rectangle[1] + viewport_height > total_height:
            offset = (rectangle[0], total_height - viewport_height)
        else:
            offset = (rectangle[0], rectangle[1])
        print("Adding to stitched image with offset ({0}, {1})".format(offset[0], offset[1]))
        stitched_image.paste(screenshot, offset)
        del screenshot
        os.remove(file_name)
        part = part + 1
        previous = rectangle

    stitched_image.save(file)
    print("Finishing chrome full page screenshot workaround...")
    return True

driver = webdriver.Chrome()
''' Generate document-height screenshot '''
# url = "http://effbot.org/imagingbook/introduction.htm"
url = "http://www.w3schools.com/js/default.asp"
driver.get(url)
fullpage_screenshot(driver, "test1236.png")
Not sure if people are still having this issue.
I've done a small hack that works pretty well and that plays nicely with dynamic zones. Hope it helps
# 1. get dimensions
browser = webdriver.Chrome(chrome_options=options)
browser.set_window_size(default_width, default_height)
browser.get(url)
time.sleep(sometime)
total_height = browser.execute_script("return document.body.parentNode.scrollHeight")
browser.quit()
# 2. get screenshot
browser = webdriver.Chrome(chrome_options=options)
browser.set_window_size(default_width, total_height)
browser.get(url)
browser.save_screenshot(screenshot_path)
Why not just get the width and height of the page and then resize the driver? It will be something like this:
total_width = driver.execute_script("return document.body.offsetWidth")
total_height = driver.execute_script("return document.body.scrollHeight")
driver.set_window_size(total_width, total_height)
driver.save_screenshot("SomeName.png")
This is going to make a screenshot of your entire page without the need to merge together different pieces.
You can achieve this by changing the CSS of the header before the screenshot:
topnav = driver.find_element_by_id("topnav")
driver.execute_script("arguments[0].setAttribute('style', 'position: absolute; top: 0px;')", topnav)
EDIT: Put this line after your window scroll:
driver.execute_script("document.getElementById('topnav').setAttribute('style', 'position: absolute; top: 0px;');")
So in your util.py it will be:
driver.execute_script("window.scrollTo({0}, {1})".format(rectangle[0], rectangle[1]))
driver.execute_script("document.getElementById('topnav').setAttribute('style', 'position: absolute; top: 0px;');")
If the site is using the header tag, you can do it with find_element_by_tag_name("header")
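A hedged sketch of that variant, assuming the fixed bar really is a <header> element:
header = driver.find_element_by_tag_name("header")
# pin the header to the top of the document so it shows up only once
driver.execute_script("arguments[0].setAttribute('style', 'position: absolute; top: 0px;')", header)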
I adapted the code for Python 3.6; maybe it will be useful for someone:
from io import BytesIO

from PIL import Image
from selenium import webdriver
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary

def testdenovoUIavailable(self):
    binary = FirefoxBinary("C:\\Mozilla Firefox\\firefox.exe")
    self.driver = webdriver.Firefox(firefox_binary=binary)
    verbose = 0

    # open page
    self.driver.get("http://yandex.ru")

    # hide fixed header
    # js_hide_header = ' var x = document.getElementsByClassName("topnavbar-wrapper ng-scope")[0];x[\'style\'] = \'display:none\';'
    # self.driver.execute_script(js_hide_header)

    # get total height of page
    js = 'return Math.max( document.body.scrollHeight, document.body.offsetHeight, document.documentElement.clientHeight, document.documentElement.scrollHeight, document.documentElement.offsetHeight);'
    scrollheight = self.driver.execute_script(js)
    if verbose > 0:
        print(scrollheight)

    slices = []
    offset = 0
    offset_arr = []

    # separate the full page into parts and take screenshots
    while offset < scrollheight:
        if verbose > 0:
            print(offset)
        # scroll through the page
        if (scrollheight - offset) < offset:
            # if this part of the page is the last one, scroll just to the rest of the page
            self.driver.execute_script("window.scrollTo(0, %s);" % (scrollheight - offset))
            offset_arr.append(scrollheight - offset)
        else:
            self.driver.execute_script("window.scrollTo(0, %s);" % offset)
            offset_arr.append(offset)
        # create image (in Python 3.6 use BytesIO)
        img = Image.open(BytesIO(self.driver.get_screenshot_as_png()))
        offset += img.size[1]
        # append the new screenshot to the array
        slices.append(img)
        if verbose > 0:
            self.driver.get_screenshot_as_file('screen_%s.jpg' % (offset))
            print(scrollheight)

    # create the final image
    screenshot = Image.new('RGB', (slices[0].size[0], scrollheight))
    offset = 0
    offset2 = 0
    # now glue all images together
    for img in slices:
        screenshot.paste(img, (0, offset_arr[offset2]))
        offset += img.size[1]
        offset2 += 1
    screenshot.save('test.png')
Source: https://pypi.org/project/Selenium-Screenshot/
from Screenshot import Screenshot_Clipping
from selenium import webdriver
import time
ob = Screenshot_Clipping.Screenshot()
driver = webdriver.Chrome()
url = "https://www.bbc.com/news/world-asia-china-51108726"
driver.get(url)
time.sleep(1)
img_url = ob.full_Screenshot(driver, save_path=r'.', image_name='Myimage.png')
driver.quit()
For Chrome, it's also possible to use the Chrome DevTools Protocol:
import base64
...

page_rect = browser.driver.execute_cdp_cmd("Page.getLayoutMetrics", {})
screenshot = browser.driver.execute_cdp_cmd(
    "Page.captureScreenshot",
    {
        "format": "png",
        "captureBeyondViewport": True,
        "clip": {
            "width": page_rect["contentSize"]["width"],
            "height": page_rect["contentSize"]["height"],
            "x": 0,
            "y": 0,
            "scale": 1
        }
    })
with open(path, "wb") as file:
    file.write(base64.urlsafe_b64decode(screenshot["data"]))
Credits
This works both in headless and non-headless mode.
Full page screenshots are not a part of the W3C spec. However, many web drivers implement their own endpoints to get a real full page screenshot. I found this method using geckodriver to be superior to the injected "screenshot, scroll, stitch" method, and far better than resizing the window in headless mode.
Example:
from selenium import webdriver
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.firefox.options import Options
options = Options()
options.headless = True
service = Service('/your/path/to/geckodriver')
driver = webdriver.Firefox(options=options, service=service)
driver.get('https://www.nytimes.com/')
driver.get_full_page_screenshot_as_file('example.png')
driver.close()
geckodriver (Firefox)
If you're using geckodriver, you can hit these methods:
driver.get_full_page_screenshot_as_file
driver.save_full_page_screenshot
driver.get_full_page_screenshot_as_png
driver.get_full_page_screenshot_as_base64
I've tested and confirmed these to be working on Selenium 4.07. I don't believe these functions are included in Selenium 3.
The best documentation I could find on these is in this merge
chromedriver (Chromium)
It appears that chromedriver has implemented its own full-page screenshot functionality:
https://chromium-review.googlesource.com/c/chromium/src/+/2300980
and the Selenium team appears to be aiming for support in Selenium 4:
https://github.com/SeleniumHQ/selenium/issues/8168
My first answer on Stack Overflow. I'm a newbie.
The other answers quoted by the fellow expert coders are awesome, and I'm not even in the competition. I'd just like to quote the steps from the following link: pypi.org.
Refer to the full-page screenshot section.
Open your command prompt and navigate to the directory where Python is installed:
cd "enter the directory"
Install the module using pip:
pip install Selenium-Screenshot
The above module works for Python 3.
Once the module is installed, try the following code by creating a separate file in Python IDLE:
from Screenshot import Screenshot_Clipping
from selenium import webdriver
ob = Screenshot_Clipping.Screenshot()
driver = webdriver.Chrome()
url = "https://github.com/sam4u3/Selenium_Screenshot/tree/master/test"
driver.get(url)
# the line below makes taking & saving screenshots very easy.
img_url=ob.full_Screenshot(driver, save_path=r'.', image_name='Myimage.png')
print(img_url)
driver.close()
driver.quit()
For Python using Selenium 4 and Chrome Driver
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.by import By
import time

def take_full_page_screenshot():
    # install chrome driver
    chrome_driver_path = ChromeDriverManager().install()
    service = Service(chrome_driver_path)
    service.start()

    # set up chrome options
    options = webdriver.ChromeOptions()
    options.add_argument('--headless')
    options.add_argument('--incognito')
    options.add_argument('--start-maximized')
    options.add_argument('--disable-gpu')
    driver = webdriver.Chrome(service=service, options=options)

    # open url and wait for the page to load
    driver.get('https://www.stackoverflow.com')
    time.sleep(2)

    # find the element with the longest height on the page
    element = driver.find_element(By.TAG_NAME, 'body')
    total_height = element.size["height"] + 1000

    # set the window dimensions
    driver.set_window_size(1920, total_height)

    # save screenshot
    driver.save_screenshot("screenshot.png")

    # quit driver
    driver.quit()

if __name__ == '__main__':
    take_full_page_screenshot()
element = driver.find_element_by_tag_name('body')
element_png = element.screenshot_as_png
with open("test2.png", "wb") as file:
    file.write(element_png)
There was an error in line 2 of the code suggested earlier; here is the corrected version. Being a noob here, I'm not able to edit my own post yet.
Sometimes the above doesn't get the best results, so you can use another method to get the height of all elements and sum them to set the capture height, as below:
element = driver.find_elements_by_xpath("/html/child::*/child::*")
eheight = set()
for e in element:
    eheight.add(round(e.size["height"]))
print(eheight)
total_height = sum(eheight)
driver.execute_script("document.getElementsByTagName('html')[0].setAttribute('style', 'height:" + str(total_height) + "px')")
element = driver.find_element_by_tag_name('body')
element_png = element.screenshot_as_png
with open(fname, "wb") as file:
    file.write(element_png)
BTW, it works on FF.
You can use Splinter
Splinter is an abstraction layer on top of existing browser automation tools such as Selenium
There is a new feature browser.screenshot(..., full=True) in version 0.10.0.
The full=True option will make a full-page capture for you.
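A minimal usage sketch under those assumptions (Splinter 0.10.0+ with chromedriver available; as far as I know, screenshot() returns the path of the file it wrote):
from splinter import Browser

browser = Browser('chrome', headless=True)
browser.visit('http://www.w3schools.com/js/default.asp')
path = browser.screenshot(full=True)  # full-page capture
print(path)
browser.quit()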
Easy with Python, but slow:
import os
from selenium import webdriver
from PIL import Image

def full_screenshot(driver: webdriver):
    driver.execute_script("window.scrollTo(0, 0)")
    total_width = driver.execute_script("return document.body.offsetWidth")
    total_height = driver.execute_script("return document.body.parentNode.scrollHeight")
    viewport_width = driver.execute_script("return document.body.clientWidth")
    viewport_height = driver.execute_script("return window.innerHeight")

    rectangles = []
    i = 0
    while i < total_height:
        ii = 0
        top_height = i + viewport_height
        if top_height > total_height:
            top_height = total_height
        while ii < total_width:
            top_width = ii + viewport_width
            if top_width > total_width:
                top_width = total_width
            rectangles.append((ii, i, top_width, top_height))
            ii = ii + viewport_width
        i = i + viewport_height

    stitched_image = Image.new('RGB', (total_width, total_height))
    previous = None
    part = 0
    for rectangle in rectangles:
        if previous is not None:
            driver.execute_script("window.scrollTo({0}, {1})".format(rectangle[0], rectangle[1]))
        file_name = "part_{0}.png".format(part)
        driver.get_screenshot_as_file(file_name)
        screenshot = Image.open(file_name)
        if rectangle[1] + viewport_height > total_height:
            offset = (rectangle[0], total_height - viewport_height)
        else:
            offset = (rectangle[0], rectangle[1])
        stitched_image.paste(screenshot, offset)
        del screenshot
        os.remove(file_name)
        part = part + 1
        previous = rectangle

    return stitched_image
I'm currently using this approach:
def take_screenshot(self, driver, screenshot_name="debug.png"):
    elem = driver.find_element_by_tag_name('body')
    total_height = elem.size["height"] + 1000
    driver.set_window_size(1920, total_height)
    time.sleep(2)
    driver.save_screenshot(screenshot_name)
    return driver
If you are trying to do this post ~2021, you need to edit the find element command from:
element = driver.find_element_by_tag('body')
to:
from selenium.webdriver.common.by import By
...
element = driver.find_element(By.TAG_NAME, "body")
Slightly modifying @ihightower's and @A.Minachev's code to make it work on a Mac with a retina display:
import time
from PIL import Image
from io import BytesIO

def fullpage_screenshot(driver, file, scroll_delay=0.3):
    device_pixel_ratio = driver.execute_script('return window.devicePixelRatio')
    total_height = driver.execute_script('return document.body.parentNode.scrollHeight')
    viewport_height = driver.execute_script('return window.innerHeight')
    total_width = driver.execute_script('return document.body.offsetWidth')
    viewport_width = driver.execute_script("return document.body.clientWidth")

    # this implementation assumes (viewport_width == total_width)
    assert viewport_width == total_width

    # scroll the page, take screenshots and save screenshots to slices
    offset = 0  # height
    slices = {}
    while offset < total_height:
        if offset + viewport_height > total_height:
            offset = total_height - viewport_height
        driver.execute_script('window.scrollTo({0}, {1})'.format(0, offset))
        time.sleep(scroll_delay)
        img = Image.open(BytesIO(driver.get_screenshot_as_png()))
        slices[offset] = img
        offset = offset + viewport_height

    # combine image slices
    stitched_image = Image.new('RGB', (total_width * device_pixel_ratio, total_height * device_pixel_ratio))
    for offset, image in slices.items():
        stitched_image.paste(image, (0, offset * device_pixel_ratio))
    stitched_image.save(file)

fullpage_screenshot(driver, 'test.png')
I have modified jeremie-s' answer so that it only gets the url once.
browser = webdriver.Chrome(chrome_options=options)
browser.set_window_size(default_width, default_height)
browser.get(url)
height = browser.execute_script("return document.body.parentNode.scrollHeight")
# 2. get screenshot
browser.set_window_size(default_width, height)
browser.save_screenshot(screenshot_path)
browser.quit()
Got it!!! works like a charm
For NodeJS, but the concept is the same:
await driver.executeScript(`
document.documentElement.style.display = "table";
document.documentElement.style.width = "100%";
document.body.style.display = "table-row";
`);
await driver.findElement(By.css('body')).takeScreenshot();
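For completeness, roughly the same trick translated to Python with Selenium (my translation, same CSS-table idea as the NodeJS snippet above):
from selenium import webdriver

driver = webdriver.Chrome()
driver.get('http://www.w3schools.com/js/default.asp')
# lay the document out as a table so the body reports its full height
driver.execute_script("""
    document.documentElement.style.display = "table";
    document.documentElement.style.width = "100%";
    document.body.style.display = "table-row";
""")
driver.find_element_by_tag_name('body').screenshot('web_screenshot.png')
driver.quit()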
I have modified the answer given by @ihightower: instead of saving the screenshot in that function, return the total height and total width of the webpage, and then set the window size to that total height and width.
import time
from io import BytesIO

from PIL import Image
from selenium import webdriver
from selenium.webdriver.chrome.options import Options

def open_url(url):
    options = Options()
    options.headless = True
    driver = webdriver.Chrome(chrome_options=options)
    driver.maximize_window()
    driver.get(url)
    save_screenshot(driver, 'screen.png')

def save_screenshot(driver, file_name):
    height, width = scroll_down(driver)
    driver.set_window_size(width, height)
    img_binary = driver.get_screenshot_as_png()
    img = Image.open(BytesIO(img_binary))
    img.save(file_name)
    # print(file_name)
    print(" screenshot saved ")

def scroll_down(driver):
    total_width = driver.execute_script("return document.body.offsetWidth")
    total_height = driver.execute_script("return document.body.parentNode.scrollHeight")
    viewport_width = driver.execute_script("return document.body.clientWidth")
    viewport_height = driver.execute_script("return window.innerHeight")

    rectangles = []
    i = 0
    while i < total_height:
        ii = 0
        top_height = i + viewport_height
        if top_height > total_height:
            top_height = total_height
        while ii < total_width:
            top_width = ii + viewport_width
            if top_width > total_width:
                top_width = total_width
            rectangles.append((ii, i, top_width, top_height))
            ii = ii + viewport_width
        i = i + viewport_height

    previous = None
    part = 0
    for rectangle in rectangles:
        if previous is not None:
            driver.execute_script("window.scrollTo({0}, {1})".format(rectangle[0], rectangle[1]))
            time.sleep(0.5)
            # time.sleep(0.2)
        if rectangle[1] + viewport_height > total_height:
            offset = (rectangle[0], total_height - viewport_height)
        else:
            offset = (rectangle[0], rectangle[1])
        previous = rectangle

    return (total_height, total_width)

open_url("https://www.medium.com")
This works for me
import time

from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By

s = Service("/opt/homebrew/bin/chromedriver")
chrome_options = Options()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--start-maximized')
driver = webdriver.Chrome(options=chrome_options, service=s)
highest_ele = driver.find_element(By.XPATH, '//*[@id="react-app"]/div[3]/div[3]/span/span/span[2]')
total_height = highest_ele.location['y']
driver.set_window_size(height=total_height, width=1920)
time.sleep(1)
driver.save_screenshot('~/shot.png')  # replace with your path

How to scrape the images from the website?

How can we get all the images from this site: http://www.theft-alerts.com?
We need the images from all 19 pages. So far we have this code, but it doesn't work yet. We want the images in a new folder.
#!/usr/bin/python
import urllib2
from bs4 import BeautifulSoup
from urlparse import urljoin

url = "http://www.theft-alerts.com/index-%d.html"
page = urllib2.urlopen(url).read()
soup = BeautifulSoup(page, "html.parser")
base = "http://www.theft-alerts.com"

images = [urljoin(base, a["href"]) for a in soup.select("td a[href^=images/]")]
for url in images:
    img = BeautifulSoup(urllib2.urlopen(url).read(), "lxml").find("img")["src"]
    with open("myimages/{}".format(img), "w") as f:
        f.write(urllib2.urlopen("{}/{}".format(url.rsplit("/", 1)[0], img)).read())
IMAGE SCRAPING WITH PYTHON
This code will surely work for scraping Google Images.
import os
import time

import requests
from selenium import webdriver

def fetch_image_urls(query: str, max_links_to_fetch: int, wd: webdriver, sleep_between_interactions: int = 1):
    def scroll_to_end(wd):
        wd.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        time.sleep(sleep_between_interactions)

    # build the google query
    search_url = "https://www.google.com/search?safe=off&site=&tbm=isch&source=hp&q={q}&oq={q}&gs_l=img"

    # load the page
    wd.get(search_url.format(q=query))

    image_urls = set()
    image_count = 0
    results_start = 0
    while image_count < max_links_to_fetch:
        scroll_to_end(wd)

        # get all image thumbnail results
        thumbnail_results = wd.find_elements_by_css_selector("img.Q4LuWd")
        number_results = len(thumbnail_results)
        print(f"Found: {number_results} search results. Extracting links from {results_start}:{number_results}")

        for img in thumbnail_results[results_start:number_results]:
            # try to click every thumbnail so that we can get the real image behind it
            try:
                img.click()
                time.sleep(sleep_between_interactions)
            except Exception:
                continue

            # extract image urls
            actual_images = wd.find_elements_by_css_selector('img.n3VNCb')
            for actual_image in actual_images:
                if actual_image.get_attribute('src') and 'http' in actual_image.get_attribute('src'):
                    image_urls.add(actual_image.get_attribute('src'))

            image_count = len(image_urls)
            if len(image_urls) >= max_links_to_fetch:
                print(f"Found: {len(image_urls)} image links, done!")
                break
        else:
            print("Found:", len(image_urls), "image links, looking for more ...")
            time.sleep(30)
            load_more_button = wd.find_element_by_css_selector(".mye4qd")
            if load_more_button:
                wd.execute_script("document.querySelector('.mye4qd').click();")

        # move the result startpoint further down
        results_start = len(thumbnail_results)

    return image_urls

def persist_image(folder_path: str, url: str, counter):
    try:
        image_content = requests.get(url).content
    except Exception as e:
        print(f"ERROR - Could not download {url} - {e}")

    try:
        f = open(os.path.join(folder_path, 'jpg' + "_" + str(counter) + ".jpg"), 'wb')
        f.write(image_content)
        f.close()
        print(f"SUCCESS - saved {url} - as {folder_path}")
    except Exception as e:
        print(f"ERROR - Could not save {url} - {e}")

def search_and_download(search_term: str, driver_path: str, target_path='./images', number_images=10):
    target_folder = os.path.join(target_path, '_'.join(search_term.lower().split(' ')))
    if not os.path.exists(target_folder):
        os.makedirs(target_folder)
    with webdriver.Chrome(executable_path=driver_path) as wd:
        res = fetch_image_urls(search_term, number_images, wd=wd, sleep_between_interactions=0.5)
    counter = 0
    for elem in res:
        persist_image(target_folder, elem, counter)
        counter += 1

# How to execute this code
# Step 1: pip install selenium pillow requests
# Step 2: make sure you have Chrome installed on your machine
# Step 3: check your Chrome version (go to the three dots, then Help, then About Google Chrome)
# Step 4: download the matching chromedriver from "https://chromedriver.storage.googleapis.com/index.html"
# Step 5: put it inside the same folder as this code
DRIVER_PATH = './chromedriver'
search_term = 'iphone'
# number of images: you can pass it from here; by default it's 10 if you are not passing it
# number_images = 10
search_and_download(search_term=search_term, driver_path=DRIVER_PATH)
You need to loop over every page and extract the images. You can keep looping until the last anchor in the code tag with the class resultnav no longer has the text "Next":
import requests
from bs4 import BeautifulSoup
from urlparse import urljoin
def get_pages(start):
    soup = BeautifulSoup(requests.get(start).content)
    images = [img["src"] for img in soup.select("div.itemspacingmodified a img")]
    yield images
    nxt = soup.select("code.resultnav a")[-1]
    while True:
        soup = BeautifulSoup(requests.get(urljoin(url, nxt["href"])).content)
        nxt = soup.select("code.resultnav a")[-1]
        if nxt.text != "Next":
            break
        yield [img["src"] for img in soup.select("div.itemspacingmodified a img")]

url = "http://www.theft-alerts.com/"
for images in get_pages(url):
    print(images)
Which gives you the images from all 19 pages.
