Only get images of certain size with BeautifulSoup - python

So I'm trying to do a small crawler to just pick a few Google-search images links and then download them. It's not going to be anything that needs to run 1000 times a day with 1000 queries, but just a simple script to download 10 of the first images for a certain search word.
For that I have the following code:
# Question code: scrape the legacy Google Image search (/search?tbm=isch) HTML.
import requests
from bs4 import BeautifulSoup
import json
import urllib  # Python 2: urllib.urlretrieve is used later to download the images

# One shared session so the browser-like User-Agent is sent with every request.
s = requests.session()
s.headers.update({"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36"})
URL = "https://www.google.dk/search"
def get_images(query, start):
    """Return the original-image URLs ('ou' field) from one results page."""
    search_params = {
        "q": query,
        "sa": "X",
        "biw": 1920,   # pretended screen width
        "bih": 1080,   # pretended screen height
        "tbm": "isch",
        "ijn": start / 100,
        "start": start,
        # "ei": "" - This seems like a unique ID, you might want to use it to
        # avoid getting banned. But you probably still are.
    }
    page = s.get(URL, params=search_params)
    markup = BeautifulSoup(page.text, "lxml")
    # Each rg_meta div holds a JSON blob; 'ou' is the original image URL.
    return [json.loads(meta.text)['ou']
            for meta in markup.findAll("div", {"class": "rg_meta"})]
So basically I get a list of links I can then parse through and download via this code where it even names the images from 1 to how many there is now being crawled:
searchlist = ["cats"]  # search strings
nr_img = 5  # number of images to be crawled
for k, searchstring in enumerate(searchlist):
    images = get_images(searchstring, 0)
    img_nr_list = []
    for n, x in enumerate(images[0:nr_img]):
        n += 1 + k * nr_img  # file number unique across all search terms
        # BUG FIX: "\foo\bar\{}.jpg" contained "\f" (form feed) and "\b"
        # (backspace) escape sequences; a raw string keeps the backslashes.
        urllib.urlretrieve(x, r"\foo\bar\{}.jpg".format(n))
        img_nr_list.append("{}.jpg".format(n))
In principle it's pretty straightforward. However, my problem is that some images are just thumbnails, or just have a small image size. So my question is: is there a way in which I can say something like "if width < 600px and height < 400px then skip", or something like that?

I don't know how to do it with beautifulsoup, but there is another python library called ImageScraper that lets you define the max image size
https://pypi.python.org/pypi/ImageScraper
I only tested it out using the command line tool, as it's python 2.7 and I'm normally on python 3+

Related

Webscraping 1000's of links using Python concurrent.futures

I am trying to scrape data from about 1000's of links which have the same content and the same procedure to extract data. To speed up the process I am using the python's concurrent.futures, which I think is the best in terms of speed. When I scrape data from about 30 - 40 links as a trial, it works; but as the number increases it does not. Here is my code:
import re
import json
import requests
import concurrent.futures
import time
# Sample of the ~1000 RASFF notification endpoints; all return the same JSON schema.
links_json = ['https://webgate.ec.europa.eu/rasff-window/backend/public/notification/view/id/485387/',
              'https://webgate.ec.europa.eu/rasff-window/backend/public/notification/view/id/485256/',
              'https://webgate.ec.europa.eu/rasff-window/backend/public/notification/view/id/487113/',
              'https://webgate.ec.europa.eu/rasff-window/backend/public/notification/view/id/486733/',
              'https://webgate.ec.europa.eu/rasff-window/backend/public/notification/view/id/486937/',
              'https://webgate.ec.europa.eu/rasff-window/backend/public/notification/view/id/486946/',
              'https://webgate.ec.europa.eu/rasff-window/backend/public/notification/view/id/485444/',
              'https://webgate.ec.europa.eu/rasff-window/backend/public/notification/view/id/487258/',
              'https://webgate.ec.europa.eu/rasff-window/backend/public/notification/view/id/487011/',
              'https://webgate.ec.europa.eu/rasff-window/backend/public/notification/view/id/487254/']
MAX_THREADS = 30  # upper bound on concurrent requests
Data_Source = "RASFF"
# Results are collected into parallel module-level lists (one entry per link).
Product_Category = []
Date = []
Product_name = []
Reference = []
def scrape(links):
    """Fetch one notification URL and record its fields in the shared lists.

    BUG FIX: the JSON is now fully parsed into locals before anything is
    appended, so a request or key-lookup failure can no longer leave the four
    parallel lists with different lengths (the original appended one field at
    a time, and an exception mid-way desynchronised them -- the reported
    "sometimes 100, 67 values" symptom).
    """
    data = requests.get(links, timeout=30).json()  # timeout so a dead link can't hang a worker
    category = data["product"]["productCategory"]["description"]
    date = data["ecValidationDate"]
    name = data["product"]["description"]
    reference = data["reference"]
    Product_Category.append(category)
    Date.append(date)
    Product_name.append(name)
    Reference.append(reference)
def download_data(links_json):
    """Scrape every link concurrently using a bounded thread pool."""
    worker_count = min(MAX_THREADS, len(links_json))
    with concurrent.futures.ThreadPoolExecutor(max_workers=worker_count) as pool:
        pool.map(scrape, links_json)
def main(new_links):
    """Run the whole crawl and report the elapsed wall-clock time."""
    started = time.time()
    download_data(new_links)
    finished = time.time()
    print(f"{finished-started} seconds to crawl {len(new_links)} in total.")

main(links_json)
When I try to run the main function, it is very inconsistent. Also right now there are only 12 links to scrape but as the links increase the data that should be extracted in the list also decreases. For instance: if there are about 200 links, there should be 200 values in the Product_category list but there are sometimes 100, 67 etc., meaning it is very inconsistent. I am not sure if I am missing something. I have even tried adding the time.sleep(0.25) in the scrape function but it does not work. I don't know how I can provide a list of 500 - 1000 links here.
Here's an example of how one could do this using the threading module:-
import requests
import threading

# Results are collected into parallel module-level lists, one entry per link.
Product_Category = []
Date = []
Product_name = []
Reference = []

AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_5_1) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.2 Safari/605.1.15'
BASEURL = 'https://webgate.ec.europa.eu/rasff-window/backend/public/notification/view/id/'
# Guards the four lists above so each notification's fields stay aligned across threads.
LOCK = threading.Lock()

headers = {'User-Agent': AGENT}

# Notification ids; the full URL is BASEURL + id + '/'.
links = ['485387',
         '485256',
         '487113',
         '486733',
         '486937',
         '486946',
         '485444',
         '487258',
         '487011',
         '487254']
def scrape(session, link):
    """Fetch one notification id and append its fields to the shared lists.

    All four appends happen while holding LOCK, so the parallel result lists
    stay aligned across threads.
    """
    response = session.get(f'{BASEURL}{link}/', headers=headers)
    response.raise_for_status()
    payload = response.json()  # renamed from `json` to stop shadowing the stdlib module name
    # Idiomatic `with` replaces the manual acquire()/finally: release() pair.
    with LOCK:
        Product_Category.append(
            payload["product"]["productCategory"]["description"])
        Date.append(payload["ecValidationDate"])
        Product_name.append(payload["product"]["description"])
        Reference.append(payload["reference"])
def main():
    """Spawn one thread per link, wait for them all, then dump the results."""
    with requests.Session() as session:
        workers = []
        for link in links:
            worker = threading.Thread(target=scrape, args=(session, link))
            workers.append(worker)
            worker.start()
        for worker in workers:
            worker.join()
        print(Product_Category)
        print(Date)
        print(Product_name)
        print(Reference)

if __name__ == '__main__':
    main()

Can't force a script to try few times when it fails to grab title from a webpage

I've crated a script to get the title of different shops from some identical webpages. The script is doing fine.
I'm now trying to create a logic within the script to let it try few times if somehow it fails to grab the titles from those pages.
As a test, if I define the line with selector otherwise, as in name = soup.select_one(".sales-info > h").text, the script will go for looping indefinitely.
I've tried so far with:
import requests
from bs4 import BeautifulSoup

# Placeholder pages; the real targets are identical shop pages.
links = (
    'https://www.yellowpages.com/san-francisco-ca/mip/nizarios-pizza-481135933',
    'https://www.yellowpages.com/nationwide/mip/credo-452182701'
)

def get_title(s, link):
    """Return the shop title from one page, retrying on any failure.

    NOTE(review): the recursion below has no depth limit -- a selector that
    never matches (the question mentions ".sales-info > h") makes this
    recurse until Python's recursion limit. Bounding it is exactly what the
    question asks for.
    """
    r = s.get(link)
    soup = BeautifulSoup(r.text, "lxml")
    try:
        name = soup.select_one(".sales-info > h1").text
    except Exception:
        print("trying again")
        return get_title(s, link)  # I wish to bring about any change here to let the script try few times other than trying indefinitely
    return name

if __name__ == '__main__':
    with requests.Session() as s:
        s.headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36'
        for link in links:
            print(get_title(s, link))
How can I let the script try few times when it fails to grab title from a webpage?
PS The webpages that I've used within the script are placeholders.
I added some parameters to specify number of retries, sleep between retries and default value to return if everything fails:
import time
import requests
from bs4 import BeautifulSoup

# Demo pages; the "h8" selector used below is deliberately wrong so every try fails.
links = (
    'https://www.webscraper.io/test-sites/e-commerce/allinone',
    'https://www.webscraper.io/test-sites/e-commerce/static'
)
def get_title(s, link, retries=3, sleep=1, default=''):
    """
    s -> session
    link -> url
    retries -> number of retries before return default value
    sleep -> sleep between tries (in seconds)
    default -> default value to return if every retry fails
    """
    name, current_retry = default, 0
    while current_retry != retries:
        r = s.get(link)
        soup = BeautifulSoup(r.text, "lxml")
        try:
            name = soup.select_one("h8").text
        except Exception:
            print("Retry {}/{}".format(current_retry + 1, retries))
            time.sleep(sleep)
            current_retry += 1
        else:
            # BUG FIX: on a successful grab the original neither incremented
            # current_retry nor returned, so it re-fetched the page forever.
            # Return as soon as the title is found.
            return name
    return name
if __name__ == '__main__':
    with requests.Session() as s:
        # Browser-like UA so the sites don't reject the default requests one.
        s.headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36'
        for link in links:
            # 3 retries, 1 s apart; fall back to a descriptive failure string.
            print(get_title(s, link, 3, 1, 'Failed to grab {}'.format(link)))
Prints:
Retry 1/3
Retry 2/3
Retry 3/3
Failed to grab https://www.webscraper.io/test-sites/e-commerce/allinone
Retry 1/3
Retry 2/3
Retry 3/3
Failed to grab https://www.webscraper.io/test-sites/e-commerce/static
I think the simplest way would be to switch from recursion to a loop:
def get_title(s, link):
    """Try up to five times to fetch and parse the title; None on failure."""
    attempts = 0
    while attempts < 5:
        try:
            page = s.get(link)
            soup = BeautifulSoup(page.text, "lxml")
            return soup.select_one(".sales-info > h1").text
        except Exception:  # Best to specify which one, by the way
            attempts += 1
    print('Failed too many times')
    return None
You can try to use any retrying library, such as tenacity, backoff. Notice that these libraries usually function as decorators and your function will simply need to make the import and then call the decorator in a similar fashion to:
import requests
from bs4 import BeautifulSoup
from tenacity import retry ###or import backoff
...
@retry ###or @backoff.on_exception(backoff.expo, requests.exceptions.RequestException)
def get_title(s, link, retries=3, sleep=1, default=''):
...
You can achieve the same in different ways. Here is another you might wanna consider trying:
import time
import requests
from bs4 import BeautifulSoup

# Placeholder target pages (identical shop-page layouts).
links = [
    "https://www.yellowpages.com/san-francisco-ca/mip/nizarios-pizza-481135933",
    "https://www.yellowpages.com/nationwide/mip/credo-452182701"
]
def get_title(s, link, counter=0):
    """Recursively retry grabbing the title (up to 4 attempts total); None when exhausted."""
    response = s.get(link)
    soup = BeautifulSoup(response.text, "lxml")
    try:
        return soup.select_one(".sales-info > h").text
    except Exception:
        if counter > 3:
            return None
        time.sleep(1)
        print("done trying {} times".format(counter))
        return get_title(s, link, counter + 1)
if __name__ == '__main__':
    with requests.Session() as s:
        # Browser-like UA so the sites don't reject the default requests one.
        s.headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36'
        for link in links:
            print(get_title(s,link))

button click prior to scraping html table

Hi I am trying to scape an HTML table and I have working code.
The one URL, however, contains two html tables. The first table contains "quarterly" numbers and loads by default with the url. When you click the button above the table, you can switch to the second table with "annual" numbers.
My code only picks up first default (quarterly) table that appears when the url loads.
How can I get my python code to scrape the second "annual" table? Can selenium do this? If so could anyone provide any guidance?
#!/usr/local/bin/python3
import requests
import pandas as pd

# Browser-like User-Agent: investing.com rejects the default requests UA.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:74.0) Gecko/20100101 Firefox/74.0'}
r = requests.get("https://www.investing.com/equities/exxon-mobil-income-statement", headers=headers)
# read_html returns every <table> on the page; index 1 is the default
# quarterly table -- the annual one only appears after clicking "Annual",
# which plain requests cannot do (hence the question).
df = pd.read_html(r.content)[1]
print(df)
Many thanks
Yes,
You can do it with selenium.
# Fragment: assumes `driver` is an already-created selenium WebDriver -- TODO confirm.
driver.get("https://www.investing.com/equities/exxon-mobil-income-statement")
# Locate the "Annual" toggle above the table by its CSS path and click it.
annual_button = driver.find_element_by_css_selector("#leftColumn > div.alignBottom > div.float_lang_base_1 > a:nth-child(1)")
annual_button.click()
# Dump the now-annual table's inner HTML.
print(driver.find_element_by_css_selector("#rrtable > table").get_attribute('innerHTML'))
Here's a python code for that.
What does it do? It enters the page, finds the annual_button element by its CSS selector and then clicks it. Then, it finds the table by its CSS selector and prints its HTML.
Hope it helps.
After much googling and some other stack posts, finally got this working:
from selenium import webdriver
from bs4 import BeautifulSoup
import pandas as pd

# Drive Firefox via geckodriver, click the "Annual" toggle, then hand the
# rendered table HTML to pandas.
browser = webdriver.Firefox(executable_path=r'/Users/xxxxxx/Documents/python/web_drivers/geckodriver')
browser.get('https://www.investing.com/equities/exxon-mobil-income-statement')
linkElem = browser.find_element_by_link_text('Annual')
linkElem.click()
# outerHTML keeps the <table> wrapper so pandas can parse it.
r = browser.find_element_by_css_selector("#rrtable > table").get_attribute('outerHTML')
browser.quit()
soup = BeautifulSoup(r, 'html.parser')
df = pd.read_html(str(soup))[0]
print(df)
Try the following:
' Dumps every HTML <table> on the investing.com page into Sheet1,
' driving an InternetExplorer automation object.
Sub Web_Table()
    Dim HTMLDoc As New HTMLDocument
    Dim objTable As Object
    Dim lRow As Long
    Dim lngTable As Long
    Dim lngRow As Long
    Dim lngCol As Long
    Dim ActRw As Long
    Dim objIE As InternetExplorer
    Set objIE = New InternetExplorer
    objIE.Navigate "https://www.investing.com/equities/exxon-mobil-income-statement"
    ' Wait until the page has finished loading.
    Do Until objIE.ReadyState = 4 And Not objIE.Busy
        DoEvents
    Loop
    Application.Wait (Now + TimeValue("0:00:03")) 'wait for java script to load
    ' Snapshot the rendered DOM into a local HTMLDocument.
    HTMLDoc.body.innerHTML = objIE.Document.body.innerHTML
    With HTMLDoc.body
        Set objTable = .getElementsByTagName("table")
        ' Walk every table, row and cell, writing each value to the sheet.
        For lngTable = 0 To objTable.Length - 1
            For lngRow = 0 To objTable(lngTable).Rows.Length - 1
                For lngCol = 0 To objTable(lngTable).Rows(lngRow).Cells.Length - 1
                    ThisWorkbook.Sheets("Sheet1").Cells(ActRw + lngRow + 1, lngCol + 1) = objTable(lngTable).Rows(lngRow).Cells(lngCol).innerText
                Next lngCol
            Next lngRow
            ' Leave a blank row between consecutive tables.
            ActRw = ActRw + objTable(lngTable).Rows.Length + 1
        Next lngTable
    End With
    objIE.Quit
End Sub

How can I click this button via selenium, it always says list object has no attribute 'click'

I want to click the "show more results" button in google image search via selenium.
The page is https://www.google.co.jp/search?q=circle&source=lnms&tbm=isch&sa=X&ved=0ahUKEwifxd-wqr_VAhXEopQKHQpSB_AQ_AUICigB&biw=1249&bih=927
The page should be scroll down 4 times so that you can see the button.
the html code is :
[screenshot of the HTML omitted -- only the image placeholder text survived the copy]
my code is below
from selenium import webdriver
import time
class ImgCrawler:
    """Drives Chrome to open a Google Image search page, scroll, and page onward."""

    def __init__(self, searchlink=None):
        self.link = searchlink
        self.soupheader = {'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3176.2 Safari/537.36"}
        self.scrolldown = None
        self.jsdriver = None

    def getChromeCanary(self):
        """Launch chromedriver and open the search link."""
        self.jsdriver = webdriver.Chrome("f:\software\python\chromedriver_win32\chromedriver.exe")
        self.jsdriver.implicitly_wait(30)
        self.jsdriver.get(self.link)

    def scrollDownUseChromeCanary(self, scrolltimes=1, sleeptime=10):
        """Scroll to the page bottom `scrolltimes` times, pausing between scrolls."""
        for i in range(scrolltimes):
            self.jsdriver.execute_script('window.scrollTo(0,document.body.scrollHeight);')
            time.sleep(sleeptime)

    def clickNextPage(self):
        """Click the 'show more results' button.

        BUG FIX: find_elements_by_css_selector (plural) returns a *list*,
        which has no .click() -- the reported error. Use the singular
        find_element_by_css_selector, which returns a single WebElement.
        """
        return self.jsdriver.find_element_by_css_selector("input.ksb._kvc").click()
if __name__ == '__main__':
    # Search page URL for "circle"; four scroll-downs are needed before the
    # "show more results" button is rendered.
    weblink = "https://www.google.com.hk/search?hl=en&site=imghp&tbm=isch&source=hp&biw=1461&bih=950&q=circle&oq=circle&gs_l=img.3..0l10.1497.3829.0.4548.10.9.1.0.0.0.185.1136.0j7.7.0....0...1.1.64.img..2.7.974...0i10k1.4YZkQiWXzGo"
    img = ImgCrawler(weblink)
    img.getChromeCanary()
    img.scrollDownUseChromeCanary(4,5)
    img.clickNextPage()
You need to point the first element of your query :
self.jsdriver.find_elements_by_css_selector("input.ksb._kvc")[0].click()
http://selenium-python.readthedocs.io/locating-elements.html
To find multiple elements (these methods will return a list):
find_elements_...() methods usually used
to get list of WebElements for following iteration through it:
[element.text for element in driver.find_elements_by_tag_name('a')]
to locate element by index if there is no unique identifier:
driver.find_elements_by_class_name('some_class_name')[1]
In other cases you can simply use find_element_...() methods that returns single WebElement
Try to replace
self.jsdriver.find_elements_by_css_selector("input.ksb._kvc").click()
with
self.jsdriver.find_element_by_css_selector("input.ksb._kvc").click()

Python - Download Images from google Image search?

I want to download all Images of google image search using python . The code I am using seems to have some problem some times .My code is
import os
import sys
import time
from urllib import FancyURLopener
import urllib2
import simplejson
# Define search term
searchTerm = "parrot"
# Replace spaces ' ' in search term for '%20' in order to comply with request
searchTerm = searchTerm.replace(' ','%20')
# Start FancyURLopener with defined version
class MyOpener(FancyURLopener):
version = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11'
myopener = MyOpener()
# Set count to 0
count= 0
for i in range(0,10):
# Notice that the start changes for each iteration in order to request a new set of images for each loop
url = ('https://ajax.googleapis.com/ajax/services/search/images?' + 'v=1.0& q='+searchTerm+'&start='+str(i*10)+'&userip=MyIP')
print url
request = urllib2.Request(url, None, {'Referer': 'testing'})
response = urllib2.urlopen(request)
# Get results using JSON
results = simplejson.load(response)
data = results['responseData']
dataInfo = data['results']
# Iterate for each result and get unescaped url
for myUrl in dataInfo:
count = count + 1
my_url = myUrl['unescapedUrl']
myopener.retrieve(myUrl['unescapedUrl'],str(count)+'.jpg')
After downloading few pages I am getting an error as follows:
Traceback (most recent call last):
File "C:\Python27\img_google3.py", line 37, in <module>
dataInfo = data['results']
TypeError: 'NoneType' object has no attribute '__getitem__'
What to do ??????
I have modified my code. Now the code can download 100 images for a given query, and images are full high resolution that is original images are being downloaded.
I am downloading the images using urllib2 & Beautiful soup
from bs4 import BeautifulSoup
import requests
import re
import urllib2
import os
import cookielib
import json
def get_soup(url, header):
    """GET `url` with the given headers and return the page parsed by html.parser."""
    response = urllib2.urlopen(urllib2.Request(url, headers=header))
    return BeautifulSoup(response, 'html.parser')
# Python 2: prompt for the search term and build the image-search URL.
query = raw_input("query image")# you can change the query for the image here
image_type = "ActiOn"  # tag used in the saved filenames
query = query.split()
query = '+'.join(query)
url = "https://www.google.co.in/search?q="+query+"&source=lnms&tbm=isch"
print url
#add the directory for your image here
DIR = "Pictures"
header = {'User-Agent':"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36"
}
soup = get_soup(url,header)

ActualImages = []# contains the link for Large original images, type of image
# Each rg_meta div holds a JSON blob: 'ou' = original URL, 'ity' = image type.
for a in soup.find_all("div",{"class":"rg_meta"}):
    link , Type = json.loads(a.text)["ou"] ,json.loads(a.text)["ity"]
    ActualImages.append((link,Type))
print "there are total" , len(ActualImages),"images"

# Save into Pictures/<first word of query>/, creating both levels if missing.
if not os.path.exists(DIR):
    os.mkdir(DIR)
DIR = os.path.join(DIR, query.split()[0])
if not os.path.exists(DIR):
    os.mkdir(DIR)
###print images
for i , (img , Type) in enumerate( ActualImages):
try:
req = urllib2.Request(img, headers={'User-Agent' : header})
raw_img = urllib2.urlopen(req).read()
cntr = len([i for i in os.listdir(DIR) if image_type in i]) + 1
print cntr
if len(Type)==0:
f = open(os.path.join(DIR , image_type + "_"+ str(cntr)+".jpg"), 'wb')
else :
f = open(os.path.join(DIR , image_type + "_"+ str(cntr)+"."+Type), 'wb')
f.write(raw_img)
f.close()
except Exception as e:
print "could not load : "+img
print e
i hope this helps you
The Google Image Search API is deprecated, you need to use the Google Custom Search for what you want to achieve. To fetch the images you need to do this:
import urllib2
import simplejson
import cStringIO
fetcher = urllib2.build_opener()
searchTerm = 'parrot'
startIndex = 0
# BUG FIX: startIndex is an int; concatenating it to a str raised TypeError.
# Convert it explicitly.
searchUrl = "http://ajax.googleapis.com/ajax/services/search/images?v=1.0&q=" + searchTerm + "&start=" + str(startIndex)
f = fetcher.open(searchUrl)
deserialized_output = simplejson.load(f)
This will give you 4 results, as JSON, you need to iteratively get the results by incrementing the startIndex in the API request.
To get the images you need to use a library like cStringIO.
For example, to access the first image, you need to do this:
# NOTE(review): this fragment also needs `import urllib` and PIL's Image
# (`from PIL import Image`) -- neither is imported in the snippet above.
imageUrl = deserialized_output['responseData']['results'][0]['unescapedUrl']
file = cStringIO.StringIO(urllib.urlopen(imageUrl).read())
img = Image.open(file)
Google deprecated their API, scraping Google is complicated, so I would suggest using Bing API instead to automatically download images. The pip package bing-image-downloader allows you to easily download an arbitrary number of images to a directory with a single line of code.
from bing_image_downloader import downloader
# `query_string` is the search term; downloads up to 100 images into ./dataset/<query>/.
downloader.download(query_string, limit=100, output_dir='dataset', adult_filter_off=True, force_replace=False, timeout=60, verbose=True)
Google is not so good, and Microsoft is not so evil
Here's my latest google image snarfer, written in Python, using Selenium and headless Chrome.
It requires python-selenium, the chromium-driver, and a module called retry from pip.
Link: http://sam.aiki.info/b/google-images.py
Example Usage:
google-images.py tiger 10 --opts isz:lt,islt:svga,itp:photo > urls.txt
# Download every URL in urls.txt with wget, $parallel at a time,
# numbering the files 0000.jpg, 0001.jpg, ...
parallel=5
user_agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36"
(i=0; while read url; do wget -e robots=off -T10 --tries 10 -U"$user_agent" "$url" -O`printf %04d $i`.jpg & i=$(($i+1)) ; [ $(($i % $parallel)) = 0 ] && wait; done < urls.txt; wait)
Help Usage:
$ google-images.py --help
usage: google-images.py [-h] [--safe SAFE] [--opts OPTS] query n
Fetch image URLs from Google Image Search.
positional arguments:
query image search query
n number of images (approx)
optional arguments:
-h, --help show this help message and exit
--safe SAFE safe search [off|active|images]
--opts OPTS search options, e.g.
isz:lt,islt:svga,itp:photo,ic:color,ift:jpg
Code:
#!/usr/bin/env python3
# requires: selenium, chromium-driver, retry
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import selenium.common.exceptions as sel_ex
import sys
import time
import urllib.parse
from retry import retry
import argparse
import logging
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
logger = logging.getLogger()
retry_logger = None  # pass a logger here to make the retry decorator log its attempts

# CSS selectors for Google Images' DOM at the time of writing (liable to rot
# as Google renames its obfuscated class names).
css_thumbnail = "img.Q4LuWd"   # result-grid thumbnails
css_large = "img.n3VNCb"       # full-size image in the preview pane
css_load_more = ".mye4qd"      # "show more results" button
# Transient click failures that retry_click() should retry on.
selenium_exceptions = (sel_ex.ElementClickInterceptedException, sel_ex.ElementNotInteractableException, sel_ex.StaleElementReferenceException)
def scroll_to_end(wd):
    """Scroll the window to the bottom so Google loads more results."""
    wd.execute_script("window.scrollTo(0, document.body.scrollHeight);")
# FIX: the site formatting mangled the decorator's '@' into '#', disabling the
# retry; get_thumbnails deliberately raises KeyError so @retry (imported above)
# can poll with backoff until new thumbnails appear.
@retry(exceptions=KeyError, tries=6, delay=0.1, backoff=2, logger=retry_logger)
def get_thumbnails(wd, want_more_than=0):
    """Click 'load more' and return thumbnail elements; KeyError if the count didn't grow."""
    wd.execute_script("document.querySelector('{}').click();".format(css_load_more))
    thumbnails = wd.find_elements_by_css_selector(css_thumbnail)
    n_results = len(thumbnails)
    if n_results <= want_more_than:
        raise KeyError("no new thumbnails")
    return thumbnails
# FIX: '@' in the decorator was mangled to '#' by the site formatting;
# get_image_src raises KeyError so @retry can poll until the large image loads.
@retry(exceptions=KeyError, tries=6, delay=0.1, backoff=2, logger=retry_logger)
def get_image_src(wd):
    """Return http(s) src URLs of the large preview image(s); KeyError if none yet."""
    actual_images = wd.find_elements_by_css_selector(css_large)
    sources = []
    for img in actual_images:
        src = img.get_attribute("src")
        # Skip data: URLs and Google's encrypted thumbnail cache.
        if src.startswith("http") and not src.startswith("https://encrypted-tbn0.gstatic.com/"):
            sources.append(src)
    if not len(sources):
        raise KeyError("no large image")
    return sources
# FIX: restore the '@retry' decorator ('@' was mangled to '#'); clicking a
# thumbnail can transiently fail with the selenium exceptions listed above.
@retry(exceptions=selenium_exceptions, tries=6, delay=0.1, backoff=2, logger=retry_logger)
def retry_click(el):
    """Click `el`, retrying on transient selenium click errors."""
    el.click()
def get_images(wd, start=0, n=20, out=None):
    """Collect up to `n` full-size image URLs from an open Google Images page.

    Scrolls/loads until enough thumbnails exist, clicks each one, and reads the
    large preview's src. Writes each new URL to `out` as it is found.
    """
    # Phase 1: keep scrolling and clicking "load more" until >= n thumbnails.
    thumbnails = []
    count = len(thumbnails)
    while count < n:
        scroll_to_end(wd)
        try:
            thumbnails = get_thumbnails(wd, want_more_than=count)
        except KeyError as e:
            logger.warning("cannot load enough thumbnails")
            break
        count = len(thumbnails)
    # Phase 2: click each thumbnail and harvest the large image's src.
    sources = []
    for tn in thumbnails:
        try:
            retry_click(tn)
        except selenium_exceptions as e:
            logger.warning("main image click failed")
            continue
        sources1 = []
        try:
            sources1 = get_image_src(wd)
        except KeyError as e:
            pass
            # logger.warning("main image not found")
        if not sources1:
            # Fall back to the thumbnail's own src unless it's an inline data: URL.
            tn_src = tn.get_attribute("src")
            if not tn_src.startswith("data"):
                logger.warning("no src found for main image, using thumbnail")
                sources1 = [tn_src]
            else:
                logger.warning("no src found for main image, thumbnail is a data URL")
        for src in sources1:
            if not src in sources:
                sources.append(src)
                if out:
                    print(src, file=out)
                    out.flush()
        if len(sources) >= n:
            break
    return sources
def google_image_search(wd, query, safe="off", n=20, opts='', out=None):
    """Open a Google Image search for `query` in `wd` and collect image URLs."""
    template = "https://www.google.com/search?safe={safe}&site=&tbm=isch&source=hp&q={q}&oq={q}&gs_l=img&tbs={opts}"
    wd.get(template.format(q=urllib.parse.quote(query),
                           opts=urllib.parse.quote(opts),
                           safe=safe))
    return get_images(wd, n=n, out=out)
def main():
    """Parse CLI args, run a headless-Chrome image search, print URLs to stdout."""
    parser = argparse.ArgumentParser(description='Fetch image URLs from Google Image Search.')
    parser.add_argument('--safe', type=str, default="off", help='safe search [off|active|images]')
    parser.add_argument('--opts', type=str, default="", help='search options, e.g. isz:lt,islt:svga,itp:photo,ic:color,ift:jpg')
    parser.add_argument('query', type=str, help='image search query')
    parser.add_argument('n', type=int, default=20, help='number of images (approx)')
    args = parser.parse_args()
    opts = Options()
    opts.add_argument("--headless")
    # opts.add_argument("--blink-settings=imagesEnabled=false")
    with webdriver.Chrome(options=opts) as wd:
        sources = google_image_search(wd, args.query, safe=args.safe, n=args.n, opts=args.opts, out=sys.stdout)

main()
Haven't looked into your code but this is an example solution made with selenium to try to get 400 pictures from the search term
# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import json
import os
import urllib2
searchterm = 'vannmelon' # will also be the name of the folder
url = "https://www.google.co.in/search?q="+searchterm+"&source=lnms&tbm=isch"
browser = webdriver.Firefox()
browser.get(url)
header={'User-Agent':"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36"}
counter = 0
succounter = 0
if not os.path.exists(searchterm):
os.mkdir(searchterm)
for _ in range(500):
browser.execute_script("window.scrollBy(0,10000)")
for x in browser.find_elements_by_xpath("//div[#class='rg_meta']"):
counter = counter + 1
print "Total Count:", counter
print "Succsessful Count:", succounter
print "URL:",json.loads(x.get_attribute('innerHTML'))["ou"]
img = json.loads(x.get_attribute('innerHTML'))["ou"]
imgtype = json.loads(x.get_attribute('innerHTML'))["ity"]
try:
req = urllib2.Request(img, headers={'User-Agent': header})
raw_img = urllib2.urlopen(req).read()
File = open(os.path.join(searchterm , searchterm + "_" + str(counter) + "." + imgtype), "wb")
File.write(raw_img)
File.close()
succounter = succounter + 1
except:
print "can't get img"
print succounter, "pictures succesfully downloaded"
browser.close()
Adding to Piees's answer, for downloading any number of images from the search results, we need to simulate a click on 'Show more results' button after first 400 results are loaded.
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import os
import json
import urllib2
import sys
import time
# adding path to geckodriver to the OS environment variable
# assuming that it is stored at the same path as this script
os.environ["PATH"] += os.pathsep + os.getcwd()
download_path = "dataset/"
def main():
searchtext = sys.argv[1] # the search query
num_requested = int(sys.argv[2]) # number of images to download
number_of_scrolls = num_requested / 400 + 1
# number_of_scrolls * 400 images will be opened in the browser
if not os.path.exists(download_path + searchtext.replace(" ", "_")):
os.makedirs(download_path + searchtext.replace(" ", "_"))
url = "https://www.google.co.in/search?q="+searchtext+"&source=lnms&tbm=isch"
driver = webdriver.Firefox()
driver.get(url)
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
extensions = {"jpg", "jpeg", "png", "gif"}
img_count = 0
downloaded_img_count = 0
for _ in xrange(number_of_scrolls):
for __ in xrange(10):
# multiple scrolls needed to show all 400 images
driver.execute_script("window.scrollBy(0, 1000000)")
time.sleep(0.2)
# to load next 400 images
time.sleep(0.5)
try:
driver.find_element_by_xpath("//input[#value='Show more results']").click()
except Exception as e:
print "Less images found:", e
break
# imges = driver.find_elements_by_xpath('//div[#class="rg_meta"]') # not working anymore
imges = driver.find_elements_by_xpath('//div[contains(#class,"rg_meta")]')
print "Total images:", len(imges), "\n"
for img in imges:
img_count += 1
img_url = json.loads(img.get_attribute('innerHTML'))["ou"]
img_type = json.loads(img.get_attribute('innerHTML'))["ity"]
print "Downloading image", img_count, ": ", img_url
try:
if img_type not in extensions:
img_type = "jpg"
req = urllib2.Request(img_url, headers=headers)
raw_img = urllib2.urlopen(req).read()
f = open(download_path+searchtext.replace(" ", "_")+"/"+str(downloaded_img_count)+"."+img_type, "wb")
f.write(raw_img)
f.close
downloaded_img_count += 1
except Exception as e:
print "Download failed:", e
finally:
print
if downloaded_img_count >= num_requested:
break
print "Total downloaded: ", downloaded_img_count, "/", img_count
driver.quit()
if __name__ == "__main__":
main()
Full code is here.
This worked for me in Windows 10, Python 3.9.7:
pip install bing-image-downloader
Below code downloads 10 images of India from Bing search Engine to desired output folder:
from bing_image_downloader import downloader
# Downloads 10 images for the query 'India' into ./dataset/India/.
downloader.download('India', limit=10, output_dir='dataset', adult_filter_off=True, force_replace=False, timeout=60, verbose=True)
Documentation: https://pypi.org/project/bing-image-downloader/
You can also use Selenium with Python. Here is how:
from selenium import webdriver
import urllib
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
import urllib.request

# Open Google Images in Firefox, grab the first result's <img> by XPath and
# save it as <word>.jpg.
driver = webdriver.Firefox()
word="apple"
url="http://images.google.com/search?q="+word+"&tbm=isch&sout=1"
driver.get(url)
# Absolute XPath copied from DevTools; brittle -- re-copy it if Google's DOM changes.
imageXpathSelector='/html/body/div[2]/c-wiz/div[3]/div[1]/div/div/div/div/div[1]/div[1]/span/div[1]/div[1]/div[1]/a[1]/div[1]/img'
img=driver.find_element(By.XPATH,imageXpathSelector)
src=(img.get_attribute('src'))
# Download the bytes from the image's src URL.
urllib.request.urlretrieve(src, word+".jpg")
driver.close()
(This code works on Python 3.8)
Please be informed that you should install the Selenium package with 'pip install selenium'
Contrary to the other web scraping techniques, Selenium opens the browser and downloads the items because Selenium's mission is testing rather than scraping.
N.B. For imageXpathSelector if it does not work please click F12 while your browser is open and right-click the image then click the 'copy' menu from the opened menu and select 'copy Xpath' there. It will be the right Xpath location of the element you need.
This one as other code snippets have grown old and no longer worked for me. Downloads 100 images for each keyword, inspired from one of the solutions above.
from bs4 import BeautifulSoup
import urllib2
import os
class GoogleeImageDownloader(object):
    """Downloads the thumbnail images (data-src attributes) from one Google Images results page (Python 2)."""
    _URL = "https://www.google.co.in/search?q={}&source=lnms&tbm=isch"
    _BASE_DIR = 'GoogleImages'
    _HEADERS = {
        'User-Agent':"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36"
    }
    def __init__(self):
        # NOTE: __init__ does all the work (prompt, fetch, save) -- merely
        # constructing the object runs the whole download.
        query = raw_input("Enter keyword to search images\n")
        self.dir_name = os.path.join(self._BASE_DIR, query.split()[0])
        self.url = self._URL.format(urllib2.quote(query))
        self.make_dir_for_downloads()
        self.initiate_downloads()
    def make_dir_for_downloads(self):
        """Create GoogleImages/<first word of query>/ if missing."""
        print "Creating necessary directories"
        if not os.path.exists(self._BASE_DIR):
            os.mkdir(self._BASE_DIR)
        if not os.path.exists(self.dir_name):
            os.mkdir(self.dir_name)
    def initiate_downloads(self):
        """Collect every <img data-src=...> URL from the results page, then save them."""
        src_list = []
        soup = BeautifulSoup(urllib2.urlopen(urllib2.Request(self.url,headers=self._HEADERS)),'html.parser')
        for img in soup.find_all('img'):
            if img.has_attr("data-src"):
                src_list.append(img['data-src'])
        print "{} of images collected for downloads".format(len(src_list))
        self.save_images(src_list)
    def save_images(self, src_list):
        """Download each collected URL to <dir_name>/<index>.jpg."""
        print "Saving Images..."
        for i , src in enumerate(src_list):
            try:
                req = urllib2.Request(src, headers=self._HEADERS)
                raw_img = urllib2.urlopen(req).read()
                with open(os.path.join(self.dir_name , str(i)+".jpg"), 'wb') as f:
                    f.write(raw_img)
            except Exception as e:
                print ("could not save image")
                raise e

if __name__ == "__main__":
    GoogleeImageDownloader()
I know this question is old, but I ran across it recently and none of the previous answers work anymore. So I wrote this script to gather images from google. As of right now it can download as many images as are available.
here is a github link to it as well https://github.com/CumminUp07/imengine/blob/master/get_google_images.py
DISCLAIMER: DUE TO COPYRIGHT ISSUES, IMAGES GATHERED SHOULD BE USED FOR RESEARCH AND EDUCATION PURPOSES ONLY
from bs4 import BeautifulSoup as Soup
import urllib2
import json
import urllib
#programtically go through google image ajax json return and save links to list#
#num_images is more of a suggestion #
#it will get the ceiling of the nearest 100 if available #
def get_links(query_string, num_images):
    """Return image-source links from Google's image-search AJAX endpoint.

    Results arrive in pages of up to 100, so the endpoint is hit once per
    100 requested images; fewer links may come back than asked for.
    """
    collected = []
    # Each AJAX call hands back at most 100 results, so advance in steps of 100.
    for offset in range(0, num_images, 100):
        url = ('https://www.google.com/search?ei=1m7NWePfFYaGmQG51q7IBg&hl=en&q=' + query_string
               + '&tbm=isch&ved=0ahUKEwjjovnD7sjWAhUGQyYKHTmrC2kQuT0I7gEoAQ&start=' + str(offset)
               + '&yv=2&vet=10ahUKEwjjovnD7sjWAhUGQyYKHTmrC2kQuT0I7gEoAQ.1m7NWePfFYaGmQG51q7IBg.i&ijn=1&asearch=ichunk&async=_id:rg_s,_pms:s')
        # A User-Agent header avoids Google's 403 response.
        request = urllib2.Request(url, None, {'User-Agent': 'Mozilla/5.0'})
        # The endpoint answers with JSON; the rendered HTML fragment sits at [1][1].
        page = json.loads(urllib2.urlopen(request).read())
        fragment = Soup(page[1][1], 'lxml')
        # Every <img> inside the fragment is a search hit; keep each src.
        collected.extend(img["src"] for img in fragment.find_all('img'))
    return collected
#download images #
#takes list of links, directory to save to #
#and prefix for file names #
#saves images in directory as a one up number #
#with prefix added #
#all images will be .jpg #
def get_images(links, directory, pre):
    """Download every link to ./<directory>/<pre><index>.jpg."""
    for index, link in enumerate(links):
        urllib.urlretrieve(link, "./" + directory + "/" + str(pre) + str(index) + ".jpg")
#main function to search images #
#takes two lists, base term and secondary terms #
#also takes number of images to download per #
#combination #
#it runs every combination of search terms #
#with base term first then secondary #
def search_images(base, terms, num_images):
    """Run every base+term combination and download the results to "images"."""
    for base_term in base:
        for term_index, term in enumerate(terms):
            # Query is "<base>+<term>"; files are prefixed with the term's index.
            all_links = get_links(base_term + '+' + term, num_images)
            get_images(all_links, "images", term_index)
if __name__ == '__main__':
    # One base term combined with each secondary term, up to 1000 images apiece.
    base = ["animated"]
    terms = ["cars","numbers","scenery","people","dogs","cats","animals"]
    search_images(base, terms, 1000)
Instead of google image search, try other image searches like ecosia or bing.
Here is a sample code for retrieving images from ecosia search engine.
from bs4 import BeautifulSoup
import requests
import urllib
import urllib.request  # `import urllib` alone does not bind the `request` submodule

# Scrape every image URL from the Ecosia image-search result page(s) and save
# each one locally as <index>.jpg.
user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'
headers = {'User-Agent':user_agent}
urls = ["https://www.ecosia.org/images?q=india%20pan%20card%20example"]
#The url's from which the image is to be extracted.
index = 0
for url in urls:
    request = urllib.request.Request(url, None, headers)  # The assembled request
    response = urllib.request.urlopen(request)
    data = response.read()  # Read the html result page
    soup = BeautifulSoup(data, 'html.parser')
    for link in soup.find_all('img'):
        # The images are enclosed in 'img' tags; 'src' holds the image URL.
        img_url = link.get('src')
        dest = str(index) + ".jpg"  # Destination to store the image.
        try:
            # BUG FIX: the destination filename was omitted before, so
            # urlretrieve saved each image to a throwaway temp file and
            # `dest` was never used. Pass it explicitly.
            urllib.request.urlretrieve(img_url, dest)
            index += 1
        except Exception:
            # Best-effort: skip images whose src is missing, relative,
            # or unreachable, and keep going with the rest.
            continue
The code also runs against Google image search, but it fails to retrieve the images because Google serves them in an encoded format that is difficult to recover from the image URL.
The solution works as of 1-Feb-2021.
Okay, so instead of coding this for you, I am going to tell you what you're doing wrong, and it might lead you in the right direction. Most modern websites render their HTML dynamically via JavaScript, so if you simply send a GET request (with urllib/cURL/fetch/axios) you won't get what you see in the browser at the same URL. You need something that executes the JavaScript to produce the same HTML/webpage you see in your browser; you can use something like the Selenium gecko driver for Firefox to do this, and there are Python modules that let you do it.
I hope this helps, if you still feel lost here's a simple script i wrote a while back to extract something similar from your google photos
from selenium import webdriver
import re

# Open the shared Google Photos album in Firefox so its JavaScript runs and
# the final HTML (which contains the album download links) can be read.
url = "https://photos.app.goo.gl/xxxxxxx"
driver = webdriver.Firefox()
driver.get(url)
html = driver.page_source
# Resource fix: close the browser once the rendered page has been captured
# (the original left the Firefox instance running).
driver.quit()

# NOTE: the original assigned `regPrms` twice and never used it — dead code,
# removed. The raw string below avoids the invalid-escape warning for `\s`
# while matching exactly the same pattern.
urls = re.findall(r"(?P<url>https?://[^\s\"$]+)", html)

# Keep only the album ZIP-download links.
fin = [u for u in urls if "video-downloads" in u]

print("The Following ZIP contains all your pictures")
for url in fin:
    print("-------------------")
    print(url)
You can achieve this using selenium as others mentioned it above.
Alternatively, you can try using Google Images API from SerpApi. Check out the playground.
Code and example. The function to download images was taken from this answer:
import os, time, shutil, httpx, asyncio
from urllib.parse import urlparse
from serpapi import GoogleSearch

# https://stackoverflow.com/a/39217788/1291371
async def download_file(url):
    """Stream `url` into images/<basename of the URL path>; return that name."""
    print(f'Downloading {url}')
    # https://stackoverflow.com/a/18727481/1291371
    parsed_url = urlparse(url)
    local_filename = os.path.basename(parsed_url.path)
    os.makedirs('images', exist_ok=True)
    async with httpx.AsyncClient() as client:
        async with client.stream('GET', url) as response:
            # BUG FIX: the original used `async with open(...)` (plain open()
            # is not an async context manager → TypeError) and read
            # `response.raw`, which is the requests API, not httpx. Stream
            # the body chunk-by-chunk with httpx's aiter_bytes() instead.
            with open(f'images/{local_filename}', 'wb') as f:
                async for chunk in response.aiter_bytes():
                    f.write(chunk)
    return local_filename

async def main():
    """Search Google Images via SerpApi and download every result concurrently."""
    start = time.perf_counter()
    params = {
        "engine": "google",
        "ijn": "0",           # result-page number
        "q": "lasagna",
        "tbm": "isch",        # image search
        "api_key": os.getenv("API_KEY"),
    }
    search = GoogleSearch(params)
    results = search.get_dict()
    download_files_tasks = [
        download_file(image['original']) for image in results['images_results']
    ]
    # return_exceptions=True: one failed download does not cancel the rest.
    await asyncio.gather(*download_files_tasks, return_exceptions=True)
    print(
        f"Downloaded {len(download_files_tasks)} images in {time.perf_counter() - start:0.4f} seconds")

asyncio.run(main())
Disclaimer, I work for SerpApi.
The one I used is :
https://github.com/hellock/icrawler
This package is a mini framework of web crawlers. With its modular design it is easy to use and extend. It supports media data like images and videos very well, and can also be applied to texts and other types of files. Scrapy is heavy and powerful, while icrawler is tiny and flexible.
def main():
    """Parse --crawler and run the matching test_<name>() helper for each one."""
    parser = ArgumentParser(description='Test built-in crawlers')
    parser.add_argument(
        '--crawler',
        nargs='+',
        default=['google', 'bing', 'baidu', 'flickr', 'greedy', 'urllist'],
        help='which crawlers to test')
    args = parser.parse_args()
    for crawler in args.crawler:
        # SECURITY/IDIOM FIX: dispatch by name lookup instead of eval(), so a
        # crafted --crawler value cannot execute arbitrary code. Unknown names
        # now raise KeyError rather than eval's NameError.
        globals()['test_{}'.format(crawler)]()
        print('\n')

Categories

Resources