How to properly store BeautifulSoup objects for later use [duplicate] - python

I have some code that is quite long, so it takes a long time to run. I want to simply save either the requests object (in this case "name") or the BeautifulSoup object (in this case "soup") locally so that next time I can save time. Here is the code:
from bs4 import BeautifulSoup
import requests
url = 'SOMEURL'
name = requests.get(url)
soup = BeautifulSoup(name.content)

Since name.content is just the HTML, you can dump it to a file and read it back later.
Usually the bottleneck is not the parsing but the network latency of making the requests.
from bs4 import BeautifulSoup
import requests

url = 'https://google.com'
name = requests.get(url)

# name.content is bytes, so write it in binary mode (or write name.text to a text-mode file)
with open("/tmp/A.html", "wb") as f:
    f.write(name.content)

# read it back in
with open("/tmp/A.html", "rb") as f:
    soup = BeautifulSoup(f, "html.parser")

# do something with soup
Here is some anecdotal evidence for the fact that the bottleneck is in the network.
from bs4 import BeautifulSoup
import requests
import time

url = 'https://google.com'
t1 = time.perf_counter()
name = requests.get(url)
t2 = time.perf_counter()
soup = BeautifulSoup(name.content, "html.parser")
t3 = time.perf_counter()
print(t2 - t1, t3 - t2)
Output, from running on a ThinkPad X1 Carbon with a fast campus network:
0.11 0.02
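Building on that, one way to pay the network cost only once is to check for the cached file before fetching. A minimal sketch of that idea (the cache path is illustrative):
import os
import requests
from bs4 import BeautifulSoup

url = 'https://google.com'
cache_path = "/tmp/A.html"  # illustrative cache location

if not os.path.exists(cache_path):
    # only hit the network when there is no cached copy yet
    with open(cache_path, "wb") as f:
        f.write(requests.get(url).content)

with open(cache_path, "rb") as f:
    soup = BeautifulSoup(f, "html.parser")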

Storing requests locally and restoring them as Beautiful Soup objects later on
If you are iterating through the pages of a website, you can store each page with requests as explained here.
Create a folder soupCategory in the same folder where your script is, and use any recent user agent for the headers (the imports below are the ones the snippet relies on):
import time
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

headers = {'user-agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0 Safari/605.1.15'}

def getCategorySoup():
    session = requests.Session()
    retry = Retry(connect=7, backoff_factor=0.5)
    adapter = HTTPAdapter(max_retries=retry)
    session.mount('http://', adapter)
    session.mount('https://', adapter)
    basic_url = "https://www.somescrappingdomain.com/apartments?adsWithImages=1&page="
    t0 = time.time()
    totalPages = 1525  # put your number of pages here
    for i in range(1, totalPages):
        url = basic_url + str(i)
        r = session.get(url, headers=headers)  # use the session so the retry adapter applies
        pageName = "./soupCategory/" + str(i) + ".html"
        with open(pageName, mode='w', encoding='UTF-8', errors='strict', buffering=1) as f:
            f.write(r.text)
        print(pageName, end=" ")
    t1 = time.time()
    total = t1 - t0
    print("Total time for getting", totalPages, "category pages is", round(total), "seconds")
    return
Later on you can create a Beautiful Soup object as @merlin2011 mentioned with:
with open("./soupCategory/1.html") as f:
    soup = BeautifulSoup(f, "html.parser")

Related

I'm not able to split my code into functions

I wrote some code to download PDFs from a website, and it works perfectly, downloading all the PDFs (first code below). However, when I split the code into functions, only two links are inserted into the "papers" list and the execution ends with exit code zero, but the following warning message appears:
GuessedAtParserWarning: No parser was explicitly specified, so I'm using the best available HTML parser for this system ("html.parser"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.
The code that caused this warning is on line 11 of the file C:\Downloads\EditoraCL\download_pdf.py. To get rid of this warning, pass the additional argument 'features="html.parser"' to the BeautifulSoup constructor.
for link in BeautifulSoup(response, parse_only=SoupStrainer('a')):
FIRST CODE:
import requests
import httplib2
import os
from bs4 import BeautifulSoup, SoupStrainer

papers = []
pdfs = []

http = httplib2.Http()
status, response = http.request('https://www.snh2021.anpuh.org/site/anais')

for link in BeautifulSoup(response, parse_only=SoupStrainer('a')):
    if link.has_attr('href'):
        papers.append(link['href'])
print(papers)

for x in papers:
    if x.endswith('pdf'):
        pdfs.append(x)
print(pdfs)

def baixa_arquivo(url, endereco):
    resposta = requests.get(url)
    if resposta.status_code == requests.codes.OK:
        with open(endereco, 'wb') as novo_arquivo:
            novo_arquivo.write(resposta.content)
        print('Download concluído. Salvo em {}'.format(endereco))
    else:
        resposta.raise_for_status()

if __name__ == '__main__':
    url_basica = 'https://www.snh2021.anpuh.org/{}'
    output = 'Download'
    for i in range(1, len(pdfs)):
        nome_do_arquivo = os.path.join(output, 'artigo{}.pdf'.format(i))
        a = pdfs[i]
        z = url_basica.format(a)
        y = requests.get(z)
        if y.status_code != 404:
            baixa_arquivo(z, nome_do_arquivo)
CODE DIVIDED INTO FUNCTIONS:
import requests
import httplib2
import os
from bs4 import BeautifulSoup, SoupStrainer

papers = []
pdfs = []

def busca_links():
    http = httplib2.Http()
    status, response = http.request('https://www.snh2021.anpuh.org/site/anais')
    for link in BeautifulSoup(response, parse_only=SoupStrainer('a')):
        if link.has_attr('href'):
            papers.append(link['href'])
    return papers

def links_pdf():
    for x in papers:
        if x.endswith('pdf'):
            pdfs.append(x)
    return pdfs

def baixa_arquivo(url, endereco):
    resposta = requests.get(url)
    if resposta.status_code == requests.codes.OK:
        with open(endereco, 'wb') as novo_arquivo:
            novo_arquivo.write(resposta.content)
        return f'Download concluído. Salvo em {endereco}'
    else:
        resposta.raise_for_status()

if __name__ == '__main__':
    busca_links()
    links_pdf()
    url_basica = 'https://www.snh2021.anpuh.org/{}'
    output = 'Download'
    print(papers)
    print(pdfs)
    for i in range(1, len(pdfs)):
        nome_do_arquivo = os.path.join(output, 'artigo{}.pdf'.format(i))
        a = pdfs[i]
        z = url_basica.format(a)
        y = requests.get(z)
        if y.status_code != 404:
            baixa_arquivo(z, nome_do_arquivo)
Could someone help me understand why the second code is giving this error?
Functions do not share their local variables, so to make your code work you should assign the returned values at the call site (papers = busca_links() and links_pdf(papers)).
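For example, a minimal sketch of the suggested main block (assuming links_pdf is changed to take the list as a parameter, as the answer suggests):
if __name__ == '__main__':
    papers = busca_links()      # capture the returned list
    pdfs = links_pdf(papers)    # pass it along instead of relying on globals
    url_basica = 'https://www.snh2021.anpuh.org/{}'
    output = 'Download'
    for i in range(1, len(pdfs)):
        nome_do_arquivo = os.path.join(output, 'artigo{}.pdf'.format(i))
        baixa_arquivo(url_basica.format(pdfs[i]), nome_do_arquivo)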
Anyway, for the purpose of organization and clearer code, you should use classes and methods:
import os
import requests
import httplib2
from bs4 import BeautifulSoup, SoupStrainer


class Pdf:
    def __init__(self, base_url, url):
        self.main_dir = os.path.dirname(__file__)
        self.pdfs_dir = os.path.join(self.main_dir, 'pdfs')
        self.base_url = base_url
        self.url = url

    def get_links(self):
        http = httplib2.Http()
        status, response = http.request(self.url)
        self.links = []
        # passing 'html.parser' explicitly avoids the GuessedAtParserWarning from the question
        for link in BeautifulSoup(response, 'html.parser', parse_only=SoupStrainer('a')):
            if link.has_attr('href'):
                if link['href'].endswith('pdf'):
                    self.links.append(f"{self.base_url}{link['href']}")

    def download_pdf(self):
        for link in self.links:
            response = requests.get(link, stream=True)
            if response.status_code == 200:
                file_path = os.path.join(self.pdfs_dir, link.split('/')[-1])
                with open(file_path, 'wb') as f:
                    f.write(response.content)
                print('Success. Saved on {}'.format(file_path))
            else:
                # Should handle errors here, by appending them to a list and
                # trying again later.
                print('Error.')


if __name__ == '__main__':
    base_url = 'https://www.snh2021.anpuh.org/'
    url = f'{base_url}site/anais'

    pdf = Pdf(base_url, url)
    pdf.get_links()
    pdf.download_pdf()

Webscraping 1000's of links using Python concurrent.futures

I am trying to scrape data from about a thousand links that all have the same content and the same procedure for extracting data. To speed up the process I am using Python's concurrent.futures, which I think is the best option in terms of speed. When I scrape data from about 30-40 links as a trial it works, but as the number increases it does not. Here is my code:
import re
import json
import requests
import concurrent.futures
import time

links_json = ['https://webgate.ec.europa.eu/rasff-window/backend/public/notification/view/id/485387/',
              'https://webgate.ec.europa.eu/rasff-window/backend/public/notification/view/id/485256/',
              'https://webgate.ec.europa.eu/rasff-window/backend/public/notification/view/id/487113/',
              'https://webgate.ec.europa.eu/rasff-window/backend/public/notification/view/id/486733/',
              'https://webgate.ec.europa.eu/rasff-window/backend/public/notification/view/id/486937/',
              'https://webgate.ec.europa.eu/rasff-window/backend/public/notification/view/id/486946/',
              'https://webgate.ec.europa.eu/rasff-window/backend/public/notification/view/id/485444/',
              'https://webgate.ec.europa.eu/rasff-window/backend/public/notification/view/id/487258/',
              'https://webgate.ec.europa.eu/rasff-window/backend/public/notification/view/id/487011/',
              'https://webgate.ec.europa.eu/rasff-window/backend/public/notification/view/id/487254/']

MAX_THREADS = 30
Data_Source = "RASFF"
Product_Category = []
Date = []
Product_name = []
Reference = []

def scrape(links):
    data = requests.get(links).json()
    Product_Category.append(data["product"]["productCategory"]["description"])
    Date.append(data["ecValidationDate"])
    Product_name.append(data["product"]["description"])
    Reference.append(data["reference"])

def download_data(links_json):
    threads = min(MAX_THREADS, len(links_json))
    with concurrent.futures.ThreadPoolExecutor(max_workers=threads) as executor:
        executor.map(scrape, links_json)

def main(new_links):
    t0 = time.time()
    download_data(new_links)
    t1 = time.time()
    print(f"{t1-t0} seconds to crawl {len(new_links)} in total.")

main(links_json)
When I try to run the main function, the results are very inconsistent. Right now there are only 12 links to scrape, but as the number of links increases, the amount of data that ends up in the lists decreases. For instance: with about 200 links there should be 200 values in the Product_Category list, but sometimes there are 100, 67, etc., so it is very inconsistent. I am not sure if I am missing something. I have even tried adding time.sleep(0.25) in the scrape function, but it does not help. I don't know how I can provide a list of 500-1000 links here.
Here's an example of how one could do this using the threading module:
import requests
import threading

Product_Category = []
Date = []
Product_name = []
Reference = []

AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_5_1) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.2 Safari/605.1.15'
BASEURL = 'https://webgate.ec.europa.eu/rasff-window/backend/public/notification/view/id/'
LOCK = threading.Lock()
headers = {'User-Agent': AGENT}

links = ['485387',
         '485256',
         '487113',
         '486733',
         '486937',
         '486946',
         '485444',
         '487258',
         '487011',
         '487254']

def scrape(session, link):
    response = session.get(f'{BASEURL}{link}/', headers=headers)
    response.raise_for_status()
    json = response.json()
    try:
        LOCK.acquire()
        Product_Category.append(
            json["product"]["productCategory"]["description"])
        Date.append(json["ecValidationDate"])
        Product_name.append(json["product"]["description"])
        Reference.append(json["reference"])
    finally:
        LOCK.release()

def main():
    with requests.Session() as session:
        ta = []
        for link in links:
            t = threading.Thread(target=scrape, args=(session, link))
            ta.append(t)
            t.start()
        for t in ta:
            t.join()
        print(Product_Category)
        print(Date)
        print(Product_name)
        print(Reference)

if __name__ == '__main__':
    main()
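The same idea also works with concurrent.futures if scrape returns its values instead of appending to shared lists, so no lock is needed. A minimal sketch, reusing the links_json list from the question:
import requests
import concurrent.futures

def scrape(link):
    data = requests.get(link).json()
    # return one record per link instead of mutating shared global lists
    return (data["product"]["productCategory"]["description"],
            data["ecValidationDate"],
            data["product"]["description"],
            data["reference"])

def download_data(links):
    with concurrent.futures.ThreadPoolExecutor(max_workers=30) as executor:
        # map collects every result and preserves the input order
        return list(executor.map(scrape, links))

records = download_data(links_json)
Product_Category, Date, Product_name, Reference = map(list, zip(*records))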

Python Web Scraping - Is it not possible to scrape this site?

I want to scrape the following website: https://www.globenewswire.com/NewsRoom
My goal is to store the press releases and articles in a database that I use later on. I have done this with other news sites too and removed that code here for easier readability (it has no influence on the code given to you). My problem is that I can't figure out how exactly to scrape the headlines, links and other data, since the HTML is structured with unusual attributes.
The following code shows how I approached it. Maybe someone has an idea of what mistakes I made in scraping. I gladly appreciate any help.
import requests
import sqlite3
import Keywords
from bs4 import BeautifulSoup
from time import sleep
from random import randint
from datetime import datetime
from datetime import timedelta

# ----- Initializing Database & Notification Service -----
connect = sqlite3.connect('StoredArticles.db')
cursor = connect.cursor()
print("Connection created.")

try:
    cursor.execute('''CREATE TABLE articlestable (article_time TEXT, article_title TEXT, article_keyword TEXT,
        article_link TEXT, article_description TEXT, article_entry_time DATETIME)''')
    cursor.execute('''CREATE UNIQUE INDEX index_article_link ON articlestable(article_link)''')
except:
    pass
print("Table ready.")

while True:
    class Scrapers:
        # ----- Initialize Keywords -----
        def __init__(self):
            self.article_keyword = None
            self.article_title = None
            self.article_link = None
            self.article_time = None
            self.article_time_drop = None
            self.article_description = None
            self.article_entry_time = None
            self.headers = {
                'User-Agent':
                    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/605.1.15 (KHTML, like Gecko)' +
                    'Version/14.0.1 Safari/605.1.15'
            }

        def scraping_globenewswire(self, page):
            url = 'https://www.globenewswire.com/NewsRoom?page=' + str(page)
            r = requests.get(url, headers=self.headers)
            soup = BeautifulSoup(r.text, 'html.parser')
            articles = soup.select('.main-container > .row')
            print("GlobeNewswire - Scraping page " + str(page) + "...")
            sleep(randint(0, 1))
            for item in articles:
                self.article_title = item.select_one('a[data-autid="article-url"]').text.strip()
                self.article_time = item.select_one('span[data-autid="article-published-date"]').text.strip()
                self.article_link = 'https://www.globenewswire.com' + \
                    item.select_one('a[data-autid="article-url"]')['href']
                self.article_description = item.select_one('span', _class='pagging-list-item-text-body').text.strip()
                self.article_entry_time = datetime.now()
                cursor.execute('''INSERT OR IGNORE INTO articlestable VALUES(?,?,?,?,?,?)''',
                               (self.article_time, self.article_title, self.article_keyword, self.article_link,
                                self.article_description, self.article_entry_time))
                print(self.article_title)
            return

    # ----- End of Loops -----
    scraper = Scrapers()

    # ----- Range of Pages to scrape through -----
    for x in range(1, 3):
        scraper.scraping_globenewswire(x)

    # ----- Add to Database -----
    connect.commit()
    print("Process done. Starting to sleep again. Time: " + str(datetime.now()))
    sleep(randint(5, 12))
I extracted all the headlines of page=1 from the given URL.
The headlines are present inside an <a> with the attribute data-autid equal to article-url.
Select all the <a> tags with the above attribute using findAll().
Iterate over all the selected <a> tags and extract the headlines, i.e. the text.
You can extend this and extract whatever data you need with this approach.
This code will print all the headlines of page=1 from the given URL.
import requests
import bs4 as bs

url = 'https://www.globenewswire.com/NewsRoom'
resp = requests.get(url)
soup = bs.BeautifulSoup(resp.text, 'lxml')
headlines = soup.findAll('a', attrs={'data-autid': 'article-url'})
for i in headlines:
    print(i.text, end="\n")
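As a sketch of how that extension could look, the same attribute-based selection can also pull the link and the published date used in the question's selectors (the data-autid values come from the question; treat the rest as illustrative):
for a in soup.findAll('a', attrs={'data-autid': 'article-url'}):
    headline = a.text.strip()
    link = 'https://www.globenewswire.com' + a['href']
    print(headline, link)

for span in soup.findAll('span', attrs={'data-autid': 'article-published-date'}):
    print(span.text.strip())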

How to web scrape a list of URLs of a website with multiprocessing when I login using Python

First of all, I am a beginner with Python. I am trying to create a script that does the following:
login to a website using Selenium
load a list of the website's URLs from a CSV file
web scrape data using multiprocessing method
I am using the following script
# imports implied by the rest of the snippet
import csv
import time
from time import sleep
from multiprocessing import Pool

import requests
from bs4 import BeautifulSoup
from selenium import webdriver

#Load URLS from CSV
def mycontents():
    contents = []
    with open('global_csv.csv', 'r') as csvf:
        reader = csv.reader(csvf, delimiter=";")
        for row in reader:
            contents.append(row[1])  # Add each url to list contents
    return contents

# parse a single item to get information
def parse(url):
    headers = {'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'}
    r = requests.get(url, headers, timeout=10)
    sleep(3)
    info = []
    availability_text = '-'
    price_text = '-'
    if r.status_code == 200:
        print('Processing..' + url)
        html = r.text
        soup = BeautifulSoup(html, 'html.parser')
        time.sleep(4)
        price = soup.select(".price")
        if price is not None:
            price_text = price.text.strip()
            print(price_text)
        else:
            price_text = "0,00"
            print(price_text)
        availability = soup.find('span', attrs={'class': 'wholesale-availability'})
        if availability is not None:
            availability_text = availability.text.strip()
            print(availability_text)
        else:
            availability_text = "Not Available"
            print(availability_text)
        info.append(price_text)
        info.append(availability_text)
    return ';'.join(info)

web_links = None
web_links = mycontents()

#Insert First Row
fields = ['SKU', 'price', 'availability']
with open('output_global.csv', 'w') as f:
    writer = csv.writer(f)
    writer.writerow(fields)

if __name__ == "__main__":
    #Load Webdriver
    browser = webdriver.Chrome('C:\\chromedriver.exe')
    browser.get('TheLoginPage')
    #Find Username Field
    username = browser.find_element_by_id('email')
    username.send_keys('myusername')
    #Find Password Field
    password = browser.find_element_by_id('pass')
    time.sleep(2)
    password.send_keys('mypassword')
    #Find Connect Button
    sing_in = browser.find_element_by_xpath('//*[@id="send2"]')
    sing_in.click()
    #Start MultiProcess
    with Pool(4) as p:
        records = p.map(parse, web_links)
        if len(records) > 0:
            with open('output_global.csv', 'a') as f:
                f.write('\n'.join(records))
When I run the script it does not get anything, and the command window just shows the URLs, which makes me think that even if I connect successfully the sessions are different?!
I tried to save the session by putting it inside the parse method or under
if __name__ == "__main__":
I tried to connect the browser to the same session, but I get errors like
You have not defined a session
TypeError: get() takes 2 positional arguments but 3 were given
local variable 'session' referenced before assignment
How can I practically log in to the website and use multiprocessing to web scrape the URLs I need?
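One common pattern for this kind of setup, offered only as a hedged sketch since the question is not answered in this thread: copy the cookies from the logged-in Selenium browser into a requests session, then hand those cookies to each worker so the plain requests calls are authenticated. The helper name below is illustrative.
import requests

def make_authenticated_session(browser):
    # copy Selenium's login cookies into a requests.Session (illustrative helper)
    session = requests.Session()
    for cookie in browser.get_cookies():
        session.cookies.set(cookie['name'], cookie['value'])
    return session

# after the Selenium login steps:
#   cookies = make_authenticated_session(browser).cookies.get_dict()
# then pass `cookies` to each request in parse(), e.g.:
#   r = requests.get(url, headers=headers, cookies=cookies, timeout=10)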

Python - Download Images from google Image search?

I want to download all images from a Google image search using Python. The code I am using seems to have a problem sometimes. My code is:
import os
import sys
import time
from urllib import FancyURLopener
import urllib2
import simplejson

# Define search term
searchTerm = "parrot"

# Replace spaces ' ' in search term for '%20' in order to comply with request
searchTerm = searchTerm.replace(' ','%20')

# Start FancyURLopener with defined version
class MyOpener(FancyURLopener):
    version = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11'

myopener = MyOpener()

# Set count to 0
count = 0

for i in range(0,10):
    # Notice that the start changes for each iteration in order to request a new set of images for each loop
    url = ('https://ajax.googleapis.com/ajax/services/search/images?' + 'v=1.0& q='+searchTerm+'&start='+str(i*10)+'&userip=MyIP')
    print url
    request = urllib2.Request(url, None, {'Referer': 'testing'})
    response = urllib2.urlopen(request)

    # Get results using JSON
    results = simplejson.load(response)
    data = results['responseData']
    dataInfo = data['results']

    # Iterate for each result and get unescaped url
    for myUrl in dataInfo:
        count = count + 1
        my_url = myUrl['unescapedUrl']
        myopener.retrieve(myUrl['unescapedUrl'],str(count)+'.jpg')
After downloading a few pages I get the following error:
Traceback (most recent call last):
File "C:\Python27\img_google3.py", line 37, in <module>
dataInfo = data['results']
TypeError: 'NoneType' object has no attribute '__getitem__'
What should I do?
I have modified my code. Now the code can download 100 images for a given query, and the images are full high resolution; that is, the original images are being downloaded.
I am downloading the images using urllib2 and Beautiful Soup.
from bs4 import BeautifulSoup
import requests
import re
import urllib2
import os
import cookielib
import json

def get_soup(url,header):
    return BeautifulSoup(urllib2.urlopen(urllib2.Request(url,headers=header)),'html.parser')

query = raw_input("query image")# you can change the query for the image here
image_type="ActiOn"
query= query.split()
query='+'.join(query)
url="https://www.google.co.in/search?q="+query+"&source=lnms&tbm=isch"
print url

#add the directory for your image here
DIR="Pictures"
header={'User-Agent':"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36"
}
soup = get_soup(url,header)

ActualImages=[]# contains the link for Large original images, type of image
for a in soup.find_all("div",{"class":"rg_meta"}):
    link , Type =json.loads(a.text)["ou"] ,json.loads(a.text)["ity"]
    ActualImages.append((link,Type))

print "there are total" , len(ActualImages),"images"

if not os.path.exists(DIR):
    os.mkdir(DIR)
DIR = os.path.join(DIR, query.split()[0])
if not os.path.exists(DIR):
    os.mkdir(DIR)

###print images
for i , (img , Type) in enumerate( ActualImages):
    try:
        req = urllib2.Request(img, headers=header)  # header is already a dict, so pass it directly
        raw_img = urllib2.urlopen(req).read()
        cntr = len([i for i in os.listdir(DIR) if image_type in i]) + 1
        print cntr
        if len(Type)==0:
            f = open(os.path.join(DIR , image_type + "_"+ str(cntr)+".jpg"), 'wb')
        else :
            f = open(os.path.join(DIR , image_type + "_"+ str(cntr)+"."+Type), 'wb')
        f.write(raw_img)
        f.close()
    except Exception as e:
        print "could not load : "+img
        print e
I hope this helps you.
The Google Image Search API is deprecated; you need to use Google Custom Search for what you want to achieve. To fetch the images you need to do this:
import urllib2
import simplejson
import cStringIO
fetcher = urllib2.build_opener()
searchTerm = 'parrot'
startIndex = 0
searchUrl = "http://ajax.googleapis.com/ajax/services/search/images?v=1.0&q=" + searchTerm + "&start=" + startIndex
f = fetcher.open(searchUrl)
deserialized_output = simplejson.load(f)
This will give you 4 results as JSON; you need to get further results iteratively by incrementing startIndex in the API request.
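A minimal sketch of that pagination loop under the answer's assumptions (the deprecated endpoint returned 4 results per page; it no longer responds today):
import urllib2
import simplejson

searchTerm = 'parrot'
allResults = []
for startIndex in range(0, 20, 4):  # step by the page size of 4 results
    searchUrl = ("http://ajax.googleapis.com/ajax/services/search/images?v=1.0&q="
                 + searchTerm + "&start=" + str(startIndex))
    response = simplejson.load(urllib2.build_opener().open(searchUrl))
    allResults.extend(response['responseData']['results'])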
To get the images you need to use a library like cStringIO.
For example, to access the first image, you need to do this:
imageUrl = deserialized_output['responseData']['results'][0]['unescapedUrl']
file = cStringIO.StringIO(urllib2.urlopen(imageUrl).read())  # urllib2 is already imported above
img = Image.open(file)  # requires: from PIL import Image
Google deprecated their API, scraping Google is complicated, so I would suggest using Bing API instead to automatically download images. The pip package bing-image-downloader allows you to easily download an arbitrary number of images to a directory with a single line of code.
from bing_image_downloader import downloader
downloader.download(query_string, limit=100, output_dir='dataset', adult_filter_off=True, force_replace=False, timeout=60, verbose=True)
Google is not so good, and Microsoft is not so evil
Here's my latest google image snarfer, written in Python, using Selenium and headless Chrome.
It requires python-selenium, the chromium-driver, and a module called retry from pip.
Link: http://sam.aiki.info/b/google-images.py
Example Usage:
google-images.py tiger 10 --opts isz:lt,islt:svga,itp:photo > urls.txt
parallel=5
user_agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36"
(i=0; while read url; do wget -e robots=off -T10 --tries 10 -U"$user_agent" "$url" -O`printf %04d $i`.jpg & i=$(($i+1)) ; [ $(($i % $parallel)) = 0 ] && wait; done < urls.txt; wait)
Help Usage:
$ google-images.py --help
usage: google-images.py [-h] [--safe SAFE] [--opts OPTS] query n
Fetch image URLs from Google Image Search.
positional arguments:
query image search query
n number of images (approx)
optional arguments:
-h, --help show this help message and exit
--safe SAFE safe search [off|active|images]
--opts OPTS search options, e.g.
isz:lt,islt:svga,itp:photo,ic:color,ift:jpg
Code:
#!/usr/bin/env python3
# requires: selenium, chromium-driver, retry
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import selenium.common.exceptions as sel_ex
import sys
import time
import urllib.parse
from retry import retry
import argparse
import logging

logging.basicConfig(stream=sys.stderr, level=logging.INFO)
logger = logging.getLogger()
retry_logger = None

css_thumbnail = "img.Q4LuWd"
css_large = "img.n3VNCb"
css_load_more = ".mye4qd"
selenium_exceptions = (sel_ex.ElementClickInterceptedException, sel_ex.ElementNotInteractableException, sel_ex.StaleElementReferenceException)

def scroll_to_end(wd):
    wd.execute_script("window.scrollTo(0, document.body.scrollHeight);")

@retry(exceptions=KeyError, tries=6, delay=0.1, backoff=2, logger=retry_logger)
def get_thumbnails(wd, want_more_than=0):
    wd.execute_script("document.querySelector('{}').click();".format(css_load_more))
    thumbnails = wd.find_elements_by_css_selector(css_thumbnail)
    n_results = len(thumbnails)
    if n_results <= want_more_than:
        raise KeyError("no new thumbnails")
    return thumbnails

@retry(exceptions=KeyError, tries=6, delay=0.1, backoff=2, logger=retry_logger)
def get_image_src(wd):
    actual_images = wd.find_elements_by_css_selector(css_large)
    sources = []
    for img in actual_images:
        src = img.get_attribute("src")
        if src.startswith("http") and not src.startswith("https://encrypted-tbn0.gstatic.com/"):
            sources.append(src)
    if not len(sources):
        raise KeyError("no large image")
    return sources

@retry(exceptions=selenium_exceptions, tries=6, delay=0.1, backoff=2, logger=retry_logger)
def retry_click(el):
    el.click()

def get_images(wd, start=0, n=20, out=None):
    thumbnails = []
    count = len(thumbnails)
    while count < n:
        scroll_to_end(wd)
        try:
            thumbnails = get_thumbnails(wd, want_more_than=count)
        except KeyError as e:
            logger.warning("cannot load enough thumbnails")
            break
        count = len(thumbnails)
    sources = []
    for tn in thumbnails:
        try:
            retry_click(tn)
        except selenium_exceptions as e:
            logger.warning("main image click failed")
            continue
        sources1 = []
        try:
            sources1 = get_image_src(wd)
        except KeyError as e:
            pass
            # logger.warning("main image not found")
        if not sources1:
            tn_src = tn.get_attribute("src")
            if not tn_src.startswith("data"):
                logger.warning("no src found for main image, using thumbnail")
                sources1 = [tn_src]
            else:
                logger.warning("no src found for main image, thumbnail is a data URL")
        for src in sources1:
            if not src in sources:
                sources.append(src)
                if out:
                    print(src, file=out)
                    out.flush()
        if len(sources) >= n:
            break
    return sources

def google_image_search(wd, query, safe="off", n=20, opts='', out=None):
    search_url_t = "https://www.google.com/search?safe={safe}&site=&tbm=isch&source=hp&q={q}&oq={q}&gs_l=img&tbs={opts}"
    search_url = search_url_t.format(q=urllib.parse.quote(query), opts=urllib.parse.quote(opts), safe=safe)
    wd.get(search_url)
    sources = get_images(wd, n=n, out=out)
    return sources

def main():
    parser = argparse.ArgumentParser(description='Fetch image URLs from Google Image Search.')
    parser.add_argument('--safe', type=str, default="off", help='safe search [off|active|images]')
    parser.add_argument('--opts', type=str, default="", help='search options, e.g. isz:lt,islt:svga,itp:photo,ic:color,ift:jpg')
    parser.add_argument('query', type=str, help='image search query')
    parser.add_argument('n', type=int, default=20, help='number of images (approx)')
    args = parser.parse_args()

    opts = Options()
    opts.add_argument("--headless")
    # opts.add_argument("--blink-settings=imagesEnabled=false")
    with webdriver.Chrome(options=opts) as wd:
        sources = google_image_search(wd, args.query, safe=args.safe, n=args.n, opts=args.opts, out=sys.stdout)

main()
I haven't looked into your code, but here is an example solution made with Selenium that tries to get 400 pictures from the search term:
# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import json
import os
import urllib2

searchterm = 'vannmelon' # will also be the name of the folder
url = "https://www.google.co.in/search?q="+searchterm+"&source=lnms&tbm=isch"
browser = webdriver.Firefox()
browser.get(url)
header={'User-Agent':"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36"}
counter = 0
succounter = 0

if not os.path.exists(searchterm):
    os.mkdir(searchterm)

for _ in range(500):
    browser.execute_script("window.scrollBy(0,10000)")

for x in browser.find_elements_by_xpath("//div[@class='rg_meta']"):
    counter = counter + 1
    print "Total Count:", counter
    print "Succsessful Count:", succounter
    print "URL:",json.loads(x.get_attribute('innerHTML'))["ou"]

    img = json.loads(x.get_attribute('innerHTML'))["ou"]
    imgtype = json.loads(x.get_attribute('innerHTML'))["ity"]
    try:
        req = urllib2.Request(img, headers=header)  # header is already a dict
        raw_img = urllib2.urlopen(req).read()
        File = open(os.path.join(searchterm , searchterm + "_" + str(counter) + "." + imgtype), "wb")
        File.write(raw_img)
        File.close()
        succounter = succounter + 1
    except:
        print "can't get img"

print succounter, "pictures succesfully downloaded"
browser.close()
Adding to Piees's answer: for downloading any number of images from the search results, we need to simulate a click on the 'Show more results' button after the first 400 results are loaded.
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import os
import json
import urllib2
import sys
import time

# adding path to geckodriver to the OS environment variable
# assuming that it is stored at the same path as this script
os.environ["PATH"] += os.pathsep + os.getcwd()
download_path = "dataset/"

def main():
    searchtext = sys.argv[1] # the search query
    num_requested = int(sys.argv[2]) # number of images to download
    number_of_scrolls = num_requested / 400 + 1
    # number_of_scrolls * 400 images will be opened in the browser

    if not os.path.exists(download_path + searchtext.replace(" ", "_")):
        os.makedirs(download_path + searchtext.replace(" ", "_"))

    url = "https://www.google.co.in/search?q="+searchtext+"&source=lnms&tbm=isch"
    driver = webdriver.Firefox()
    driver.get(url)

    headers = {}
    headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
    extensions = {"jpg", "jpeg", "png", "gif"}
    img_count = 0
    downloaded_img_count = 0

    for _ in xrange(number_of_scrolls):
        for __ in xrange(10):
            # multiple scrolls needed to show all 400 images
            driver.execute_script("window.scrollBy(0, 1000000)")
            time.sleep(0.2)
        # to load next 400 images
        time.sleep(0.5)
        try:
            driver.find_element_by_xpath("//input[@value='Show more results']").click()
        except Exception as e:
            print "Less images found:", e
            break

    # imges = driver.find_elements_by_xpath('//div[@class="rg_meta"]') # not working anymore
    imges = driver.find_elements_by_xpath('//div[contains(@class,"rg_meta")]')
    print "Total images:", len(imges), "\n"
    for img in imges:
        img_count += 1
        img_url = json.loads(img.get_attribute('innerHTML'))["ou"]
        img_type = json.loads(img.get_attribute('innerHTML'))["ity"]
        print "Downloading image", img_count, ": ", img_url
        try:
            if img_type not in extensions:
                img_type = "jpg"
            req = urllib2.Request(img_url, headers=headers)
            raw_img = urllib2.urlopen(req).read()
            f = open(download_path+searchtext.replace(" ", "_")+"/"+str(downloaded_img_count)+"."+img_type, "wb")
            f.write(raw_img)
            f.close()
            downloaded_img_count += 1
        except Exception as e:
            print "Download failed:", e
        finally:
            print
        if downloaded_img_count >= num_requested:
            break

    print "Total downloaded: ", downloaded_img_count, "/", img_count
    driver.quit()

if __name__ == "__main__":
    main()
Full code is here.
This worked for me in Windows 10, Python 3.9.7:
pip install bing-image-downloader
The code below downloads 10 images of India from the Bing search engine to the desired output folder:
from bing_image_downloader import downloader
downloader.download('India', limit=10, output_dir='dataset', adult_filter_off=True, force_replace=False, timeout=60, verbose=True)
Documentation: https://pypi.org/project/bing-image-downloader/
You can also use Selenium with Python. Here is how:
from selenium import webdriver
import urllib
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
import urllib.request
driver = webdriver.Firefox()
word="apple"
url="http://images.google.com/search?q="+word+"&tbm=isch&sout=1"
driver.get(url)
imageXpathSelector='/html/body/div[2]/c-wiz/div[3]/div[1]/div/div/div/div/div[1]/div[1]/span/div[1]/div[1]/div[1]/a[1]/div[1]/img'
img=driver.find_element(By.XPATH,imageXpathSelector)
src=(img.get_attribute('src'))
urllib.request.urlretrieve(src, word+".jpg")
driver.close()
(This code works on Python 3.8)
Please be informed that you should install the Selenium package with 'pip install selenium'
Contrary to the other web scraping techniques, Selenium opens the browser and downloads the items because Selenium's mission is testing rather than scraping.
N.B. If imageXpathSelector does not work, press F12 while your browser is open, right-click the image, choose 'Copy' from the menu that opens, and select 'Copy XPath'. That gives you the correct XPath of the element you need.
This one, like the other code snippets, has grown old and no longer worked for me. It downloads 100 images for each keyword, inspired by one of the solutions above.
from bs4 import BeautifulSoup
import urllib2
import os


class GoogleeImageDownloader(object):
    _URL = "https://www.google.co.in/search?q={}&source=lnms&tbm=isch"
    _BASE_DIR = 'GoogleImages'
    _HEADERS = {
        'User-Agent':"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36"
    }

    def __init__(self):
        query = raw_input("Enter keyword to search images\n")
        self.dir_name = os.path.join(self._BASE_DIR, query.split()[0])
        self.url = self._URL.format(urllib2.quote(query))
        self.make_dir_for_downloads()
        self.initiate_downloads()

    def make_dir_for_downloads(self):
        print "Creating necessary directories"
        if not os.path.exists(self._BASE_DIR):
            os.mkdir(self._BASE_DIR)
        if not os.path.exists(self.dir_name):
            os.mkdir(self.dir_name)

    def initiate_downloads(self):
        src_list = []
        soup = BeautifulSoup(urllib2.urlopen(urllib2.Request(self.url,headers=self._HEADERS)),'html.parser')
        for img in soup.find_all('img'):
            if img.has_attr("data-src"):
                src_list.append(img['data-src'])
        print "{} of images collected for downloads".format(len(src_list))
        self.save_images(src_list)

    def save_images(self, src_list):
        print "Saving Images..."
        for i , src in enumerate(src_list):
            try:
                req = urllib2.Request(src, headers=self._HEADERS)
                raw_img = urllib2.urlopen(req).read()
                with open(os.path.join(self.dir_name , str(i)+".jpg"), 'wb') as f:
                    f.write(raw_img)
            except Exception as e:
                print ("could not save image")
                raise e

if __name__ == "__main__":
    GoogleeImageDownloader()
I know this question is old, but I ran across it recently and none of the previous answers work anymore. So I wrote this script to gather images from google. As of right now it can download as many images as are available.
here is a github link to it as well https://github.com/CumminUp07/imengine/blob/master/get_google_images.py
DISCLAIMER: DUE TO COPYRIGHT ISSUES, IMAGES GATHERED SHOULD ONLY BE USED FOR RESEARCH AND EDUCATION PURPOSES ONLY
from bs4 import BeautifulSoup as Soup
import urllib2
import json
import urllib

#programtically go through google image ajax json return and save links to list#
#num_images is more of a suggestion                                             #
#it will get the ceiling of the nearest 100 if available                        #
def get_links(query_string, num_images):
    #initialize place for links
    links = []
    #step by 100 because each return gives up to 100 links
    for i in range(0,num_images,100):
        url = 'https://www.google.com/search?ei=1m7NWePfFYaGmQG51q7IBg&hl=en&q='+query_string+'\
        &tbm=isch&ved=0ahUKEwjjovnD7sjWAhUGQyYKHTmrC2kQuT0I7gEoAQ&start='+str(i)+'\
        &yv=2&vet=10ahUKEwjjovnD7sjWAhUGQyYKHTmrC2kQuT0I7gEoAQ.1m7NWePfFYaGmQG51q7IBg.i&ijn=1&asearch=ichunk&async=_id:rg_s,_pms:s'
        #set user agent to avoid 403 error
        request = urllib2.Request(url, None, {'User-Agent': 'Mozilla/5.0'})
        #returns json formatted string of the html
        json_string = urllib2.urlopen(request).read()
        #parse as json
        page = json.loads(json_string)
        #html found here
        html = page[1][1]
        #use BeautifulSoup to parse as html
        new_soup = Soup(html,'lxml')
        #all img tags, only returns results of search
        imgs = new_soup.find_all('img')
        #loop through images and put src in links list
        for j in range(len(imgs)):
            links.append(imgs[j]["src"])
    return links

#download images                                 #
#takes list of links, directory to save to       #
#and prefix for file names                       #
#saves images in directory as a one up number    #
#with prefix added                               #
#all images will be .jpg                         #
def get_images(links,directory,pre):
    for i in range(len(links)):
        urllib.urlretrieve(links[i], "./"+directory+"/"+str(pre)+str(i)+".jpg")

#main function to search images                  #
#takes two lists, base term and secondary terms  #
#also takes number of images to download per     #
#combination                                     #
#it runs every combination of search terms       #
#with base term first then secondary             #
def search_images(base,terms,num_images):
    for y in range(len(base)):
        for x in range(len(terms)):
            all_links = get_links(base[y]+'+'+terms[x],num_images)
            get_images(all_links,"images",x)

if __name__ == '__main__':
    terms = ["cars","numbers","scenery","people","dogs","cats","animals"]
    base = ["animated"]
    search_images(base,terms,1000)
Instead of google image search, try other image searches like ecosia or bing.
Here is a sample code for retrieving images from ecosia search engine.
from bs4 import BeautifulSoup
import requests
import urllib.request

user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'
headers = {'User-Agent':user_agent}
urls = ["https://www.ecosia.org/images?q=india%20pan%20card%20example"]
#The url's from which the image is to be extracted.
index = 0

for url in urls:
    request = urllib.request.Request(url, None, headers)  #The assembled request
    response = urllib.request.urlopen(request)
    data = response.read()  # Read the html result page

    soup = BeautifulSoup(data, 'html.parser')
    for link in soup.find_all('img'):
        #The images are enclosed in 'img' tag and the 'src' contains the url of the image.
        img_url = link.get('src')
        dest = str(index) + ".jpg"  #Destination to store the image.
        try:
            urllib.request.urlretrieve(img_url, dest)  # pass the destination, otherwise a temp file is used
            index += 1
        except:
            continue
The code runs against a Google image search URL as well, but it fails to retrieve the images because Google stores the images in an encrypted format that is difficult to retrieve from the image URL.
This solution works as of 1 Feb 2021.
Okay, so instead of coding this for you, I am going to tell you what you're doing wrong, and it might lead you in the right direction. Most modern websites render their HTML dynamically via JavaScript, so if you simply send a GET request (with urllib/cURL/fetch/axios) you won't get what you see in the browser at the same URL. What you need is something that executes the JavaScript to build the same HTML/webpage you see in your browser; you can use something like Selenium with the gecko driver for Firefox to do this, and there are Python modules that let you do it.
I hope this helps. If you still feel lost, here's a simple script I wrote a while back to extract something similar from your Google Photos:
from selenium import webdriver
import re

url = "https://photos.app.goo.gl/xxxxxxx"
driver = webdriver.Firefox()
driver.get(url)
regPrms = "^background-image\:url\(.*\)$"
regPrms = "^The.*Spain$"
html = driver.page_source
urls = re.findall("(?P<url>https?://[^\s\"$]+)", html)
fin = []
for url in urls:
    if "video-downloads" in url:
        fin.append(url)
print("The Following ZIP contains all your pictures")
for url in fin:
    print("-------------------")
    print(url)
You can achieve this using selenium as others mentioned it above.
Alternatively, you can try using Google Images API from SerpApi. Check out the playground.
Code and example. The function to download images was taken from this answer:
import os, time, shutil, httpx, asyncio
from urllib.parse import urlparse
from serpapi import GoogleSearch

# https://stackoverflow.com/a/39217788/1291371
async def download_file(url):
    print(f'Downloading {url}')
    # https://stackoverflow.com/a/18727481/1291371
    parsed_url = urlparse(url)
    local_filename = os.path.basename(parsed_url.path)

    os.makedirs('images', exist_ok=True)

    async with httpx.AsyncClient() as client:
        async with client.stream('GET', url) as response:
            with open(f'images/{local_filename}', 'wb') as f:  # open() is a plain context manager, not an async one
                await asyncio.to_thread(shutil.copyfileobj, response.raw, f)

    return local_filename

async def main():
    start = time.perf_counter()

    params = {
        "engine": "google",
        "ijn": "0",
        "q": "lasagna",
        "tbm": "isch",
        "api_key": os.getenv("API_KEY"),
    }

    search = GoogleSearch(params)
    results = search.get_dict()

    download_files_tasks = [
        download_file(image['original']) for image in results['images_results']
    ]

    await asyncio.gather(*download_files_tasks, return_exceptions=True)

    print(
        f"Downloaded {len(download_files_tasks)} images in {time.perf_counter() - start:0.4f} seconds")

asyncio.run(main())
Disclaimer, I work for SerpApi.
The one I used is :
https://github.com/hellock/icrawler
This package is a mini framework of web crawlers. With modularization design, it is easy to use and extend. It supports media data like images and videos very well, and can also be applied to texts and another type of files. Scrapy is heavy and powerful, while icrawler is tiny and flexible.
from argparse import ArgumentParser

def main():
    parser = ArgumentParser(description='Test built-in crawlers')
    parser.add_argument(
        '--crawler',
        nargs='+',
        default=['google', 'bing', 'baidu', 'flickr', 'greedy', 'urllist'],
        help='which crawlers to test')
    args = parser.parse_args()
    for crawler in args.crawler:
        eval('test_{}()'.format(crawler))
        print('\n')
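For basic use, a minimal sketch along the lines of the project's documented quickstart (the keyword and output directory are illustrative):
from icrawler.builtin import GoogleImageCrawler

# crawl up to 100 images for a keyword into the given directory
google_crawler = GoogleImageCrawler(storage={'root_dir': 'images/cats'})
google_crawler.crawl(keyword='cat', max_num=100)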
