With the following code, every image is saved twice. How can I skip images that have already been saved?
import urllib.request
from bs4 import BeautifulSoup

def make_soup(url):
    thepage = urllib.request.urlopen(url)
    soupdata = BeautifulSoup(thepage, "html.parser")
    return soupdata

i = 1
soup = make_soup("https://www./")
for img in soup.findAll('img'):
    temp = img.get('src')
    image = temp
    if str(image):
        filename = str(i)
        i = i + 1
        imagefile = open(filename + '.png', 'wb')
        imagefile.write(urllib.request.urlopen(image).read())
        imagefile.close()
You can use a set to filter out the duplicates, like so:
unique_srcs = list(set([img.get('src') for img in soup.findAll('img')]))

for img_src in unique_srcs:
    filename = str(i)
    i = i + 1
    imagefile = open(filename + '.png', 'wb')
    imagefile.write(urllib.request.urlopen(img_src).read())
    imagefile.close()
Now, bear in mind that this may change the order of the files, since sets are unordered. If you can't afford to change the order, loop through the soup.findAll() list, check whether each element's src is already in a separate unique_srcs list, append it only if it isn't, and then loop over that list as above.
EDIT:
To keep the order, build unique_srcs with this loop instead of the list comprehension:
unique_srcs = []
for img in soup.findAll('img'):
    if img.get('src') not in unique_srcs:
        unique_srcs.append(img.get('src'))
Your first element would then be unique_srcs[0] (logo in your case).
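As an aside, on Python 3.7+ (where dicts preserve insertion order) the same order-preserving deduplication can be a one-liner; a sketch:

# dict.fromkeys keeps only the first occurrence of each src, in document order
unique_srcs = list(dict.fromkeys(img.get('src') for img in soup.findAll('img')))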
I have some code that allows for the downloading of various comics off of xkcd. This code is gathered from Al Sweigart's book Automate the Boring Stuff with Python, with some minor edits made by me.
I understand most of what is going on. What's confusing is that the 'soup' BeautifulSoup object, which is made from a request named 'r', continues to provide information from the page throughout the code, even though 'r' is re-instantiated in the function 'download_image()'.
Even more confusing is that if the 'r' found in 'download_image()' is renamed to something other than 'r', the code will break.
Code:
import requests
import os
import bs4

os.makedirs('xkcd', exist_ok=True)
page = input('What issue of xkcd would you like to download? (*all for all comics, *today for today\'s comic): ')
url = 'http://xkcd.com/'

def download_image():
    comic_url = 'http:' + comic[0].get('src')  # page with just the image
    r = requests.get(comic_url)  # switches to that page
    # gets file with directory xkcd/name of comic
    try:
        issue_number = str(int(str(soup.select('a[rel="prev"]')[0].get('href'))[1:-1]) + 1)
    except ValueError:
        issue_number = '1'
    name = os.path.basename(comic_url[:-4] + "_" + issue_number + ".png")
    file = open(os.path.join('xkcd', name), 'wb')
    print("Downloading image %s... " % name)
    # writes to file
    for chunk in r.iter_content(100000):
        file.write(chunk)
    file.close()

if page == '*all':
    url = 'http://xkcd.com/5'
    while not url.endswith('#'):
        r = requests.get(url)
        soup = bs4.BeautifulSoup(r.text, 'html.parser')
        comic = soup.select('#comic img')
        download_image()
        prev_link = soup.select('a[rel="prev"]')[0]
        url = 'http://xkcd.com/' + prev_link.get('href')
else:
    if page == '*today':
        page = ''
    r = requests.get(url + page)
    soup = bs4.BeautifulSoup(r.text, 'html.parser')
    comic = soup.select('#comic img')
    if not comic:
        print("Comic not found.")
    else:
        download_image()

"""
r = requests.get('https://imgs.xkcd.com/comics/python.png')
# makes file and write the file in bytes to it
with open('comic.png', 'wb') as f:
    f.write(r.content)
"""
Does anyone know why the soup variable continues to work after re-defining the r variable?
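For what it's worth, a minimal sketch of the scoping behavior in play (the names here are stand-ins, not the question's actual data):

# a module-level (global) name, like 'soup' in the question
soup = "already-parsed page"

def download_image():
    # assigning to 'r' here creates a *local* name; it does not touch any
    # module-level 'r', and it cannot affect 'soup', which was built from
    # the earlier response text before this function was called
    r = "new response"
    print(soup)  # reads the global 'soup' just fine

download_image()  # prints: already-parsed page

In short, BeautifulSoup holds its own parsed tree once constructed, so re-assigning r later never reaches back into soup.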
My current code cuts the first 6 characters from file names while downloading PDFs. For example, the PDF file name is 123456acII.pdf (https://example.com/wp-content/uploads/2016/11/123456acII.pdf), but the file in the folder is acII.pdf.
How can I make the names match the originals?
import os
import requests
from urllib.parse import urljoin
from bs4 import BeautifulSoup

main = "https://example.com/"

# If there is no such folder, the script will create one automatically
folder_location = r'C:\temp\webscraping'
if not os.path.exists(folder_location):
    os.mkdir(folder_location)

def Get_Links():
    r = requests.get(main).text
    soup = BeautifulSoup(r, 'html.parser')
    links = []
    for item in soup.findAll("div", {'class': 'large-4 medium-4 columns'}):
        for n in item.find_all('a'):
            print('Link: ' + n.get('href'))
            links.append(n.get('href'))
    return links

def Parse_Links():
    pdf = set()
    for url in Get_Links():
        r = requests.get(url).text
        soup = BeautifulSoup(r, 'html.parser')
        for item in soup.findAll("div", {'class': 'large-6 medium-8 columns large-centered'}):
            for link in item.findAll("a"):
                link = link.get("href")
                if link:
                    pdf.add(link)
    return pdf

def Save():
    for item in Parse_Links():
        print(f"Downloading File: {item[55:]}")
        filename = os.path.join(folder_location, f"{item[55:]}")
        r = requests.get(item)
        with open(filename, 'wb') as f:
            f.write(r.content)
    print("done")

Save()
It looks like you are slicing the string starting at index position 55: {item[55:]}. Try moving your start index 6 positions earlier:
change to: {item[49:]}
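A more robust alternative, sketched here under the assumption that each link ends with the actual file name, is to derive the name from the URL path instead of hard-coding a slice index:

import os
from urllib.parse import urlparse

item = "https://example.com/wp-content/uploads/2016/11/123456acII.pdf"
# the basename of the URL path gives the file name regardless of URL length
print(os.path.basename(urlparse(item).path))  # 123456acII.pdf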
I am trying to create a Python web scraper that downloads a certain number of images from a URL to my current directory. However, the following line:
urllib.request.urlretrieve(each, filename)
raises AttributeError: 'function' object has no attribute 'urlretrieve' when running the program.
Here is the full code:
from urllib.request import urlopen
from bs4 import BeautifulSoup as soup

url = 'https://unsplash.com/s/photos/download'

def download_imgs(url, amountOfImgs):
    html = urlopen(url).read()
    # parsing the html from the url
    page_soup = soup(html, "html.parser")
    images = [img for img in page_soup.findAll('img')]
    counter = 0
    # compiling the unicode list of image links
    image_links = [each.get('src') for each in images]
    for each in image_links:
        if(counter <= amountOfImgs):
            filename = each.split('/')[-1]
            urllib.request.urlretrieve(each, filename)
            counter += 1
        else:
            return image_links

print(download_imgs(url, 5))
It looks like when you imported just urlopen, you missed everything else. I did it a bit differently: I got the HTML using the requests.get method, which removes the need for urlopen. Alternatively, you could just do
from urllib.request import urlopen, urlretrieve
and call urlretrieve(each, filename) directly (a sketch follows my code).
If you want to use mine, I know it worked:
import urllib.request
from bs4 import BeautifulSoup as soup
import requests

url = 'https://unsplash.com/s/photos/download'

def download_imgs(url, amountOfImgs):
    req = requests.get(url)
    html = req.text
    # parsing the html from the url
    page_soup = soup(html, "html.parser")
    images = [img for img in page_soup.findAll('img')]
    counter = 0
    # compiling the unicode list of image links
    image_links = [each.get('src') for each in images]
    for each in image_links:
        if(counter <= amountOfImgs):
            filename = each.split('/')[-1]
            urllib.request.urlretrieve(each, filename)
            counter += 1
        else:
            return image_links

print(download_imgs(url, 5))
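And the minimal sketch of the direct fix to the original code, assuming you keep urlopen rather than switching to requests:

# import both names so neither needs the (never-imported) urllib prefix
from urllib.request import urlopen, urlretrieve

# then, inside the download loop, the failing line becomes simply:
# urlretrieve(each, filename)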
My error:
File "C:/Users/hp dv4/PycharmProjects/project/imagescrap.py", line
22, in
imagefile.write(urllib.request.urlopen(img_src).read())
ValueError: unknown url type: '/img/logo_with_text.png'
I am getting this error while crawling the specified website, whereas the same code works fine with some other websites.
import urllib.request
from bs4 import BeautifulSoup

def make_soup(url):
    thepage = urllib.request.urlopen(url)
    soupdata = BeautifulSoup(thepage, "html.parser")
    return soupdata

i = 1
soup = make_soup("http://ioe.edu.np/")
unique_srcs = []
for img in soup.findAll('img'):
    if img.get('src') not in unique_srcs:
        unique_srcs.append(img.get('src'))

for img_src in unique_srcs:
    filename = str(i)
    i = i + 1
    imagefile = open(filename + '.png', 'wb')
    imagefile.write(urllib.request.urlopen(img_src).read())
    imagefile.close()
The above code will encounter one more error: you are trying to save every file with a .png extension, which may make the files unreadable.
import urllib.request
from bs4 import BeautifulSoup

def make_soup(url):
    thepage = urllib.request.urlopen(url)
    soupdata = BeautifulSoup(thepage, "html.parser")
    return soupdata

base_url = "http://ioe.edu.np/"
soup = make_soup(base_url)
unique_srcs = []
for img in soup.findAll('img'):
    if img.get('src') not in unique_srcs:
        unique_srcs.append(img.get('src'))

for i, img_src in enumerate(unique_srcs):
    print(img_src)
    filename = str(i)
    extension = img_src.split('.')[-1]
    with open(filename + '.' + extension, 'wb') as f:
        f.write(urllib.request.urlopen(base_url + img_src).read())
A few idiomatic Python suggestions:
- use enumerate instead of trying to manage a counter.
- use the with-open construct, which takes care of closing your file.
One other thing you could do to further improve:
- use a set instead of a list, so that you don't download the same file twice (a minimal sketch follows).
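For that last point, a sketch (note that a plain set does not preserve document order, which may or may not matter here):

# a set ignores duplicates automatically, so each src is stored once
unique_srcs = set()
for img in soup.findAll('img'):
    src = img.get('src')
    if src:  # skip <img> tags that have no src attribute
        unique_srcs.add(src)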
As the error message says:
unknown url type: '/img/logo_with_text.png'
The src here is a relative URL; add http://ioe.edu.np/ in front of img_src and it should work.
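A sketch of a more general fix, assuming pages may mix absolute and relative srcs: urllib.parse.urljoin resolves relative URLs against a base and leaves absolute URLs untouched.

from urllib.parse import urljoin

base_url = 'http://ioe.edu.np/'
# relative src: resolved against the base
print(urljoin(base_url, '/img/logo_with_text.png'))   # http://ioe.edu.np/img/logo_with_text.png
# absolute src: returned unchanged
print(urljoin(base_url, 'http://example.com/x.png'))  # http://example.com/x.png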
I want to save images from a URL to a specific folder, for example 'my_images', instead of the default one (where my *.py file is). Is it possible to do that?
Right now my code saves all images to the folder containing the *.py file.
Here is my code:
import urllib.request
from bs4 import BeautifulSoup
import re
import os

BASE_URL = 'https://fachowiec.com/sklep/pl/products/index?Products_page=1&pageSize=15'

def get_domain(url):
    domain = re.findall(r'https:\W\W\w+\.\w+', url)
    return domain[0]

def get_html(url):
    request = urllib.request.urlopen(url)
    return request.read()

def get_img(html):
    soup = BeautifulSoup(html)
    img_box = []
    imgs = soup.find_all('div', class_='pthumb')
    for img in imgs:
        img_box.append(get_domain(BASE_URL) + img.img['src'])
    for img in img_box:
        urllib.request.urlretrieve(img, os.path.basename(img))

def main():
    get_img(get_html('https://fachowiec.com/sklep/pl/products/index?Products_page=1&pageSize=15'))

if __name__ == '__main__':
    main()
def get_img(html):
    soup = BeautifulSoup(html)
    img_box = []
    imgs = soup.find_all('div', class_='pthumb')
    for img in imgs:
        img_box.append(get_domain(BASE_URL) + img.img['src'])
    my_path = '/home/<username>/Desktop'  # use whatever path you like
    for img in img_box:
        urllib.request.urlretrieve(img, os.path.join(my_path, os.path.basename(img)))
You should pass the destination path in the second parameter of urllib.request.urlretrieve. Something like below:
urllib.request.urlretrieve(img, os.path.join("PATH", os.path.basename(img)))
The second argument, if present, specifies the file location to copy to (if absent, the location will be a tempfile with a generated name).
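Putting this together, a minimal sketch; 'my_images' is the folder name the question suggested, and the image URL here is hypothetical:

import os
import urllib.request

target_dir = 'my_images'
os.makedirs(target_dir, exist_ok=True)  # create the folder only if it is missing

img = 'https://fachowiec.com/sklep/pl/img/sample.png'  # hypothetical URL for illustration
urllib.request.urlretrieve(img, os.path.join(target_dir, os.path.basename(img)))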