Trying to download .pdf, .png and .jpg files from nytimes.com - python

I wrote a simple Python scraper to grab some documents from a specific page on nytimes.com. It works, in the sense that it finds and formats all the URLs correctly, attempts to download the files, and formats the names correctly.
But all I get are 1 KB files, and I can't figure out why. Here is my code:
import urllib2
import urllib
from cookielib import CookieJar

files = 'http://www.nytimes.com/interactive/2014/11/25/us/evidence-released-in-michael-brown-case.html?_r=0'
slashpos = 0

def getLinks(url):
    cj = CookieJar()
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
    p = opener.open(url)
    result = []
    for line in p:
        for element in line.split():
            if element.startswith('href="http://gr'):
                if element.endswith('pdf"') or element.endswith('png"') or element.endswith('jpg"'):
                    result.append(element[6:])
                else:
                    continue
    for char in result:
        slashpos = char.rfind('/') + 1
        urllib.urlretrieve(char, char[slashpos:-1])

getLinks(files)
Any and all help is appreciated. Thanks!

1) Use result.append(element[6:-1]) instead of result.append(element[6:]). This strips the trailing double quote from the URL, which is the reason the downloads fail.
2) For saving the file, use urllib.urlretrieve(char, char[slashpos:]) instead of urllib.urlretrieve(char, char[slashpos:-1]), so the file name is not truncated.
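Putting both fixes together, the download part of getLinks would look roughly like this (a minimal sketch, reusing the question's Python 2 imports and variable names; only the two slicing changes differ from the original):

import urllib2
import urllib
from cookielib import CookieJar

def getLinks(url):
    cj = CookieJar()
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
    p = opener.open(url)
    result = []
    for line in p:
        for element in line.split():
            if element.startswith('href="http://gr'):
                if element.endswith('pdf"') or element.endswith('png"') or element.endswith('jpg"'):
                    result.append(element[6:-1])   # 6:-1 drops href=" and the trailing quote
    for link in result:
        slashpos = link.rfind('/') + 1
        urllib.urlretrieve(link, link[slashpos:])  # keep the full file name, including extension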

Solved !! :D
#!/usr/bin/env python
from bs4 import BeautifulSoup
import urllib2
import urlparse
from sys import argv
from cookielib import CookieJar

if len(argv) != 2:
    print "Usage:\n\tpython %s 'http://www.nytimes.com/interactive/2014/11/25/us/evidence-released-in-michael-brown-case.html?_r=0'" % argv[0]
    exit()

url = argv[1]
urls = []

try:
    cj = CookieJar()
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
    html = opener.open(url)
except:
    print "[-] No such website"
    exit()

soup = BeautifulSoup(html)
for tag in soup.find_all('a'):
    try:
        tag["href"] = urlparse.urljoin(url, tag['href'])
        # parentheses so only new .png/.jpg links are downloaded
        if tag['href'] not in urls and ('.png' in tag['href'] or '.jpg' in tag['href']):
            newpdf = tag['href'].split("/")
            name = newpdf[-1]
            resp = urllib2.urlopen(tag['href'])
            meta_data = resp.info()
            fsize = int(meta_data.getheaders("Content-Length")[0])
            print "Downloading --> %s \t size: %s " % (name, fsize)
            f = open(name, "wb")
            f.write(resp.read())
            f.close()
            urls.append(tag["href"])
        else:
            print tag['href']
    except KeyboardInterrupt:
        print " User hit CTRL+C"
        exit()
    except:
        pass
Hopefully this is helpful for you.

Related

How to fix HTTP Error 403: Forbidden in webscraping

I ran the code below. Most of it works, but when I ran the "for elm in collect" block, I got an error: HTTPError: HTTP Error 403: Forbidden. Can anyone help with this? Thanks!!
import requests
from bs4 import BeautifulSoup
import urllib.request
import os

resp = requests.get('https://www.williams.edu/institutional-research/common-data-set/',
                    headers={'User-Agent': 'Mozilla/5.0'})
soup = BeautifulSoup(resp.text, 'html5lib')
links = [a['href'] for a in soup.select('li a[href]')]
collect = []
for link in links:
    if "https://www.williams.edu/institutional-research/files/" in link:
        collect.append(link)

for elm in collect:
    def main():
        download_file(elm)  # the elm is an url.

    def download_file(download_url):  # the download_url is the elm.
        save_path = 'C:/Users/WM'
        file_name = elm.split("/")[-1]
        complete_name = os.path.join(save_path, file_name)
        response = urllib.request.urlopen(download_url)
        file = open(complete_name, 'wb')
        file.write(response.read())
        file.close()
        print("Completed")

    if __name__ == "__main__":
        main()
Not sure why there is a mixed use of requests and urllib in your code. Just request the download_url in the loop, as you do with the initial url, and add a header:
response = requests.get(download_url, headers={'User-Agent': 'Mozilla/5.0'})
Example
import requests
from bs4 import BeautifulSoup
import os

resp = requests.get('https://www.williams.edu/institutional-research/common-data-set/',
                    headers={'User-Agent': 'Mozilla/5.0'})
soup = BeautifulSoup(resp.text, 'html5lib')
links = [a['href'] for a in soup.select('li a[href]')]
collect = []
for link in links:
    if "https://www.williams.edu/institutional-research/files/" in link:
        collect.append(link)

for elm in collect:
    def main():
        download_file(elm)  # the elm is an url.

    def download_file(download_url):  # the download_url is the elm.
        save_path = 'C:/Users/WM'
        file_name = elm.split("/")[-1]
        complete_name = os.path.join(save_path, file_name)
        response = requests.get(download_url, headers={'User-Agent': 'Mozilla/5.0'})
        file = open(complete_name, 'wb')
        file.write(response.content)  # a requests response exposes the body via .content, not .read()
        file.close()
        print("Completed")

    if __name__ == "__main__":
        main()

Beautiful Soup Object Still Works After the Requests Object it is Attached to Changes

I have some code that downloads various comics from xkcd. The code is adapted from Al Sweigart's book Automate the Boring Stuff with Python, with some minor edits made by me.
I understand most of what is going on. What's confusing is that the 'soup' BeautifulSoup object, which is built from a request named 'r', continues to provide information from the page throughout the code, even though 'r' is re-assigned inside the function 'download_image()'.
Even more confusing is that if the 'r' found in 'download_image()' is renamed to something other than 'r', the code will break.
Code:
import requests
import os
import bs4

os.makedirs('xkcd', exist_ok=True)
page = input('What issue of xkcd would you like to download? (*all for all comics, *today for today\'s comic): ')
url = 'http://xkcd.com/'

def download_image():
    comic_url = 'http:' + comic[0].get('src')  # page with just the image
    r = requests.get(comic_url)  # switches to that page
    # gets file with directory xkcd/name of comic
    try:
        issue_number = str(int(str(soup.select('a[rel="prev"]')[0].get('href'))[1:-1]) + 1)
    except ValueError:
        issue_number = '1'
    name = os.path.basename(comic_url[:-4] + "_" + issue_number + ".png")
    file = open(os.path.join('xkcd', name), 'wb')
    print("Downloading image %s... " % name)
    # writes to file
    for chunk in r.iter_content(100000):
        file.write(chunk)
    file.close()

if page == '*all':
    url = 'http://xkcd.com/5'
    while not url.endswith('#'):
        r = requests.get(url)
        soup = bs4.BeautifulSoup(r.text, 'html.parser')
        comic = soup.select('#comic img')
        download_image()
        prev_link = soup.select('a[rel="prev"]')[0]
        url = 'http://xkcd.com/' + prev_link.get('href')
else:
    if page == '*today':
        page = ''
    r = requests.get(url + page)
    soup = bs4.BeautifulSoup(r.text, 'html.parser')
    comic = soup.select('#comic img')
    if not comic:
        print("Comic not found.")
    else:
        download_image()

"""
r = requests.get('https://imgs.xkcd.com/comics/python.png')
# makes file and write the file in bytes to it
with open('comic.png', 'wb') as f:
    f.write(r.content)
"""
Does anyone know why the soup variable continues to work after re-defining the r variable?
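A minimal sketch of what is going on (illustrative only, not from the book): BeautifulSoup parses the response text into its own tree when it is constructed, so soup no longer depends on r afterwards, and the r inside download_image() is a local name that only shadows the module-level r; rebinding it does not touch the data soup was built from.

import requests
import bs4

r = requests.get('https://xkcd.com/353/')           # first response
soup = bs4.BeautifulSoup(r.text, 'html.parser')     # soup keeps its own copy of the parsed HTML

def fetch_image():
    # this r is local to the function; rebinding it has no effect on soup
    r = requests.get('https://imgs.xkcd.com/comics/python.png')
    return r.content

data = fetch_image()
print(soup.select('#comic img')[0].get('src'))      # soup still answers queries as before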

Unable to download multiple files with this python script

This script uses Beautiful Soup to find all the pdf documents linked on a particular page of a web site. It successfully downloads one file but will not download all of the files that are returned. I need help making it download every pdf document that I have parsed.
I have done research but have found no answers.
import requests
from bs4 import BeautifulSoup
import html5lib
import lxml

#RFP_Import = ('http://www.staffordmsd.org/cms/One.aspx? portalId=895956&pageId=1606144')
RFP_Import = ('http://www.staffordmsd.org/departments/business_operations/bids_and_proposals')
place_hoder = ('http://www.staffordmsd.org')

def get_pdf_links():
    r = requests.get(RFP_Import)
    soup = BeautifulSoup(r.content, 'html5lib')
    links = soup.find_all('a')
    pdf_links = [place_hoder + link['href'] for link in links if link['href'].endswith('pdf')]
    return pdf_links

def download_pdf_links(pdf_links):
    for link in pdf_links:
        file_name = link.split("/")[-1]
        print("Downloading file:%s" % file_name)
        r = requests.get(link, stream=True)
        with open(file_name, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024*1024):
                if chunk:
                    f.write(chunk)
        print('%s downloaded!\n' % file_name)
        print('all RFPs downloaded!')
        return

if __name__ == "__main__":
    pdf_links = get_pdf_links()
    download_pdf_links(pdf_links)
It successfully downloads the first pdf document and then stops.
Inside download_pdf_links(), return is misaligned: it should be aligned with for. Otherwise it is part of the for loop's body and the function returns after the first iteration.
This is probably also true for print('all RFPs downloaded!'). I guess you want that printed at the end of the for loop, after you have been through all the links.
In download_pdf_links you are using return inside your loop, which returns after the first iteration and stops downloading files. You need to return after the loop finishes, by putting it at the same indentation as the start of the loop, like this:
def download_pdf_links(pdf_links):
    for link in pdf_links:
        file_name = link.split("/")[-1]
        print("Downloading file:%s" % file_name)
        r = requests.get(link, stream=True)
        with open(file_name, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024*1024):
                if chunk:
                    f.write(chunk)
        print('%s downloaded!\n' % file_name)
    # Un-indented so it happens after the loop finishes.
    print('all RFPs downloaded!')
    return

When trying to extract meta data out of images on webpages, keeps returning {}, why?

I've looked at the exifread documentation and it says that the tags are returned as a dictionary, but the problem is that it returns nothing except {}. I don't know if that means there is no meta data in the image or I made a newbie mistake. Anyway, I've spent a good chunk of time looking at my code and the documentation but still can't find the solution; any help would be appreciated :)
Code:
import exifread
import colorama
import urllib2
import urllib
import random
import time
import bs4
import sys

def get_images(target):
    colorama.init()
    print(colorama.Fore.LIGHTGREEN_EX + "[*] Retrieving Meta Data from Target's Page...")
    req = urllib2.Request(target)
    resp = urllib2.urlopen(req)
    page = resp.read()
    soup = bs4.BeautifulSoup(page, "html.parser")
    for img in soup.find_all("img"):
        src = img.get("src")
        if "www" in src or "http" in src or "https" in src:
            rand_num = random.random()
            name = str(rand_num) + ".jpg"
            urllib.urlretrieve(src, name)
            f = open(name, "rb")
            tags = exifread.process_file(f)
            print (tags)
        else:
            s = target + src
            rand_num = random.random()
            name = str(rand_num) + ".jpg"
            urllib.urlretrieve(s, name)
            f = open(name, "rb")
            tags = exifread.process_file(f)
            print (tags)
    return

def main():
    target = raw_input("Enter the target: ")
    print ("\n")
    get_images(target)
    time.sleep(5)
    sys.exit()

if __name__ == "__main__":
    main()
The problem is that you were not passing a base url; you need to take the host and join it to the src unless the src attribute already gives you an absolute url.
The following code demonstrates a working example; I used requests in place of urllib, but the logic is the same:
import bs4
import sys
import os
import exifread
import requests
from urlparse import urljoin

def get_images(target, base):
    page = requests.get(target).content
    soup = bs4.BeautifulSoup(page, "html.parser")
    for img in soup.find_all("img", src=True):
        src = img.get("src")
        name = os.path.basename(src)
        if not src.startswith(("www.", "http:", "https:")):
            src = urljoin(base, src)
        with open(name, "wb+") as f:
            f.write(requests.get(src).content)
            f.seek(0)
            tags = exifread.process_file(f)  # process_file takes the open binary file object
            print (tags)

def main():
    target = "http://www.exiv2.org/sample.html"
    # need base to join to relative src
    base = "http://www.exiv2.org/"
    get_images(target, base)

if __name__ == "__main__":
    main()
You will get the exif data for the one image on the page that has some.
A PIL example:
import bs4
import os
import requests
from urlparse import urljoin
import PIL.Image

def get_images(target, base):
    page = requests.get(target).content
    soup = bs4.BeautifulSoup(page, "html.parser")
    for img in soup.find_all("img"):
        src = img.get("src")
        name = os.path.basename(src)
        if not src.startswith(("www.", "http:", "https:")):
            src = urljoin(base, src)
        with open(name, "wb+") as f:
            f.write(requests.get(src).content)
            f.seek(0)
            try:
                img = PIL.Image.open(f)
                exif_data = img._getexif()
                print(exif_data)
            except AttributeError as e:
                print("No exif data for {}".format(name))
                os.remove(name)
os.remove(name) will remove files that have no exif data; if you don't want that to happen, remove that line.

How to download a file from a link that doesn't have the filename in python [duplicate]

I need to download several files via http in Python.
The most obvious way to do it is just using urllib2:
import urllib2
u = urllib2.urlopen('http://server.com/file.html')
localFile = open('file.html', 'w')
localFile.write(u.read())
localFile.close()
But I'll have to deal with URLs that are nasty in some way, like this: http://server.com/!Run.aspx/someoddtext/somemore?id=121&m=pdf. When downloaded via the browser, the file has a human-readable name, i.e. accounts.pdf.
Is there any way to handle that in python, so I don't need to know the file names and hardcode them into my script?
Download scripts like that tend to push a header telling the user-agent what to name the file:
Content-Disposition: attachment; filename="the filename.ext"
If you can grab that header, you can get the proper filename.
There's another thread that has a little bit of code to offer up for Content-Disposition-grabbing.
remotefile = urllib2.urlopen('http://example.com/somefile.zip')
remotefile.info()['Content-Disposition']
Based on comments and Oli's answer, I made a solution like this:
from os.path import basename
from urlparse import urlsplit
import urllib2

def url2name(url):
    return basename(urlsplit(url)[2])

def download(url, localFileName=None):
    localName = url2name(url)
    req = urllib2.Request(url)
    r = urllib2.urlopen(req)
    if r.info().has_key('Content-Disposition'):
        # If the response has Content-Disposition, we take the file name from it
        localName = r.info()['Content-Disposition'].split('filename=')[1]
        if localName[0] == '"' or localName[0] == "'":
            localName = localName[1:-1]
    elif r.url != url:
        # if we were redirected, take the real file name from the final URL
        localName = url2name(r.url)
    if localFileName:
        # we can force saving the file under a specified name
        localName = localFileName
    f = open(localName, 'wb')
    f.write(r.read())
    f.close()
It takes the file name from Content-Disposition; if that header is not present, it uses the filename from the URL (if a redirect happened, the final URL is taken into account).
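For instance, calling the helper on the kind of URL from the question should save the file under whatever name the server advertises in Content-Disposition (accounts.pdf in the question's example), rather than the raw URL tail:

download('http://server.com/!Run.aspx/someoddtext/somemore?id=121&m=pdf')
# saved as accounts.pdf if the server sends that name in Content-Disposition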
Combining much of the above, here is a more pythonic solution:
import urllib2
import shutil
import urlparse
import os

def download(url, fileName=None):
    def getFileName(url, openUrl):
        if 'Content-Disposition' in openUrl.info():
            # If the response has Content-Disposition, try to get filename from it
            cd = dict(map(
                lambda x: x.strip().split('=') if '=' in x else (x.strip(), ''),
                openUrl.info()['Content-Disposition'].split(';')))
            if 'filename' in cd:
                filename = cd['filename'].strip("\"'")
                if filename: return filename
        # if no filename was found above, parse it out of the final URL.
        return os.path.basename(urlparse.urlsplit(openUrl.url)[2])

    r = urllib2.urlopen(urllib2.Request(url))
    try:
        fileName = fileName or getFileName(url, r)
        with open(fileName, 'wb') as f:
            shutil.copyfileobj(r, f)
    finally:
        r.close()
To kender:
if localName[0] == '"' or localName[0] == "'":
    localName = localName[1:-1]
is not safe -- the web server can pass a badly formatted name such as ["file.ext] or [file.ext'], or even an empty one, in which case localName[0] will raise an exception.
Correct code could look like this:
localName = localName.replace('"', '').replace("'", "")
if localName == '':
    localName = SOME_DEFAULT_FILE_NAME
Using wget:
custom_file_name = "/custom/path/custom_name.ext"
wget.download(url, custom_file_name)
Using urlretrieve:
urllib.urlretrieve(url, custom_file_name)
Note that urlretrieve does not create missing directories, so the directory part of custom_file_name must already exist.
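If the target directory might not exist yet, one way to handle it (a minimal sketch, assuming the same hypothetical custom_file_name and an already-defined url, Python 2 as in the snippet above):

import os
import urllib

custom_file_name = "/custom/path/custom_name.ext"
target_dir = os.path.dirname(custom_file_name)
if not os.path.isdir(target_dir):
    os.makedirs(target_dir)                    # create /custom/path before downloading
urllib.urlretrieve(url, custom_file_name)      # url is the remote file to fetch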
You need to look into the 'Content-Disposition' header; see the solution by kender.
How to download a file using python in a 'smarter' way?
Posting his solution, modified with the capability to specify an output folder:
from os.path import basename
import os
from urllib.parse import urlsplit
import urllib.request

def url2name(url):
    return basename(urlsplit(url)[2])

def download(url, out_path):
    localName = url2name(url)
    req = urllib.request.Request(url)
    r = urllib.request.urlopen(req)
    if 'Content-Disposition' in r.info():
        # If the response has Content-Disposition, we take the file name from it
        # (has_key() no longer exists in Python 3, so use the in operator instead)
        localName = r.info()['Content-Disposition'].split('filename=')[1]
        if localName[0] == '"' or localName[0] == "'":
            localName = localName[1:-1]
    elif r.url != url:
        # if we were redirected, take the real file name from the final URL
        localName = url2name(r.url)
    localName = os.path.join(out_path, localName)
    f = open(localName, 'wb')
    f.write(r.read())
    f.close()

download("https://example.com/demofile", '/home/username/tmp')
I have just updated kender's answer for Python 3.
