download a pdf from a website and change title - python and curl

I have a Python script to download PDFs from an ASP site. I would like to save each PDF under the name it is displayed with on the website. So, from a line of HTML, I need both the link to the PDF and its displayed name. For the following HTML line:
<li><a href="https://www.ib3.nl/curriculum/engels\100 TB 3 Ch 3.pdf">Chapter 3 - Weird science</a></li>
get the link https://www.ib3.nl/curriculum/engels\100 TB 3 Ch 3.pdf
and save this PDF as Chapter 3 - Weird science.pdf.
Below is the script I use to get all the PDFs:
from bs4 import BeautifulSoup
import urllib.request as requests  # note: this alias is overwritten by the next import
from urllib import parse as urlparse
import requests
import os

klassen = ['1e klas']
vakken = ['Wiskunde']
'''['Engels','Aardrijkskunde','Economie', 'Filosofie','Frans', 'Geschiedenis', \
'Nask', 'Natuurkunde', 'Nederlands', 'Scheikunde', 'Spaans', 'Wiskunde'\
'Biologie', 'Duits', 'Grieks','Latijn','Leesmateriaal', \
'Loopbaanorientatie','NLT']'''

links = []
for klas in klassen:
    for vak in vakken:
        url = "https://www.svpo.nl/curriculum.asp"
        payload = 'vak=' + vak + '&klas_en_schoolsoort=' + klas
        headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'Content-Type': 'application/x-www-form-urlencoded',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36'
        }
        response = requests.post(url, data=payload, headers=headers)
        path_out = 'c:\\books\\'  # 'c:\books' would turn \b into a backspace escape
        path = path_out + klas + "\\" + vak + "\\"
        if not os.path.exists(path):
            os.makedirs(path)
        for link in BeautifulSoup(response.text, "lxml").find_all('a'):
            current_link = link.get('href')
            if current_link is not None and current_link.endswith('pdf'):
                print(current_link)
                links.append(current_link)
                filename = current_link[current_link.find('\\')+1:]
                filename_url = urlparse.quote(filename)
                path_url = current_link[:current_link.find('\\')] + '/' + filename_url
                os.system('curl -o "' + path + filename + '" ' + path_url)

Simply:
filename = link.text + '.pdf'
That's all.
My version with changes from comments:
import os
import requests
from bs4 import BeautifulSoup

klassen = ['1e klas']
vakken = ['Wiskunde']
'''['Engels','Aardrijkskunde','Economie', 'Filosofie','Frans', 'Geschiedenis', \
'Nask', 'Natuurkunde', 'Nederlands', 'Scheikunde', 'Spaans', 'Wiskunde'\
'Biologie', 'Duits', 'Grieks','Latijn','Leesmateriaal', \
'Loopbaanorientatie','NLT']'''

links = []
url = "https://www.svpo.nl/curriculum.asp"
headers = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36'
}
path_out = r'c:\books'

for klas in klassen:
    for vak in vakken:
        path = os.path.join(path_out, klas, vak)
        os.makedirs(path, exist_ok=True)

        payload = {'vak': vak, 'klas_en_schoolsoort': klas}
        response = requests.post(url, data=payload, headers=headers)

        all_links = BeautifulSoup(response.text, "lxml").find_all('a', {'href': True})
        for link in all_links:
            # separate name so the POST `url` above isn't overwritten
            pdf_url = link.get('href')
            if pdf_url.lower().endswith('.pdf'):
                pdf_url = pdf_url.replace('\\', '/')
                links.append(pdf_url)
                print('url:', pdf_url)

                #filename = pdf_url.split('/')[-1]
                filename = link.text + '.pdf'
                print('filename:', filename)

                full_path = os.path.join(path, filename)
                print('full_path:', full_path)

                response = requests.get(pdf_url)
                with open(full_path, 'wb') as fh:
                    fh.write(response.content)
                print('---')
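One caveat with filename = link.text + '.pdf': the link text comes straight from the page, and Windows forbids characters such as : ? or * in filenames. A minimal sanitizer sketch, assuming such characters can ever occur in the displayed titles (safe_filename is a hypothetical helper, not part of the script above):

import re

def safe_filename(name):
    # Hypothetical helper: replace characters Windows forbids in filenames.
    return re.sub(r'[<>:"/\\|?*]', '_', name).strip()

filename = safe_filename(link.text) + '.pdf'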

Related

AttributeError: 'unicode' object has no attribute 'fromstring'. How to get around this?

I'm trying to detect the availability of an item on Amazon. Why doesn't this code work?
from simplified_scrapy.request import req
from simplified_scrapy.simplified_doc import SimplifiedDoc
import requests
import re
from bs4 import BeautifulSoup
from collections import OrderedDict
from time import sleep
import time
from lxml import html
import json

def check(url):
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}
    page = requests.get(url, headers=headers)
    for i in range(20):
        sleep(3)
        doc = html.fromstring(page.content)
        XPATH_AVAILABILITY = '//div[@id="availability"]//text()'
        RAw_AVAILABILITY = doc.xpath(XPATH_AVAILABILITY)
        AVAILABILITY = ''.join(RAw_AVAILABILITY).strip() if RAw_AVAILABILITY else None
        return AVAILABILITY

file_name = raw_input("Enter file name: ")
filepath = "%s" % (file_name)
with open(filepath) as f:
    listoflinks = [line.rstrip('\n') for line in f]

all_links = []
for i in listoflinks:
    html = req.get(i)
    doc = SimplifiedDoc(html)
    amazon_links = doc.getElements('a')
    amazon_links = amazon_links.containsOr(['https://www.amazon.com/', 'https://amzn.to/'], attr='href')
    for a in amazon_links:
        if a.href not in all_links:
            all_links.append(a.href)

headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}
for i in all_links:
    print "LINK:"
    print i
    response = requests.get(i, headers=headers)
    #soup = BeautifulSoup(html, "lxml")
    soup = BeautifulSoup(response.content, features="lxml")
    title = soup.select("#productTitle")[0].get_text().strip()
    if check(i) == 'In stock.':
        price = soup.select("#priceblock_saleprice")[0].get_text()
    else:
        price = "UNAVAILABLE"
    review_count = int(soup.select("#acrCustomerReviewText")[0].get_text().split()[0])
    jsonObject = {'title': title, 'price': price, 'review_count': review_count}
    print json.dumps(jsonObject, indent=2)
    print "////////////////////////////////////////////////"

print "..............................................."
print "FINALLY..."
print "# OF LINKS RETRIEVED:"
print len(all_links)
When I execute it, this error appears:
File "scra.py", line 17, in check
doc = html.fromstring(page.content)
AttributeError: 'unicode' object has no attribute 'fromstring'
Please help me. I already tried converting page to pagedata = page.json() but it only made it worse.
Try using this instead of html.fromstring:

doc = BeautifulSoup(page.content, 'html.parser')
doc = doc.prettify()
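For context, the error itself looks like name shadowing: the loop rebinds html (imported via from lxml import html) to the string returned by req.get(i), so by the time check() calls html.fromstring, html is a unicode string. A minimal sketch of that fix, keeping lxml (the variable name page_source is my choice):

# Rebinding `html` breaks `from lxml import html` for later calls,
# so give the downloaded page a different name:
for i in listoflinks:
    page_source = req.get(i)        # was: html = req.get(i)
    doc = SimplifiedDoc(page_source)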

Why can I only scrape 16 photos from pixabay?

I need backlight image data, so I'm trying to download backlight images from pixabay. But only 16 images are downloaded by the following code.
I tried to find out why, and I found a difference in the HTML source: the images that I can download are in an img tag with a srcset attribute, and my code downloads the first picture of the srcset. The other pictures only have an img src, and my code can't download them.
Does anyone know what the problem is?
Code
from bs4 import BeautifulSoup
import urllib.request
import os.path

url = "https://pixabay.com/images/search/backlight/"
opener = urllib.request.build_opener()
opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1941.0 Safari/537.36')]
urllib.request.install_opener(opener)

req = urllib.request.Request(url)
response = urllib.request.urlopen(req)
source = response.read()

soup = BeautifulSoup(source, "html.parser")
img = soup.find_all("img")
cnt = 0
for image in img:
    img_src = image.get("src")
    if img_src[0] == '/':
        continue
    cnt += 1
    print(img_src)
    path = "C:/Users/Guest001/Test/" + str(cnt) + ".jpg"
    print(path)
    urllib.request.urlretrieve(img_src, path)
Some of the images have /static/img/blank.gif as their src, and the real URL is in the data-lazy attribute. Also, some of the images have a .png suffix. Here is a working example:
from bs4 import BeautifulSoup
import urllib.request
import os.path

url = "https://pixabay.com/images/search/backlight/"
opener = urllib.request.build_opener()
opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1941.0 Safari/537.36')]
urllib.request.install_opener(opener)

req = urllib.request.Request(url)
response = urllib.request.urlopen(req)
source = response.read()

soup = BeautifulSoup(source, "html.parser")
img = soup.find_all("img")
cnt = 0
for image in img:
    # lazy-loaded images carry a blank.gif placeholder in src;
    # the real URL is in data-lazy
    img_src = image.get("src") if '.gif' not in image.get("src") else image.get('data-lazy')
    if img_src[0] == '/':
        continue
    cnt += 1
    print(img_src)
    path = ''
    if '.jpg' in img_src:
        path = "C:/Users/Guest001/Test/" + str(cnt) + ".jpg"
    elif '.png' in img_src:
        path = "C:/Users/Guest001/Test/" + str(cnt) + ".png"
    print(path)
    urllib.request.urlretrieve(img_src, path)
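As a side note, the .jpg/.png branching can be generalized with os.path.splitext, which keeps whatever extension the URL actually has (a sketch; the .jpg fallback for extensionless URLs is an assumption):

import os.path

# Derive the extension from the URL itself instead of hard-coding branches;
# strip any query string first, and assume .jpg if no extension is present.
ext = os.path.splitext(img_src.split('?')[0])[1] or '.jpg'
path = "C:/Users/Guest001/Test/" + str(cnt) + ext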

Scraper requests image corrupted

First time trying to make something in Python. I decided it would be an img-scraper.
It finds and downloads all the images, but they are all corrupted. I found info about wrong unicode in BeautifulSoup, but I did not understand what was wrong. The images are jpg, gif and png.
I don't use urllib because the site blocks it (403 Forbidden).
from bs4 import BeautifulSoup
import requests
import time

url = 'some url'
r = requests.get(url)
html = r.text
soup = BeautifulSoup(html, 'lxml')

images = []
for img in soup.findAll('img', {'class': '_images'}):
    images.append(img.get('data-url'))

for i in range(len(images)):
    s = images[i]
    cutname = s.split("/")[-1]
    filename = cutname[:cutname.find("?")]
    f = open(filename, 'wb')
    f.write(requests.get(s).content)
    f.close()
    time.sleep(0.5)
Seems like you need to pass some headers. The bottom part of the code, which writes the image file out, is by @Deepspace.
from bs4 import BeautifulSoup
import requests

url = "https://www.webtoons.com/en/comedy/bluechair/ep-366-husk/viewer?title_no=199&episode_no=538"
headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36',
    'Referer': url
}
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.content, 'lxml')
imgs = [link['data-url'] for link in soup.select('#_imageList img')]

counter = 0
for img in imgs:
    counter = counter + 1
    filename = 'image' + str(counter) + '.jpg'
    with open(filename, 'wb') as handle:
        response = requests.get(img, stream=True, headers=headers)
        if not response.ok:
            print(response)
        for block in response.iter_content(1024):
            if not block:
                break
            handle.write(block)
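A small style note: the manual counter can be replaced with enumerate, the idiomatic way to number items in a loop (a sketch of the same loop head):

# enumerate(imgs, start=1) yields (1, first_img), (2, second_img), ...
for counter, img in enumerate(imgs, start=1):
    filename = 'image' + str(counter) + '.jpg'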

python unicode char from requests/bs4

I have a script to get the lyrics of a song from MetroLyrics using requests and bs4.
The problem is that when I print them, part of the lyrics shows up like this:
Rabbi, Papa, Allah, Lama, Imam, Bibbia, Dharma, Sura, Torah, Pane, Vino, Kashèr, ḤalÄl, Yom Kippur, Quaresima, Ramadan
when it should look like this:
Rabbi, Papa, Lama, Imam, Bibbia, Dharma, Sura, Torah, Pane, vino, kashèr, ḥalāl, Yom Kippur, Quaresima, Ramadan
The code I use:
import requests
from bs4 import BeautifulSoup
import os
try:
    from urllib.parse import quote_plus
except ImportError:
    from urllib import quote_plus

def get_lyrics(song_name):
    song_name += ' metrolyrics'
    name = quote_plus(song_name)
    hdr = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 '
                         '(KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
           'Accept-Language': 'en-US,en;q=0.8',
           'Connection': 'keep-alive'}
    url = 'http://www.google.com/search?q=' + name
    result = requests.get(url, headers=hdr).text
    link_start = result.find('http://www.metrolyrics.com')
    if link_start == -1:
        return "Lyrics not found on Metrolyrics"
    link_end = result.find('html', link_start + 1)
    link = result[link_start:link_end + 4]
    lyrics_html = requests.get(link, headers={
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel '
                      'Mac OS X 10_12_1) AppleWebKit/537.36 (KHTML, '
                      'like Gecko) Chrome/55.0.2883.95 Safari/537.36'
    }).text
    soup = BeautifulSoup(lyrics_html, "lxml")
    raw_lyrics = soup.findAll('p', attrs={'class': 'verse'})
    try:
        final_lyrics = unicode.join(u'\n', map(unicode, raw_lyrics))
    except NameError:
        final_lyrics = str.join(u'\n', map(str, raw_lyrics))
    final_lyrics = final_lyrics.replace('<p class="verse">', '\n')
    final_lyrics = final_lyrics.replace('<br/>', ' ')
    final_lyrics = final_lyrics.replace('</p>', ' ')
    return final_lyrics
I have tried .encode('utf-8') and .encode('unicode-escape') and then converting back again, but no solution. I have another script where I use the Musixmatch API, and there the unicode shows correctly.
I made a small change in the get_lyrics function:
return final_lyrics.encode('latin1').decode('utf-8')
and got the desired output:
# python2
print get_lyrics('kashèr')
...
Rabbi, Papa, Allah, Lama, Imam, Bibbia, Dharma, Sura, Torah, Pane, Vino, Kashèr, Ḥalāl, Yom Kippur, Quaresima, Ramadan
...
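The garbling is classic mojibake: the page is UTF-8, but requests falls back to decoding it as latin-1 when the server declares no charset, which is presumably what happens here. Instead of re-encoding afterwards, you can tell requests the right encoding up front (a sketch under that assumption):

# Assume the page is UTF-8 even though the server omits the charset;
# setting .encoding before reading .text avoids the latin-1 fallback.
page = requests.get(link, headers=hdr)
page.encoding = 'utf-8'
lyrics_html = page.text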

Python 3 : HTTP Error 405: Method Not Allowed

I'm getting an 'HTTP Error 405: Method Not Allowed' error. My code is:
import urllib.request
import urllib.parse

try:
    url = 'https://www.google.com/search'
    values = {'q': 'python programming tutorials'}
    data = urllib.parse.urlencode(values)
    data = data.encode('utf-8')  # data should be bytes
    headers = {}
    headers['User-Agent'] = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36"
    req = urllib.request.Request(url, data, headers=headers)
    resp = urllib.request.urlopen(req)
    print("HERE")
    respData = resp.read()
    saveFile = open('withHeaders.txt', 'w')
    saveFile.write(str(respData))
    saveFile.close()
except Exception as e:
    print(e)
I guess the error is in req = urllib.request.Request(url, data, headers=headers). What is the error, is it syntactical? What should be changed in the code? And if I've made a conceptual mistake, please correct me.
EDIT

Concept:

def URLRequest(url, params, method="GET"):
    if method == "POST":
        return urllib2.Request(url, data=urllib.urlencode(params))
    else:
        return urllib2.Request(url + "?" + urllib.urlencode(params))
You can use the Requests library instead. It's much cleaner than urllib:

import requests

q = 'Whatever you want to search'
url = 'https://www.google.com/search'
response = requests.get(url + '?' + 'q=' + q)

saveFile = open('response.txt', 'w')
saveFile.write(response.text)
saveFile.close()
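Note that concatenating the query string by hand leaves spaces and special characters unencoded; requests can do the encoding for you via the params argument (a sketch of the same request):

import requests

# params is URL-encoded automatically, so spaces etc. are handled
response = requests.get('https://www.google.com/search',
                        params={'q': 'Whatever you want to search'})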
Or if you want to stick to urllib, you can do this:
import urllib.request
url = 'https://www.google.com/search'
q = 'Search Query'
headers = {'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36"}
request = urllib.request.Request(url+'?'+'q='+q, headers=headers)
response = urllib.request.urlopen(request).read() # the text of the response is here
saveFile = open('withHeaders.txt', 'w')
saveFile.write(str(response))
saveFile.close()
Here is an example, in reference to www.pythonforbeginners:
# Importing the modules
import urllib.request
import urllib.parse

# your search text
text = "hi google"

# Define the url (the search text must be URL-encoded, since spaces are not legal in a URL)
url = 'http://www.google.com/#q=' + urllib.parse.quote(text)

# Add your headers
headers = {'User-Agent': 'Mozilla 5.10'}

# Create the Request.
request = urllib.request.Request(url, None, headers)

# Getting the response
response = urllib.request.urlopen(request)

# Print the response body
print(response.read())
