Why can I only scrape 16 photos from pixabay? - python

I need backlight image data, so I'm trying to download backlight images from Pixabay. But the following code only downloads 16 images.
I tried to find out why, and I found a difference in the HTML source.
The images I can download have an img tag with a srcset attribute, and my code downloads the first picture in the srcset.
But the other pictures only have an img src, and my code can't download them.
Does anyone know what the problem is?
Code
from bs4 import BeautifulSoup
import urllib.request
import os.path

url = "https://pixabay.com/images/search/backlight/"

opener = urllib.request.build_opener()
opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1941.0 Safari/537.36')]
urllib.request.install_opener(opener)

req = urllib.request.Request(url)
response = urllib.request.urlopen(req)
source = response.read()

soup = BeautifulSoup(source, "html.parser")
img = soup.find_all("img")
cnt = 0
for image in img:
    img_src = image.get("src")
    if img_src[0] == '/':
        continue
    cnt += 1
    print(img_src)
    path = "C:/Users/Guest001/Test/" + str(cnt) + ".jpg"
    print(path)
    urllib.request.urlretrieve(img_src, path)

Some of the images have /static/img/blank.gif as their src (a lazy-loading placeholder); the real URL is in the data-lazy attribute. Also, some of the images have a .png suffix. Here is a working example.
from bs4 import BeautifulSoup
import urllib.request
import os.path

url = "https://pixabay.com/images/search/backlight/"

opener = urllib.request.build_opener()
opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1941.0 Safari/537.36')]
urllib.request.install_opener(opener)

req = urllib.request.Request(url)
response = urllib.request.urlopen(req)
source = response.read()

soup = BeautifulSoup(source, "html.parser")
img = soup.find_all("img")
cnt = 0
for image in img:
    # a .gif placeholder means the real URL is in data-lazy
    img_src = image.get("src") if '.gif' not in image.get("src") else image.get('data-lazy')
    if img_src[0] == '/':
        continue
    cnt += 1
    print(img_src)
    path = ''
    if '.jpg' in img_src:
        path = "C:/Users/Guest001/Test/" + str(cnt) + ".jpg"
    elif '.png' in img_src:
        path = "C:/Users/Guest001/Test/" + str(cnt) + ".png"
    print(path)
    urllib.request.urlretrieve(img_src, path)
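
As an aside (not part of the original answer): since the question mentions srcset, you can also pick the highest-resolution candidate out of a srcset attribute instead of taking the first one. A minimal sketch; pick_largest_from_srcset is a hypothetical helper of mine, not a library function:

def pick_largest_from_srcset(srcset):
    # srcset looks like "url1 1x, url2 2x" or "url1 340w, url2 680w"
    candidates = []
    for part in srcset.split(','):
        fields = part.strip().split()
        if not fields:
            continue
        candidate_url = fields[0]
        # descriptor like "2x" or "680w"; assume 1 if missing
        weight = float(fields[1][:-1]) if len(fields) > 1 else 1.0
        candidates.append((weight, candidate_url))
    return max(candidates)[1] if candidates else None

# usage inside the loop above, e.g.:
# srcset = image.get("srcset")
# if srcset:
#     img_src = pick_largest_from_srcset(srcset)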

Related

download a pdf from a website and change title - python and curl

I have a Python script to download PDFs from an ASP site. I would like to save each PDF file using the name it is displayed with on the website. So from a line of HTML, get the link to download the PDF and get the displayed name. So for the following HTML line:
<li><a href="https://www.ib3.nl/curriculum/engels\100 TB 3 Ch 3.pdf">Chapter 3 - Weird science</a></li>
get the link https://www.ib3.nl/curriculum/engels\100 TB 3 Ch 3.pdf
and save this PDF as Chapter 3 - Weird science.pdf
Below is the script to get all the PDFs:
from bs4 import BeautifulSoup as BeautifulSoup
import urllib.request as requests
from urllib import parse as urlparse
import requests
import os

klassen = ['1e klas']
vakken = ['Wiskunde']
'''['Engels','Aardrijkskunde','Economie', 'Filosofie','Frans', 'Geschiedenis', \
'Nask', 'Natuurkunde', 'Nederlands', 'Scheikunde', 'Spaans', 'Wiskunde'\
'Biologie', 'Duits', 'Grieks','Latijn','Leesmateriaal', \
'Loopbaanorientatie','NLT']'''

links = []
for klas in klassen:
    for vak in vakken:
        url = "https://www.svpo.nl/curriculum.asp"
        payload = 'vak=' + vak + '&klas_en_schoolsoort=' + klas
        headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'Content-Type': 'application/x-www-form-urlencoded',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36'
        }
        response = requests.post(url, data=payload, headers=headers)
        path_out = 'c:\books\\'
        path = (path_out + klas + "\\" + vak + "\\")
        if not(os.path.exists(path)): os.makedirs(path)
        links = BeautifulSoup(response.text, "lxml")#.find_all('a')
        a = BeautifulSoup(response.text, "lxml").find_all('a')
        for link in BeautifulSoup(response.text, "lxml").find_all('a'):
            current_link = link.get('href')
            if str(link.get('href')) != 'None':
                if current_link.endswith('pdf'):
                    print(current_link)
                    links.append(current_link)
                    filename = current_link[current_link.find('\\')+1:]
                    filename_url = urlparse.quote(filename)
                    path_url = current_link[:current_link.find('\\')] + '/' + filename_url
                    os.system('Curl -o "' + path + filename + '" ' + path_url)
Simply:
filename = link.text + '.pdf'
That's all.
My version with the changes from the comments:
import os
import requests
from bs4 import BeautifulSoup
from urllib import parse as urlparse

klassen = ['1e klas']
vakken = ['Wiskunde']
'''['Engels','Aardrijkskunde','Economie', 'Filosofie','Frans', 'Geschiedenis', \
'Nask', 'Natuurkunde', 'Nederlands', 'Scheikunde', 'Spaans', 'Wiskunde'\
'Biologie', 'Duits', 'Grieks','Latijn','Leesmateriaal', \
'Loopbaanorientatie','NLT']'''

links = []

url = "https://www.svpo.nl/curriculum.asp"

headers = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36'
}

path_out = r'c:\books'

for klas in klassen:
    for vak in vakken:
        path = os.path.join(path_out, klas, vak)
        os.makedirs(path, exist_ok=True)

        payload = {'vak': vak, 'klas_en_schoolsoort': klas}
        response = requests.post(url, data=payload, headers=headers)

        all_links = BeautifulSoup(response.text, "lxml").find_all('a', {'href': True})
        for link in all_links:
            # use a separate name so the request `url` is not clobbered
            pdf_url = link.get('href')
            if pdf_url.lower().endswith('.pdf'):
                pdf_url = pdf_url.replace('\\', '/')
                links.append(pdf_url)
                print('url:', pdf_url)

                #filename = pdf_url.split('/')[-1]
                filename = link.text + '.pdf'
                print('filename:', filename)

                full_path = os.path.join(path, filename)
                print('full_path:', full_path)

                response = requests.get(pdf_url)
                with open(full_path, 'wb') as fh:
                    fh.write(response.content)

                print('---')
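
One caveat (my addition, not from the comments above): link.text can contain characters that are invalid in Windows filenames, so it may be worth sanitizing it before saving. A minimal sketch, using a hypothetical helper safe_filename:

import re

# Hypothetical helper, not part of the script above: replace characters
# that Windows does not allow in filenames.
def safe_filename(name):
    return re.sub(r'[<>:"/\\|?*]', '_', name).strip()

# usage: filename = safe_filename(link.text) + '.pdf'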

Requests Not Creating Images Python

I want to download images, but for some reason the code executes without errors and still doesn't create any images. I'm using Requests and BeautifulSoup. My IDE is VS Code.
import requests
from bs4 import BeautifulSoup

HEADERS = {'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.246"}

def getData(link):
    URL = link
    request = requests.get(url=URL, headers=HEADERS)
    result = BeautifulSoup(request.content, "lxml")
    return result

address = "https://bisesargodha.edu.pk/content/ViewSeqImges.aspx?SubjectId=760&SeqNameAnsSubj=Sequence1"
response = getData(address)

table = response.find('table', attrs={'id': 'ContentPlaceHolder1_Table1'})
imgs = table.findAll('img')

imgNum = 1
for img in imgs:
    image_url = f"https://bisesargodha.edu.pk/content/{img['src']}"
    image_save = f"img-{imgNum}.jpg"
    pull_image = requests.get(image_url, headers=HEADERS)
    pull_image_content = pull_image.content
    with open(image_save, "wb+") as myfile:
        myfile.write(pull_image_content)
    imgNum = imgNum + 1
You need to fetch and consume your response as a stream. Try something like this:
img_num = 1
for img in imgs:
    image_url = f"https://bisesargodha.edu.pk/content/{img['src']}"
    image_save = f"img-{img_num}.jpg"
    with requests.get(image_url, headers=HEADERS, stream=True) as resp:
        resp.raise_for_status()  # do some additional error handling if necessary
        with open(image_save, "wb") as image_file:
            for chunk in resp.iter_content(chunk_size=8192):
                image_file.write(chunk)
    img_num = img_num + 1
If the issue still persists, then double-check the image URLs you are constructing and make sure they really point to the right content.
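
One way to do that check (my suggestion, not from the original answer) is to let urllib.parse.urljoin resolve each src against the page URL instead of concatenating strings, which quietly breaks for absolute or ../-style paths:

from urllib.parse import urljoin

page_url = "https://bisesargodha.edu.pk/content/ViewSeqImges.aspx?SubjectId=760&SeqNameAnsSubj=Sequence1"

# urljoin handles relative, absolute, and ../-style src values correctly
image_url = urljoin(page_url, img['src'])
print(image_url)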

AttributeError: 'unicode' object has no attribute 'fromstring'. How to get around this?

I'm trying to detect the availability of an item on Amazon. Why doesn't this code work?
from simplified_scrapy.request import req
from simplified_scrapy.simplified_doc import SimplifiedDoc
import requests
import re
from bs4 import BeautifulSoup
from collections import OrderedDict
from time import sleep
import time
from lxml import html
import json

def check(url):
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}
    page = requests.get(url, headers=headers)
    for i in range(20):
        sleep(3)
        doc = html.fromstring(page.content)
        XPATH_AVAILABILITY = '//div[@id="availability"]//text()'
        RAw_AVAILABILITY = doc.xpath(XPATH_AVAILABILITY)
        AVAILABILITY = ''.join(RAw_AVAILABILITY).strip() if RAw_AVAILABILITY else None
        return AVAILABILITY

file_name = raw_input("Enter file name: ")
filepath = "%s" % (file_name)

with open(filepath) as f:
    listoflinks = [line.rstrip('\n') for line in f]

all_links = []
for i in listoflinks:
    html = req.get(i)
    doc = SimplifiedDoc(html)
    amazon_links = doc.getElements('a')
    amazon_links = amazon_links.containsOr(['https://www.amazon.com/', 'https://amzn.to/'], attr='href')
    for a in amazon_links:
        if a.href not in all_links:
            all_links.append(a.href)

headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}
for i in all_links:
    print "LINK:"
    print i
    response = requests.get(i, headers=headers)
    #soup = BeautifulSoup(html, "lxml")
    soup = BeautifulSoup(response.content, features="lxml")
    title = soup.select("#productTitle")[0].get_text().strip()
    if check(i) == 'In stock.':
        price = soup.select("#priceblock_saleprice")[0].get_text()
    else:
        price = "UNAVAILABLE"
    review_count = int(soup.select("#acrCustomerReviewText")[0].get_text().split()[0])
    jsonObject = {'title': title, 'price': price, 'review_count': review_count}
    print json.dumps(jsonObject, indent=2)
    print "////////////////////////////////////////////////"
    print "..............................................."

print "FINALLY..."
print "# OF LINKS RETRIEVED:"
print len(all_links)
When I execute it, this error appears:
File "scra.py", line 17, in check
doc = html.fromstring(page.content)
AttributeError: 'unicode' object has no attribute 'fromstring'
Please help me. I already tried converting the page with pagedata = page.json(), but it only made things worse.
Try using this instead of html.fromstring:
doc = BeautifulSoup(page.content, 'html.parser')
doc = doc.prettify()
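
As a side note (my reading of the traceback, not part of the original answer): the AttributeError most likely comes from name shadowing. The script does from lxml import html at the top, but the first loop then assigns html = req.get(i), rebinding the name html to the downloaded page text (a unicode string in Python 2). When check() later calls html.fromstring, it is calling it on that string. Renaming the loop variable avoids the clash; a minimal sketch (the name page_html is mine):

for i in listoflinks:
    page_html = req.get(i)          # was: html = req.get(i), which shadowed lxml's html
    doc = SimplifiedDoc(page_html)  # now html.fromstring keeps working inside check()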

Scraper requests image corrupted

First time trying to make something in Python. I decided it would be an img-scraper.
It finds and downloads all the images, but they are all corrupted. I found info about wrong unicode in BeautifulSoup, but I did not understand what was wrong. The images are jpg, gif and png.
I don't use urllib because the site blocks it (403 Forbidden).
from bs4 import BeautifulSoup
import requests
import time

url = 'some url'
r = requests.get(url)
html = r.text
soup = BeautifulSoup(html, 'lxml')

images = []
for img in soup.findAll('img', {'class': '_images'}):
    images.append(img.get('data-url'))

for i in range(len(images)):
    s = images[i]
    cutname = s.split("/")[-1]
    filename = cutname[:cutname.find("?")]
    f = open(filename, 'wb')
    f.write((requests.get(s)).content)
    f.close()
    time.sleep(0.5)
Seems like you need to pass some headers, in particular the Referer, which the site checks before serving images. The bottom part of the code, which writes the image file out, is by @Deepspace.
from bs4 import BeautifulSoup
import requests

url = "https://www.webtoons.com/en/comedy/bluechair/ep-366-husk/viewer?title_no=199&episode_no=538"

headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36',
    'Referer': url
}

r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.content, 'lxml')
imgs = [link['data-url'] for link in soup.select('#_imageList img')]

counter = 0
for img in imgs:
    counter = counter + 1
    filename = 'image' + str(counter) + '.jpg'
    with open(filename, 'wb') as handle:
        response = requests.get(img, stream=True, headers=headers)
        if not response.ok:
            print(response)
        for block in response.iter_content(1024):
            if not block:
                break
            handle.write(block)
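
A further check you can add (my suggestion, building on the snippet above): verify that the bytes you got back really are an image and not an HTML error page saved under a .jpg name, which also shows up as "corrupted" files. Well-known magic bytes make this easy; img and headers come from the loop above:

import requests

# Sketch: sanity-check downloaded bytes against known image signatures.
def looks_like_image(data):
    return (data.startswith(b'\xff\xd8\xff')             # JPEG
            or data.startswith(b'\x89PNG\r\n\x1a\n')     # PNG
            or data.startswith((b'GIF87a', b'GIF89a')))  # GIF

content = requests.get(img, headers=headers).content
if not looks_like_image(content):
    print('Server did not return an image for', img)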

python unicode char from requests/bs4

I have a script to get the lyrics of a song from MetroLyrics using requests and bs4.
The problem is that when I print them, part of the lyrics shows up like this:
Rabbi, Papa, Allah, Lama, Imam, Bibbia, Dharma, Sura, Torah, Pane, Vino, Kashèr, ḤalÄl, Yom Kippur, Quaresima, Ramadan
when it should look like this:
Rabbi, Papa, Lama, Imam, Bibbia, Dharma, Sura, Torah, Pane, vino, kashèr, ḥalāl, Yom Kippur, Quaresima, Ramadan
The code I use:
import requests
from bs4 import BeautifulSoup
import os

try:
    from urllib.parse import quote_plus
except ImportError:
    from urllib import quote_plus

def get_lyrics(song_name):
    song_name += ' metrolyrics'
    name = quote_plus(song_name)
    hdr = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11'
           '(KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
           'Accept-Language': 'en-US,en;q=0.8',
           'Connection': 'keep-alive'}
    url = 'http://www.google.com/search?q=' + name
    result = requests.get(url, headers=hdr).text
    link_start = result.find('http://www.metrolyrics.com')
    if(link_start == -1):
        return("Lyrics not found on Metrolyrics")
    link_end = result.find('html', link_start + 1)
    link = result[link_start:link_end + 4]
    lyrics_html = requests.get(link, headers={
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel'
                      'Mac OS X 10_12_1) AppleWebKit/537.36 (KHTML, '
                      'like Gecko) Chrome/55.0.2883.95 Safari/537.36'
        }
    ).text
    soup = BeautifulSoup(lyrics_html, "lxml")
    raw_lyrics = (soup.findAll('p', attrs={'class': 'verse'}))
    paras = []
    try:
        final_lyrics = unicode.join(u'\n', map(unicode, raw_lyrics))
    except NameError:
        final_lyrics = str.join(u'\n', map(str, raw_lyrics))
    final_lyrics = (final_lyrics.replace('<p class="verse">', '\n'))
    final_lyrics = (final_lyrics.replace('<br/>', ' '))
    final_lyrics = final_lyrics.replace('</p>', ' ')
    return (final_lyrics)
I have tried .encode('utf-8') and .encode('unicode-escape') and then converting back again, but no solution.
I have another script where I use the Musixmatch API, and there the unicode shows up correctly.
I made a small change in the get_lyrics function:
return final_lyrics.encode('latin1').decode('utf-8')
and got the desired output:
# python2
print get_lyrics('kashèr')
...
Rabbi, Papa, Allah, Lama, Imam, Bibbia, Dharma, Sura, Torah, Pane, Vino, Kashèr, Ḥalāl, Yom Kippur, Quaresima, Ramadan
...
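
Why this works (my explanation, not part of the original answer): the page bytes are UTF-8, but somewhere along the way they were decoded as Latin-1, producing mojibake such as Ã¨ in place of è. Encoding back to Latin-1 recovers the original UTF-8 bytes, and decoding those as UTF-8 restores the text. A minimal round-trip demo (Python 3 syntax):

# Minimal demo of the mojibake round-trip.
correct = 'Kashèr, ḥalāl'
garbled = correct.encode('utf-8').decode('latin1')  # simulate the wrong decode
print(garbled)                                      # mojibake, e.g. KashÃ¨r ...
print(garbled.encode('latin1').decode('utf-8'))     # Kashèr, ḥalāl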
