I am trying to download all PDF files from one website, but every PDF it creates is corrupted...
import requests
from bs4 import BeautifulSoup

url = "https://www.geeksforgeeks.org/how-to-extract-pdf-tables-in-python/"
response = requests.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
links = soup.find_all('a')

i = 0
for link in links:
    if '.pdf' in link.get('href', []):
        i += 1
        print("Downloading file: ", i)
        response = requests.get(link.get('href'))
        pdf = open("pdf" + str(i) + ".pdf", 'wb')
        pdf.write(response.content)
        pdf.close()
        print("File ", i, " downloaded")

print("All PDF files downloaded")
Add headers to your requests. Without a browser-like User-Agent, some servers return an error page instead of the file, and that page is what gets saved as the "corrupted" PDF:
import requests
from bs4 import BeautifulSoup

headers = {
    'user-agent': 'Mozilla/5.0 (Macintosh; PPC Mac OS X 10_8_7 rv:5.0; en-US) AppleWebKit/533.31.5 (KHTML, like Gecko) Version/4.0 Safari/533.31.5',
}

url = "https://www.geeksforgeeks.org/how-to-extract-pdf-tables-in-python/"
response = requests.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
links = soup.find_all('a')

i = 0
for link in links:
    if '.pdf' in link.get('href', []):
        i += 1
        print("Downloading file: ", i)
        response = requests.get(link.get('href'), headers=headers)
        pdf = open("pdf" + str(i) + ".pdf", 'wb')
        pdf.write(response.content)
        pdf.close()
        print("File ", i, " downloaded")

print("All PDF files downloaded")
So I built this small script that would give back the URL of any video searched on YouTube. But after opening it up again, it turns out the web scraping of YouTube is not working properly: when printing soup it returns something completely different from what can be seen with inspect element on YouTube. Can someone help me solve this?
Here's my code:
import requests
from lxml import html
import webbrowser
from bs4 import BeautifulSoup
import time
import tkinter
from pytube import YouTube

headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.162 Safari/537.36"}

def video_finder():
    word = input("Enter video title: ")
    if ' ' in word:
        new = word.replace(' ', '+')
        print(new)
    else:
        pass

    vid = requests.get('https://www.youtube.com/results?search_query={}'.format(new))
    soup = BeautifulSoup(vid.text, features='lxml')
    all_vids = soup.find_all('div', id_='contents')
    print(all_vids)
    video1st = all_vids[0]
    a_Tag = video1st.find('a', class_="yt-uix-tile-link yt-ui-ellipsis yt-ui-ellipsis-2 yt-uix-sessionlink spf-link", href=True)
    Video_name = a_Tag.text
    Video_id = a_Tag['href']
    video_link = 'https://www.youtube.com' + Video_id
    print(Video_name)
    print(video_link)
It's not the best but yeah... thank you.
To get the correct result from the YouTube page, set the User-Agent HTTP header to Googlebot and use html.parser in BeautifulSoup.
For example:
import requests
from bs4 import BeautifulSoup

headers = {"User-Agent": "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)"}

def video_finder():
    word = input("Enter video title: ")
    params = {
        'search_query': word
    }

    vid = requests.get('https://www.youtube.com/results', params=params, headers=headers)
    soup = BeautifulSoup(vid.content, features='html.parser')

    # the "h and" guard keeps the lambda from failing on <a> tags without an href
    a_Tag = soup.find('a', class_="yt-uix-tile-link yt-ui-ellipsis yt-ui-ellipsis-2 yt-uix-sessionlink spf-link", href=lambda h: h and h.startswith('/watch?'))

    Video_name = a_Tag.text
    Video_id = a_Tag['href']
    video_link = 'https://www.youtube.com' + Video_id
    print(Video_name)
    print(video_link)

video_finder()
Prints:
Enter video title: sailor moon
Sailor Moon Opening (English) *HD*
https://www.youtube.com/watch?v=5txHGxJRwtQ
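If the Googlebot trick ever stops working, the search results are still embedded in the page as a JSON blob (ytInitialData), so a plain regex over the raw HTML can recover the first video id. A rough sketch, assuming the blob still contains "videoId" fields:

import re
import requests

# fetch the results page with an ordinary desktop User-Agent
html = requests.get(
    'https://www.youtube.com/results',
    params={'search_query': 'sailor moon'},
    headers={'User-Agent': 'Mozilla/5.0'},
).text

# every 11-character video id that appears in the embedded JSON
video_ids = re.findall(r'"videoId":"([\w-]{11})"', html)
if video_ids:
    print('https://www.youtube.com/watch?v=' + video_ids[0])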
First time trying to make something in Python. I decided it would be an image scraper.
It finds and downloads all the images, but they are all corrupted. I found info about wrong unicode in BeautifulSoup, but I did not understand what was wrong. The images are jpg, gif and png.
I don't use urllib because the site blocks it (403 Forbidden).
from bs4 import BeautifulSoup
import requests
import time

url = 'some url'
r = requests.get(url)
html = r.text
soup = BeautifulSoup(html, 'lxml')

images = []
for img in soup.findAll('img', {'class': '_images'}):
    images.append(img.get('data-url'))

for i in range(len(images)):
    s = images[i]
    cutname = s.split("/")[-1]
    filename = cutname[:cutname.find("?")]
    f = open(filename, 'wb')
    f.write((requests.get(s)).content)
    f.close()
    time.sleep(0.5)
Seems like you need to pass some headers. The bottom part of the code, which writes the image file out, is by @Deepspace:
from bs4 import BeautifulSoup
import requests

url = "https://www.webtoons.com/en/comedy/bluechair/ep-366-husk/viewer?title_no=199&episode_no=538"
headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36',
    'Referer': url
}

r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.content, 'lxml')
imgs = [link['data-url'] for link in soup.select('#_imageList img')]

counter = 0
for img in imgs:
    counter = counter + 1
    filename = 'image' + str(counter) + '.jpg'
    with open(filename, 'wb') as handle:
        response = requests.get(img, stream=True, headers=headers)
        if not response.ok:
            print(response)
        for block in response.iter_content(1024):
            if not block:
                break
            handle.write(block)
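Since the question mentions a mix of jpg, gif and png, you may want to keep each image's real extension instead of hard-coding .jpg. A small helper sketch, assuming the data-url path ends with the actual filename (possibly followed by a query string):

import os
from urllib.parse import urlparse

def filename_for(img_url, counter):
    # take the extension from the URL path, falling back to .jpg if there is none
    ext = os.path.splitext(urlparse(img_url).path)[1] or '.jpg'
    return 'image' + str(counter) + ext

# usage inside the loop above: filename = filename_for(img, counter)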
I want to scrape a few pages from the Amazon website (title, URL, ASIN), and I've run into a problem: the script only parses 15 products, while the page shows 50. I decided to print all the HTML to the console, and I saw that the HTML ends at 15 products, without any errors from the script.
Here is the relevant part of my script:
import requests
from time import sleep
from bs4 import BeautifulSoup

keyword = "men jeans".replace(' ', '+')
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en-US; rv:1.9.1b3) Gecko/20090305 Firefox/3.1b3 GTB5'}
url = "https://www.amazon.com/s/field-keywords={}".format(keyword)

request = requests.session()
req = request.get(url, headers=headers)
sleep(3)
soup = BeautifulSoup(req.content, 'html.parser')
print(soup)
It's because a few of the items are generated dynamically. There might be a better solution than using selenium; however, as a workaround you can try the below way instead.
from selenium import webdriver
from bs4 import BeautifulSoup

def fetch_item(driver, keyword):
    driver.get(url.format(keyword.replace(" ", "+")))
    soup = BeautifulSoup(driver.page_source, 'html.parser')
    for items in soup.select("[id^='result_']"):
        try:
            name = items.select_one("h2").text
        except AttributeError:
            name = ""
        print(name)

if __name__ == '__main__':
    url = "https://www.amazon.com/s/field-keywords={}"
    driver = webdriver.Chrome()
    try:
        fetch_item(driver, "men jeans")
    finally:
        driver.quit()
Upon running the above script you should get 56 names or so as a result.
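If you'd rather not have a browser window pop up while it runs, Chrome can be started headless. A sketch of the driver setup, assuming a selenium/chromedriver combination recent enough to accept the options argument:

from selenium import webdriver

options = webdriver.ChromeOptions()
options.add_argument('--headless')               # run Chrome without a visible window
options.add_argument('--window-size=1920,1080')  # some pages lay out differently on tiny viewports
driver = webdriver.Chrome(options=options)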
import requests
from bs4 import BeautifulSoup

for page in range(1, 21):
    keyword = "red car".replace(' ', '+')
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en-US; rv:1.9.1b3) Gecko/20090305 Firefox/3.1b3 GTB5'}
    url = "https://www.amazon.com/s/field-keywords=" + keyword + "?page=" + str(page)
    request = requests.session()
    req = request.get(url, headers=headers)
    soup = BeautifulSoup(req.content, 'html.parser')
    results = soup.findAll("li", {"class": "s-result-item"})

    for i in results:
        try:
            print(i.find("h2", {"class": "s-access-title"}).text.replace('[SPONSORED]', ''))
            print(i.find("span", {"class": "sx-price-large"}).text.replace("\n", ' '))
            print('*' * 20)
        except:
            pass
Amazon's page range maxes out at 20 here, so this crawls through the pages.
So I have this code that will give me the URLs I need in a list format:
import requests
from bs4 import BeautifulSoup

offset = 0
links = []

with requests.Session() as session:
    while True:
        r = session.get("http://rayleighev.deviantart.com/gallery/44021661/Reddit?offset=%d" % offset)
        soup = BeautifulSoup(r.content, "html.parser")
        new_links = soup.find_all("a", {'class': "thumb"})

        # no more links - break the loop
        if not new_links:
            break

        links.extend(new_links)
        print(len(links))
        offset += 24

        # denotes the number of gallery pages gone through (# of pages times 24 equals the number below)
        if offset == 48:
            break

for link in links:
    print(link.get("href"))
After that I try to get a different piece of text from each of the URLs; the text sits in roughly the same place on every page. But whenever I run the second half, below, I keep getting a chunk of HTML text and some errors, and I'm not sure how to fix it, or whether there is another, preferably simpler, way to get the text from each URL.
import urllib.request
import re

for link in links:
    url = print("%s" % link)

    headers = {}
    headers['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'
    req = urllib.request.Request(url, headers=headers)
    resp = urllib.request.urlopen(req)
    respData = resp.read()

    paragraphs = re.findall(r'</a><br /><br />(.*?)</div>', str(respData))
    if paragraphs != None:
        paragraphs = re.findall(r'<br /><br />(.*?)</span>', str(respData))
    if paragraphs != None:
        paragraphs = re.findall(r'<br /><br />(.*?)</span></div>', str(respData))

    for eachP in paragraphs:
        print(eachP)

    title = re.findall(r'<title>(.*?)</title>', str(respData))
    for eachT in title:
        print(eachT)
Your code:

for link in links:
    url = print("%s" % link)

assigns None to url. Perhaps you mean:

for link in links:
    url = "%s" % link.get("href")
There's also no reason to use urllib to get the site's content; you can use requests as you did before by changing:
req = urllib.request.Request(url, headers = headers)
resp = urllib.request.urlopen(req)
respData = resp.read()
to
req = requests.get(url, headers=headers)
soup = BeautifulSoup(req.content, "html.parser")
Now you can get the title and paragraph with just:
title = soup.find('div', {'class': 'dev-title-container'}).h1.text
paragraph = soup.find('div', {'class': 'text block'}).text
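Putting those pieces together, the whole second half of the script reduces to one requests/BeautifulSoup loop. A sketch, reusing the links list from the first half and assuming the two class names above still match the page markup:

import requests
from bs4 import BeautifulSoup

headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}

for link in links:
    url = link.get("href")
    req = requests.get(url, headers=headers)
    soup = BeautifulSoup(req.content, "html.parser")

    # both elements sit in the same place on every deviation page
    title = soup.find('div', {'class': 'dev-title-container'}).h1.text
    paragraph = soup.find('div', {'class': 'text block'}).text
    print(title)
    print(paragraph)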
I can scrape one site easily, but on the other I get an error. I'm not sure if it's because the website has some sort of block on it or something.
import random
from bs4 import BeautifulSoup
import urllib2
import re
from urlparse import urljoin

user_input = raw_input("Search for Team = ")

resp = urllib2.urlopen("http://idimsports.eu/football.html")  ### working
soup = BeautifulSoup(resp, from_encoding=resp.info().getparam('charset'))
base_url = "http://idimsports.eu"
links = soup.find_all('a', href=re.compile('' + user_input))
if len(links) == 0:
    print "No Streams Available"
else:
    for link in links:
        print urljoin(base_url, link['href'])

resp = urllib2.urlopen("http://cricfree.tv/football-live-stream")  ### not working
soup = BeautifulSoup(resp, from_encoding=resp.info().getparam('charset'))
links = soup.find_all('a', href=re.compile('' + user_input))
if len(links) == 0:
    print "No Streams Available"
else:
    for link in links:
        print urljoin(base_url, link['href'])
Set the User-Agent header of your request:
headers = { 'User-Agent' : 'Mozilla/5.0' }
req = urllib2.Request("http://cricfree.tv/football-live-stream", None, headers)
resp = urllib2.urlopen(req)
Also, in your second loop you're reusing base_url; you probably don't want to do that.
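For example, the second half could look something like this (a sketch reusing the question's imports, with its own base_url for cricfree.tv):

headers = {'User-Agent': 'Mozilla/5.0'}
cricfree_base_url = "http://cricfree.tv"

req = urllib2.Request("http://cricfree.tv/football-live-stream", None, headers)
resp = urllib2.urlopen(req)
soup = BeautifulSoup(resp, from_encoding=resp.info().getparam('charset'))
links = soup.find_all('a', href=re.compile('' + user_input))

if len(links) == 0:
    print "No Streams Available"
else:
    for link in links:
        print urljoin(cricfree_base_url, link['href'])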