Python: Simple Web Crawler using BeautifulSoup4 - python

I have been following TheNewBoston's Python 3.4 tutorials that use PyCharm, and am currently on the tutorial on how to create a web crawler. I simply want to download all of XKCD's comics. Using the archive, that seemed very easy. Here is my code, followed by TheNewBoston's.
Whenever I run the code, nothing happens. It runs through and says, "Process finished with exit code 0." Where did I screw up?
TheNewBoston's tutorial is a little dated, and the website used for the crawl has changed domains. I will comment on the part of the video that seems to matter.
My code:
import requests
from urllib import request
from bs4 import BeautifulSoup

def download_img(image_url, page):
    name = str(page) + ".jpg"
    request.urlretrieve(image_url, name)

def xkcd_spirder(max_pages):
    page = 1
    while page <= max_pages:
        url = r'http://xkcd.com/' + str(page)
        source_code = requests.get(url)
        plain_text = source_code.text
        soup = BeautifulSoup(plain_text, "html.parser")
        for link in soup.findAll('div', {'img': 'src'}):
            href = link.get('href')
            print(href)
            download_img(href, page)
        page += 1

xkcd_spirder(5)

The comic is in the div with the id comic; you just need to pull the src from the img inside that div, join it to the base URL, and finally request the content and write it out. I use the basename of the image URL as the name to save the file under.
I also replaced your while loop with a range loop and did all the HTTP requests using requests:
import requests
from bs4 import BeautifulSoup
from os import path
from urllib.parse import urljoin  # python2 -> from urlparse import urljoin

def download_img(image_url, base):
    # path.basename(image_url)
    # http://imgs.xkcd.com/comics/tree_cropped_(1).jpg -> tree_cropped_(1).jpg
    with open(path.basename(image_url), "wb") as f:
        # image_url is a relative path, we have to join it to the base
        f.write(requests.get(urljoin(base, image_url)).content)

def xkcd_spirder(max_pages):
    base = "http://xkcd.com/"
    for page in range(1, max_pages + 1):
        url = base + str(page)
        source_code = requests.get(url)
        plain_text = source_code.text
        soup = BeautifulSoup(plain_text, "html.parser")
        # we only want one image
        img = soup.select_one("#comic img")  # or .find('div', id='comic').img
        download_img(img["src"], base)

xkcd_spirder(5)
Once you run the code you will see we get the first five comics.

Related

How to download an HTML file completely? [duplicate]

Currently I have a script that can only download the HTML of a given page.
Now I want to download all the files of the web page including HTML, CSS, JS and image files (same as we get with a ctrl-s of any website).
My current code is:
import urllib
url = "https://en.wikipedia.org/wiki/Python_%28programming_language%29"
urllib.urlretrieve(url, "t3.html")
I have looked at many questions, but they all only download the HTML.
The following implementation enables you to get the sub-HTML pages. It can be developed further to fetch the other files you need. I set the depth variable so you can choose the maximum number of levels of sub-pages that you want to parse.
import urllib2
from BeautifulSoup import *
from urlparse import urljoin

def crawl(pages, depth=None):
    indexed_url = []  # a list for the main and sub-HTML websites in the main website
    for i in range(depth):
        for page in pages:
            if page not in indexed_url:
                indexed_url.append(page)
                try:
                    c = urllib2.urlopen(page)
                except:
                    print "Could not open %s" % page
                    continue
                soup = BeautifulSoup(c.read())
                links = soup('a')  # finding all the sub_links
                for link in links:
                    if 'href' in dict(link.attrs):
                        url = urljoin(page, link['href'])
                        if url.find("'") != -1:
                            continue
                        url = url.split('#')[0]
                        if url[0:4] == 'http':
                            indexed_url.append(url)
        pages = indexed_url
    return indexed_url

pagelist = ["https://en.wikipedia.org/wiki/Python_%28programming_language%29"]
urls = crawl(pagelist, depth=2)
print urls
Python 3 version, 2019. May this save somebody some time:
#!/usr/bin/env python
import urllib.request as urllib2
from bs4 import *
from urllib.parse import urljoin

def crawl(pages, depth=None):
    indexed_url = []  # a list for the main and sub-HTML websites in the main website
    for i in range(depth):
        for page in pages:
            if page not in indexed_url:
                indexed_url.append(page)
                try:
                    c = urllib2.urlopen(page)
                except:
                    print("Could not open %s" % page)
                    continue
                soup = BeautifulSoup(c.read())
                links = soup('a')  # finding all the sub_links
                for link in links:
                    if 'href' in dict(link.attrs):
                        url = urljoin(page, link['href'])
                        if url.find("'") != -1:
                            continue
                        url = url.split('#')[0]
                        if url[0:4] == 'http':
                            indexed_url.append(url)
        pages = indexed_url
    return indexed_url

pagelist = ["https://en.wikipedia.org/wiki/Python_%28programming_language%29"]
urls = crawl(pagelist, depth=1)
print(urls)
You can easily do that with the simple Python library pywebcopy.
For the current version (5.0.1):
from pywebcopy import save_webpage
url = 'http://some-site.com/some-page.html'
download_folder = '/path/to/downloads/'
kwargs = {'bypass_robots': True, 'project_name': 'recognisable-name'}
save_webpage(url, download_folder, **kwargs)
You will have the HTML, CSS and JS all in your download_folder, working just like the original site.
Using Python 3+, Requests, and other standard libraries.
The function savePage receives a requests.Response and the pagefilename to save it under.
It saves pagefilename.html in the current folder.
It downloads the JavaScript, CSS and images based on the tags script, link and img, and saves them in a folder named pagefilename_files.
Any exceptions are printed on sys.stderr, and it returns a BeautifulSoup object.
The Requests session must be a global variable unless someone writes cleaner code here for us.
You can adapt it to your needs.
import os, sys
import requests
from urllib.parse import urljoin
from bs4 import BeautifulSoup

def soupfindAllnSave(pagefolder, url, soup, tag2find='img', inner='src'):
    if not os.path.exists(pagefolder):  # create only once
        os.mkdir(pagefolder)
    for res in soup.findAll(tag2find):  # images, css, etc..
        try:
            filename = os.path.basename(res[inner])
            fileurl = urljoin(url, res.get(inner))
            # rename to saved file path
            # res[inner] # may or may not exist
            filepath = os.path.join(pagefolder, filename)
            res[inner] = os.path.join(os.path.basename(pagefolder), filename)
            if not os.path.isfile(filepath):  # was not downloaded
                with open(filepath, 'wb') as file:
                    filebin = session.get(fileurl)
                    file.write(filebin.content)
        except Exception as exc:
            print(exc, file=sys.stderr)
    return soup

def savePage(response, pagefilename='page'):
    url = response.url
    soup = BeautifulSoup(response.text)
    pagefolder = pagefilename + '_files'  # page contents
    soup = soupfindAllnSave(pagefolder, url, soup, 'img', inner='src')
    soup = soupfindAllnSave(pagefolder, url, soup, 'link', inner='href')
    soup = soupfindAllnSave(pagefolder, url, soup, 'script', inner='src')
    with open(pagefilename + '.html', 'w') as file:
        file.write(soup.prettify())
    return soup
Example: saving the Google page and its contents (into the google_files folder):
session = requests.Session()
#... whatever requests config you need here
response = session.get('https://www.google.com')
savePage(response, 'google')
Try the Python library Scrapy. You can program Scrapy to recursively scan a website, downloading its pages, scanning them and following their links; a minimal spider sketch follows the quote below:
An open source and collaborative framework for extracting the data you need from websites. In a fast, simple, yet extensible way.
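For illustration, here is a minimal sketch of such a spider, assuming Scrapy is installed; the spider name, the start URL and the file-naming scheme are placeholders, not taken from any answer above:
import scrapy

class SiteSpider(scrapy.Spider):
    name = "site"
    allowed_domains = ["example.com"]        # keep the crawl on one site (placeholder)
    start_urls = ["https://example.com/"]    # placeholder start page

    def parse(self, response):
        # save the current page to disk, using the last path segment as the file name
        filename = response.url.rstrip("/").split("/")[-1] or "index"
        with open(filename + ".html", "wb") as f:
            f.write(response.body)
        # follow every link on the page and parse it the same way
        for href in response.css("a::attr(href)").getall():
            yield response.follow(href, callback=self.parse)
Saved as spider.py, this can be run with scrapy runspider spider.py; Scrapy then takes care of request scheduling, duplicate filtering and politeness settings.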

why doesn't my web scraper work? Python3 - requests, BeautifulSoup

I have been following this Python tutorial for a while, and I made a web crawler, similar to the one in the video.
Language: Python
import requests
from bs4 import BeautifulSoup

def spider(max_pages):
    page = 1
    while page <= max_pages:
        url = 'https://www.aliexpress.com/category/7/computer-office.html?trafficChannel=main&catName=computer-office&CatId=7&ltype=wholesale&SortType=default&g=n&page=' + str(page)
        source_code = requests.get(url)
        plain_text = source_code.text
        soup = BeautifulSoup(plain_text, 'html.parser')
        for link in soup.findAll('a', {'class': 'item-title'}):
            href = link.get('href')
            title = link.string
            print(href)
        page += 1

spider(1)
And this is the output that the program gives:
PS D:\development> & C:/Users/hirusha/AppData/Local/Programs/Python/Python38/python.exe "d:/development/Python/TheNewBoston/Python/one/web scrawler.py"
PS D:\development>
What can I do?
Before this, I had an error; the code was:
soup = BeautifulSoup(plain_text)
I changed this to
soup = BeautifulSoup(plain_text, 'html.parser')
and the error was gone.
The error I got here was:
d:/development/Python/TheNewBoston/Python/one/web scrawler.py:10: GuessedAtParserWarning: No parser was explicitly specified, so I'm using the best available HTML parser for this system ("lxml"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.
The code that caused this warning is on line 10 of the file d:/development/Python/TheNewBoston/Python/one/web scrawler.py. To get rid of this warning, pass the additional argument 'features="lxml"' to the BeautifulSoup constructor.
soup = BeautifulSoup(plain_text)
Any help is appreciated, thank you!
There are no results as the class you are targeting is not present until the webpage is rendered, which doesn't happen with requests.
Data is dynamically retrieved from a script tag. You can regex the JavaScript object holding the data and parse with json to get that info.
The error you show was due to a parser not being specified originally, which you rectified.
import re, json, requests
import pandas as pd
r = requests.get('https://www.aliexpress.com/category/7/computer-office.html?trafficChannel=main&catName=computer-office&CatId=7&ltype=wholesale&SortType=default&g=n&page=1')
data = json.loads(re.search(r'window\.runParams = (\{".*?\});', r.text, re.S).group(1))
df = pd.DataFrame([(item['title'], 'https:' + item['productDetailUrl']) for item in data['items']])
print(df)

python web-crawler Guessed at parser warning

I am trying to make a web crawler using Python (3.8). I think I'm mostly done, but I'm getting this error. Can anybody help me? Thanks in advance.
Python code:
import requests
from bs4 import BeautifulSoup

def aliexpress_spider(max_pages):
    page = 1
    while page <= max_pages:
        url = "https://www.aliexpress.com/af/ps4.html?trafficChannel=af&d=y&CatId=0&SearchText=ps4&ltype=affiliate&SortType=default&page=" + str(page)
        sourcecode = requests.get(url)
        plaintext = sourcecode.text
        soup = BeautifulSoup(plaintext)
        for link in soup.findAll('a', {'class': 'item-title'}):
            href = "https://www.aliexpress.com" + link.get("href")
            title = link.string
            print(href)
            print(title)
        page += 1

aliexpress_spider(1)
Error message:
GuessedAtParserWarning: No parser was explicitly specified, so I'm using the best available HTML parser for this system ("html.parser"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.
The code that caused this warning is on line 11 of the file C:/Users/moham/PycharmProjects/moh/test.py. To get rid of this warning, pass the additional argument 'features="html.parser"' to the BeautifulSoup constructor.
soup = BeautifulSoup(plaintext)
import requests
from bs4 import BeautifulSoup

def aliexpress_spider(max_pages):
    page = 1
    while page <= max_pages:
        url = "https://www.aliexpress.com/af/ps4.html?trafficChannel=af&d=y&CatId=0&SearchText=ps4&ltype=affiliate&SortType=default&page=" + str(page)
        sourcecode = requests.get(url)
        soup = BeautifulSoup(sourcecode.text, "html.parser")
        for link in soup.findAll('a', {'class': 'item-title'}):
            href = "https://www.aliexpress.com" + link.get("href")
            title = link.string
            print(href)
            print(title)
        print(soup.title)
        page += 1

aliexpress_spider(1)

Python script with BS4 not working after I made some change; syntax problem?

I ran the code fine, then I tweaked it, saved and closed it, and when I tried to run it again I got a syntax error. My stupid self didn't back up the original code, and now nothing I change seems to fix it. I checked the source code of the website and that hasn't changed. It's erroring before even reaching the website. Any suggestions on what I overlooked?
import requests
import time
import bs4
import sys

sys.stdout = open("links2.txt", "a")
for x in range(0, 100000):
    try:
        URL = f'https://wesbite.com/{x}'
        page = requests.get(URL)
        time.sleep(1)
        soup = BeautifulSoup(page.content, 'html.parser')
        website = "https://v.website.com/"
        for links in soup.find('div', id='view').find_all('a'):
            parts = links['href'].split("/")
            new_link = parts[1].replace(parts[1], website) + '/'.join(parts[2:]) + ".mp4"
            print(new_link)
    except:
        continue
It's reporting a syntax error on the line that reads: URL = f'https://wesbite.com/{x}'
Here is your working code now:
import requests
import time
from bs4 import BeautifulSoup
import sys

sys.stdout = open("links2.txt", "a")
for x in range(0, 100000):
    try:
        URL = f'https://wesbite.com/{x}'
        page = requests.get(URL)
        time.sleep(1)
        soup = BeautifulSoup(page.content, 'html.parser')
        website = "https://v.website.com/"
        for links in soup.find('div', id='view').find_all('a'):
            parts = links['href'].split("/")
            new_link = parts[1].replace(parts[1], website) + '/'.join(parts[2:]) + ".mp4"
            print(new_link)
    except:
        continue
It was:
import bs4
Now:
from bs4 import BeautifulSoup

Python Web Crawler from thenewboston

I recently watched a thenewboston video on writing a web crawler using Python. For some reason, I'm getting an SSLError. I tried fixing it with line 6 of the code, but no luck. Any idea why it's throwing errors? The code is verbatim from thenewboston.
import requests
from bs4 import BeautifulSoup

def creepy_crawly(max_pages):
    page = 1
    #requests.get('https://www.thenewboston.com/', verify = True)
    while page <= max_pages:
        url = "https://www.thenewboston.com/trade/search.php?pages=" + str(page)
        source_code = requests.get(url)
        plain_text = source_code.text
        soup = BeautifulSoup(plain_text)
        for link in soup.findAll('a', {'class': 'item-name'}):
            href = "https://www.thenewboston.com" + link.get('href')
            print(href)
        page += 1

creepy_crawly(1)
I've done a web crawler using urllib; it can be faster and has no problem accessing HTTPS pages. One thing, though, is that it doesn't validate the server certificate, which makes it faster but more dangerous (vulnerable to MITM attacks).
Below is a usage example of that lib:
import urllib  # Python 2 (see the Python 3 sketch below)

link = 'https://www.stackoverflow.com'
html = urllib.urlopen(link).read()
print(html)
Three lines (plus the import) are all you need to grab the HTML from a page; simple, isn't it?
More about urllib: https://docs.python.org/2/library/urllib.html
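For anyone on Python 3, here is a hedged equivalent of the same fetch (urlopen moved to urllib.request; this snippet is not part of the original answer):
from urllib.request import urlopen  # Python 3 location of urlopen

link = 'https://www.stackoverflow.com'
html = urlopen(link).read()  # returns bytes; call .decode() if you need str
print(html)
Note that in Python 3, urlopen verifies server certificates by default, unlike the older behaviour described above.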
I also recommend you use a regex on the HTML to grab other links; an example of that (using the re library) would be:
import re
import urlparse  # Python 2; in Python 3: from urllib import parse as urlparse

for url in re.findall(r'<a[^>]+href=["\'](.[^"\']+)["\']', html, re.I):  # searches the HTML for other URLs
    # keep absolute URLs as-is (minus any #fragment); otherwise prepend the scheme and host of origLink
    link = url.split("#", 1)[0] \
        if url.startswith("http") \
        else '{uri.scheme}://{uri.netloc}'.format(uri=urlparse.urlparse(origLink)) + url.split("#", 1)[0]
