Python web scraping using BeautifulSoup: loop and skip certain URL values

So I am using the following code to scrape statutes from a site:
from bs4 import BeautifulSoup
import requests

f = open('C:\Python27\projects\FL_final.doc', 'w')
base_url = "http://www.leg.state.fl.us/statutes/index.cfm?App_mode=Display_Statute&URL=0000-0099/00{chapter:02d}/00{chapter:02d}.html"

for chapter in range(1, 9):
    url = base_url.format(chapter=chapter)
    r = requests.get(url)
    soup = BeautifulSoup(r.content, "html.parser")
    tableContents = soup.find('div', {'class': 'Chapters'})
    for title in tableContents.find_all('div', {'class': 'Title'}):
        f.write(title.text)
    for data in tableContents.find_all('div', {'class': 'Section'}):
        data = data.text.encode("utf-8", "ignore")
        data = "\n\n" + str(data) + "\n"
        f.write(data)
f.close()
The problem is that certain chapters are missing. For example, there are pages for chapters 1 and 2, but the pages for chapters 3, 4 and 5 don't exist. So when I use range(1, 9) the script errors out, because it can't pick up the contents of chapters 3, 4 and 5: their URLs (0003/0003, 0004/0004, 0005/0005) don't exist.
How can I skip missing URLs in my loop and let the program find the next available URL within the range?
Here is chapter 1's URL: http://www.leg.state.fl.us/statutes/index.cfm?App_mode=Display_Statute&URL=0000-0099/0001/0001.html

You can add a try around the URL request and check that tableContents is not None before applying your find_all:
from bs4 import BeautifulSoup
import requests

f = open('C:\Python27\projects\FL_final.doc', 'w')
base_url = "http://www.leg.state.fl.us/statutes/index.cfm?App_mode=Display_Statute&URL=0000-0099/00{chapter:02d}/00{chapter:02d}.html"

for chapter in range(1, 9):
    url = base_url.format(chapter=chapter)
    try:
        r = requests.get(url)
    except requests.exceptions.RequestException as e:  # this is the correct syntax
        print "missing url"
        print e
        continue  # skip this chapter and move on to the next one in the range
    soup = BeautifulSoup(r.content, "html.parser")
    tableContents = soup.find('div', {'class': 'Chapters'})
    if tableContents is not None:
        for title in tableContents.find_all('div', {'class': 'Title'}):
            f.write(title.text)
        for data in tableContents.find_all('div', {'class': 'Section'}):
            data = data.text.encode("utf-8", "ignore")
            data = "\n\n" + str(data) + "\n"
            print data
            f.write(data)
f.close()

You can also check whether tableContents was found at all, e.g.:
tableContents = soup.find('div', {'class': 'Chapters'})
if tableContents:
    for title in tableContents.find_all('div', {'class': 'Title'}):
        f.write(title.text)
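Note that requests does not raise an exception for a 404 response, so the try/except above only catches connection-level errors; checking the status code is another way to skip missing chapters. A minimal sketch of that variant, inside the chapter loop:

r = requests.get(url)
if r.status_code != 200:
    # the chapter page does not exist; move on to the next chapter in the range
    continue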

Related

Trying to scrape other categories with BeautifulSoup

This is the website I am trying to scrape:
https://www.jurongpoint.com.sg/store-directory/
This is my code. As you can see, I don't know how to fill in both of the {} placeholders in the url variable, because the URLs of the 4 categories I want to scrape differ, and the URL for Services in particular is very different. The comments above the url variable show the cate value of each of the 4 categories when clicked. Appreciate any help, thank you!
from bs4 import BeautifulSoup
import requests

def parse():
    cate = ["Service", "Food & Beverage", "Fashion & Accessories", "Electronics & Technology"]
    # cate=Food+%26+Beverage
    # cate=Electronics+%26+Technology
    # cate=Fashion+%26+Accessories
    # cate=Services
    url = "https://www.jurongpoint.com.sg/store-directory/?level=&cate={}+%26+{}"
    for cat in cate:
        for page in range(1, 14):
            print(page)
            soup = BeautifulSoup(requests.get(url).text, "html.parser")
            for link in soup.find_all('div', class_='entry-content'):
                try:
                    shops = soup.find_all('div', class_="col-9")
                    names = soup.find_all('tr', class_="clickable")
                    for n, k in zip(names, shops):
                        name = n.find_all('td')[1].text.replace(' ', '')
                        desc = k.text.replace(' ', '')
                        print(name + "\n")
                        print(desc)
                except AttributeError as e:
                    print(e)
            next_button = soup.select_one('.PagedList-skipToNext a')
            if next_button:
                url = next_button.get('href')
            else:
                break

parse()
Use the params argument of your request and avoid managing escape characters (like %26) yourself:
url = "https://www.jurongpoint.com.sg/store-directory"
for cat in cate:
for page in range(1, 14):
print(f'Scraping category {cat} page {page}')
payload = {
'level': '',
'cate': cat,
'page': page
}
resp = requests.get(url, params=payload)
soup = BeautifulSoup(resp.text, 'html.parser')
# your code here
>>> resp.url
'https://www.jurongpoint.com.sg/store-directory/?level=&cate=Electronics+%26+Technology&page=8'
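requests encodes the query string for you, as the resp.url above shows (Food & Beverage becomes Food+%26+Beverage). From there the parsing can stay close to the question's; a minimal sketch, assuming the col-9 and clickable selectors from the question still match the page:

# continuing inside the page loop, after soup = BeautifulSoup(resp.text, 'html.parser')
shops = soup.find_all('div', class_='col-9')
names = soup.find_all('tr', class_='clickable')
for n, k in zip(names, shops):
    name = n.find_all('td')[1].text.strip()
    desc = k.text.strip()
    print(name, '-', desc)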

Beautiful Soup only extracts one tag when I can see all the others in the HTML code

Trying to understand how web scraping works:
import requests
from bs4 import BeautifulSoup as soup

url = "https://webscraper.io/test-sites/e-commerce/allinone/computers/laptops"
result = requests.get(url)
doc = soup(result.text, "lxml")

items = doc.find_all('div', {'class': 'col-sm-4 col-lg-4 col-md-4'})
for item in items:
    caption = item.find('div', {'class': 'caption'})
    price = item.find('h4', {'class': 'pull-right price'})
print(price.string)
However, when I run this, all that is returned is the final price from the website ($1799.00). Why does it skip all the other h4 tags and just return the last one?
Any help would be much appreciated!
If you need any more information, please let me know.
What happens?
You call print() only after you have finished iterating over your results; that's why you only get the last one.
How to fix?
Put the print() into your loop:
for item in items:
    caption = item.find('div', {'class': 'caption'})
    price = item.find('h4', {'class': 'pull-right price'})
    print(price.string)
Output
$295.99
$299.00
$299.00
$306.99
$321.94
$356.49
$364.46
$372.70
$379.94
$379.95
$391.48
$393.88
$399.00
$399.99
$404.23
$408.98
$409.63
$410.46
$410.66
$416.99
$433.30
$436.29
$436.29
$439.73
$454.62
$454.73
$457.38
$465.95
$468.56
$469.10
$484.23
$485.90
$487.80
$488.64
$488.78
$494.71
$497.17
$498.23
$520.99
$564.98
$577.99
$581.99
$609.99
$679.00
$679.00
$729.00
$739.99
$745.99
$799.00
$809.00
$899.00
$999.00
$1033.99
$1096.02
$1098.42
$1099.00
$1099.00
$1101.83
$1102.66
$1110.14
$1112.91
$1114.55
$1123.87
$1123.87
$1124.20
$1133.82
$1133.91
$1139.54
$1140.62
$1143.40
$1144.20
$1144.40
$1149.00
$1149.00
$1149.73
$1154.04
$1170.10
$1178.19
$1178.99
$1179.00
$1187.88
$1187.98
$1199.00
$1199.00
$1199.73
$1203.41
$1212.16
$1221.58
$1223.99
$1235.49
$1238.37
$1239.20
$1244.99
$1259.00
$1260.13
$1271.06
$1273.11
$1281.99
$1294.74
$1299.00
$1310.39
$1311.99
$1326.83
$1333.00
$1337.28
$1338.37
$1341.22
$1347.78
$1349.23
$1362.24
$1366.32
$1381.13
$1399.00
$1399.00
$1769.00
$1769.00
$1799.00
Example
Instead of just printing the results while iterating, store them in a structured list of dicts and print or save it after the loop:
import requests
from bs4 import BeautifulSoup as soup

url = "https://webscraper.io/test-sites/e-commerce/allinone/computers/laptops"
result = requests.get(url)
doc = soup(result.text, "lxml")

items = doc.find_all('div', {'class': 'col-sm-4 col-lg-4 col-md-4'})
data = []
for item in items:
    data.append({
        'caption': item.a['title'],
        'price': item.find('h4', {'class': 'pull-right price'}).string
    })
print(data)
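If you want the results in a file rather than on screen, the same list of dicts feeds straight into csv.DictWriter; a minimal sketch, assuming a laptops.csv output file:

import csv

# data is the list of dicts built in the loop above
with open('laptops.csv', 'w', newline='') as f:
    writer = csv.DictWriter(f, fieldnames=['caption', 'price'])
    writer.writeheader()
    writer.writerows(data)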

Web scraping Analytics Vidhya to get the courses, their names and the total count of reviews

I have scraped the Analytics Vidhya website to get their courses, the names of the courses and the total reviews of the courses. Getting the courses themselves was no problem; however, I am having trouble scraping the names of the courses and their total reviews.
Here's my code:
import requests
from bs4 import BeautifulSoup

for page in range(1, 5):
    url = "https://courses.analyticsvidhya.com/collections?category=courses&page=" + str(page)
    page_request = requests.get(url)
    data = page_request.content
    soup = BeautifulSoup(data, "html.parser")
    for courses in soup.find_all('div', {'class': 'collections__product-cards collections__product-cards___0b9ab'}):
        for course_name in soup.find_all('ul', {'class': 'products__list'}):
            for names in soup.find_all('li', {'class': 'products__list-item'}):
                for divs in soup.find_all('div', {'class': 'course-card__body'}):
                    for revs in soup.find_all('div', {'class': 'course-card__reviews'}):
                        reviews = soup.find('span', {'class': 'review__stars-count'})
                        title = soup.find('h3')
                        review = reviews.text
                        course_title = title.text
                        print(course_title + " " + str(review) + " " + "https://courses.analyticsvidhya.com" + names.find('a')['href'])
The problem when running this Python script is that it keeps printing the same course_title (name of the course) and the same reviews.
import requests
from bs4 import BeautifulSoup

for page in range(1, 6):
    url = "https://courses.analyticsvidhya.com/collections?category=courses&page=" + str(page)
    page_request = requests.get(url)
    data = page_request.content
    soup = BeautifulSoup(data, "html.parser")
    # search within each card (courses -> names -> divs), not the whole soup
    for courses in soup.find_all('div', {'class': 'collections__product-cards collections__product-cards___0b9ab'}):
        for names in courses.find_all('li', {'class': 'products__list-item'}):
            for divs in names.find_all('div', {'class': 'course-card__body'}):
                title = divs.find_all('h3')
                for revs in divs.find_all('div', {'class': 'course-card__reviews'}):
                    rev = revs.find_all('span', {'class': 'review__stars-count'})
                    for i, j in zip(title, rev):
                        course_title = i.text
                        review = j.text
                        print(course_title + " " + str(review) + " " + "https://courses.analyticsvidhya.com" + names.find('a')['href'])
I have made a few edits to the code; now it is able to scrape the course name, the review count and the link.
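The nested find_all loops can also be flattened with CSS selectors, since each course card carries the title, the review count and the link; a minimal sketch, assuming the class names used above are still current:

# continuing after soup = BeautifulSoup(data, "html.parser")
for card in soup.select('li.products__list-item'):
    title = card.select_one('h3')
    stars = card.select_one('span.review__stars-count')
    link = card.select_one('a')
    if title and stars and link:  # skip cards missing any of the three fields
        print(title.text.strip(), stars.text.strip(),
              "https://courses.analyticsvidhya.com" + link['href'])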

Python Beautiful Soup output into Excel

I am trying to get the output from the Python script into Excel. The script works fine in Python, but when I try to add the CSV import and the writerow rule it doesn't work: it says price is not defined in writerow. Also, how would I write multiple items? Any help would be appreciated.
import csv
import requests
from bs4 import BeautifulSoup

f = open('dataoutput.csv', 'w', newline="")
writer = csv.writer(f)

def trade_spider(max_pages):
    page = 1
    while page <= max_pages:
        url = 'http://www.zoopla.co.uk/for-sale/property/manchester/?identifier=manchester&q=manchester&search_source=home&radius=0&pn=' + str(page)
        source_code = requests.get(url)
        plain_text = source_code.text
        soup = BeautifulSoup(plain_text)
        for link in soup.findAll('a', {'class': 'listing-results-price text-price'}):
            href = "http://www.zoopla.co.uk" + link.get('href')
            title = link.string
            get_single_item_data(href)
        page += 1

def get_single_item_data(item_url):
    source_code = requests.get(item_url)
    plain_text = source_code.text
    soup = BeautifulSoup(plain_text)
    for item_name in soup.findAll('div', {'class': 'listing-details-address'}):
        address = item_name.string
        print(item_name.get_text(strip=True))
    for item_fame in soup.findAll('div', {'class': 'listing-details-price text-price'}):
        price = item_fame.string
        print(item_fame.get_text(strip=True))

writer.writerow(price)
trade_spider(1)
The object price is not defined anywhere in your script outside of the function get_single_item_data, so outside of that function your code cannot recognize any object with that name. Also, get_single_item_data does not return anything from the BeautifulSoup object; it only prints it. You should rewrite your function to be something like this:
def get_single_item_data(item_url):
    source_code = requests.get(item_url)
    plain_text = source_code.text
    soup = BeautifulSoup(plain_text)
    # create a list to contain addresses
    addresses = []
    for item_name in soup.findAll('div', {'class': 'listing-details-address'}):
        address = item_name.string
        # add each address to the list
        addresses.append(address)
        print(item_name.get_text(strip=True))
    # create a list for prices
    prices = []
    for item_fame in soup.findAll('div', {'class': 'listing-details-price text-price'}):
        price = item_fame.string
        # add prices to the list
        prices.append(price)
        print(item_fame.get_text(strip=True))
    # alter the code to return the data structure you prefer
    return [addresses, prices]
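On the caller's side the two returned lists can then be paired and written row by row. Note that writer.writerow expects a sequence of fields, so passing a bare string such as price writes one character per column; wrap the fields in a list instead. A minimal sketch, inside trade_spider's link loop where href and writer are available:

addresses, prices = get_single_item_data(href)
for address, price in zip(addresses, prices):
    # one row per listing: address in the first column, price in the second
    writer.writerow([address, price])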

Python web scraping page loop

I appreciate this has been asked many times on here, but I can't seem to get it to work for me.
I've written a scraper which successfully scrapes everything I need from the first page of the site, but I can't figure out how to get it to loop through the various pages.
The URL simply increments like this: BLAH3 + 'page=x'
I haven't been learning to code for very long, so any advice would be appreciated!
import requests
from bs4 import BeautifulSoup

url = 'http://www.URL.org/BLAH1/BLAH2/BLAH3'
soup = BeautifulSoup(r.content, "html.parser")

# String substitution for HTML
for link in soup.find_all("a"):
    "<a href='>%s'>%s</a>" % (link.get("href"), link.text)

# Fetch and print general data from title class
general_data = soup.find_all('div', {'class': 'title'})
for item in general_data:
    name = print(item.contents[0].text)
    address = print(item.contents[1].text.replace('.', ''))
    care_type = print(item.contents[2].text)
Update:
r = requests.get('http://www.URL.org/BLAH1/BLAH2/BLAH3')

for page in range(10):
    r = requests.get('http://www.URL.org/BLAH1/BLAH2/BLAH3' + 'page=' + page)
    soup = BeautifulSoup(r.content, "html.parser")
    #print(soup.prettify())

    # String substitution for HTML
    for link in soup.find_all("a"):
        "<a href='>%s'>%s</a>" % (link.get("href"), link.text)

    # Fetch and print general data from title class
    general_data = soup.find_all('div', {'class': 'title'})
    for item in general_data:
        name = print(item.contents[0].text)
        address = print(item.contents[1].text.replace('.', ''))
        care_type = print(item.contents[2].text)
Update 2!:
import requests
from bs4 import BeautifulSoup

url = 'http://www.URL.org/BLAH1/BLAH2/BLAH3&page='

for page in range(10):
    r = requests.get(url + str(page))
    soup = BeautifulSoup(r.content, "html.parser")

    # String substitution for HTML
    for link in soup.find_all("a"):
        print("<a href='>%s'>%s</a>" % (link.get("href"), link.text))

    # Fetch and print general data from title class
    general_data = soup.find_all('div', {'class': 'title'})
    for item in general_data:
        print(item.contents[0].text)
        print(item.contents[1].text.replace('.', ''))
        print(item.contents[2].text)
To loop pages with page=x you need a for loop like this:
import requests
from bs4 import BeautifulSoup

url = 'http://www.housingcare.org/housing-care/results.aspx?ath=1%2c2%2c3%2c6%2c7&stp=1&sm=3&vm=list&rp=10&page='

for page in range(10):
    print('---', page, '---')
    r = requests.get(url + str(page))
    soup = BeautifulSoup(r.content, "html.parser")

    # String substitution for HTML
    for link in soup.find_all("a"):
        print("<a href='>%s'>%s</a>" % (link.get("href"), link.text))

    # Fetch and print general data from title class
    general_data = soup.find_all('div', {'class': 'title'})
    for item in general_data:
        print(item.contents[0].text)
        print(item.contents[1].text.replace('.', ''))
        print(item.contents[2].text)
Every page can be different, and a better solution needs more information about the page. Sometimes you can get a link to the last page and then use that information instead of the hard-coded 10 in range(10); see the sketch below.
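For the last-page variant, a minimal sketch, assuming a hypothetical pager whose links carry plain page numbers (replace the a.page-link selector with whatever the real pager uses):

import requests
from bs4 import BeautifulSoup

url = 'http://www.housingcare.org/housing-care/results.aspx?ath=1%2c2%2c3%2c6%2c7&stp=1&sm=3&vm=list&rp=10&page='

# fetch the first page and read the highest page number from the pager
r = requests.get(url + '0')
soup = BeautifulSoup(r.content, "html.parser")

# hypothetical selector: adjust to the site's real pager markup
numbers = [int(a.text) for a in soup.select('a.page-link') if a.text.strip().isdigit()]
last_page = max(numbers)

for page in range(last_page + 1):
    r = requests.get(url + str(page))
    soup = BeautifulSoup(r.content, "html.parser")
    # ... same parsing as above ...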
Or you can use while True to loop and break to leave the loop when there is no link to a next page. But first you would have to show the real page URL in the question.
EDIT: an example of how to get the link to the next page and follow it, so you get all pages, not only the 10 pages of the previous version.
import requests
from bs4 import BeautifulSoup

# link to first page - without `page=`
url = 'http://www.housingcare.org/housing-care/results.aspx?ath=1%2c2%2c3%2c6%2c7&stp=1&sm=3&vm=list&rp=10'

# only for information, not used in url
page = 0

while True:
    print('---', page, '---')
    r = requests.get(url)
    soup = BeautifulSoup(r.content, "html.parser")

    # String substitution for HTML
    for link in soup.find_all("a"):
        print("<a href='>%s'>%s</a>" % (link.get("href"), link.text))

    # Fetch and print general data from title class
    general_data = soup.find_all('div', {'class': 'title'})
    for item in general_data:
        print(item.contents[0].text)
        print(item.contents[1].text.replace('.', ''))
        print(item.contents[2].text)

    # link to next page
    next_page = soup.find('a', {'class': 'next'})
    if next_page:
        url = next_page.get('href')
        page += 1
    else:
        break  # exit `while True`
