Class crawler written in Python throws AttributeError

After writing some code in Python, I've got stuck. I'm a newbie at writing code following OOP design in Python. The XPaths I've used in my code are fine. I'm getting lost when it comes to running the "passing_links" method of my "Info_grabber" class through an instance of the "page_crawler" class. Every time I run my code I get the error "'page_crawler' object has no attribute 'passing_links'". Perhaps the way I've written my crawler class is not how it should be. However, as I've spent a few hours on it, I hope I can get some suggestions as to which lines I should rectify to make it work. Thanks in advance for taking a look at it:
from lxml import html
import requests

class page_crawler(object):
    main_link = "https://www.yellowpages.com/search?search_terms=pizza&geo_location_terms=San%20Francisco%2C%20CA"
    base_link = "https://www.yellowpages.com"

    def __init__(self):
        self.links = [self.main_link]

    def crawler(self):
        for link in self.links:
            self.get_link(link)

    def get_link(self, link):
        print("Running page " + link)
        page = requests.get(link)
        tree = html.fromstring(page.text)
        item_links = tree.xpath('//h2[@class="n"]/a[@class="business-name"][not(@itemprop="name")]/@href')
        for item_link in item_links:
            return self.base_link + item_link
        links = tree.xpath('//div[@class="pagination"]//li/a/@href')
        for url in links:
            if not self.base_link + url in self.links:
                self.links += [self.base_link + url]

class Info_grabber(page_crawler):
    def __init__(self, plinks):
        page_crawler.__init__(self)
        self.plinks = [plinks]

    def passing_links(self):
        for nlink in self.plinks:
            print(nlink)
            self.crawling_deep(nlink)

    def crawling_deep(self, uurl):
        page = requests.get(uurl)
        tree = html.fromstring(page.text)
        name = tree.findtext('.//div[@class="sales-info"]/h1')
        phone = tree.findtext('.//p[@class="phone"]')
        try:
            email = tree.xpath('//div[@class="business-card-footer"]/a[@class="email-business"]/@href')[0]
        except IndexError:
            email = ""
        print(name, phone, email)

if __name__ == '__main__':
    crawl = Info_grabber(page_crawler)
    crawl.crawler()
    crawl.passing_links()
Now upon execution I get a new error, "raise MissingSchema(error)", when it hits the line self.crawling_deep(nlink).

I'm not sure I understand what you're trying to do in page_crawler.get_link, but I think you should have a separate method for collecting the "pagination" links.
I renamed Info_grabber.plinks to Info_grabber.links so that page_crawler.crawler can access them, and managed to extract info from several pages; however, the code is far from ideal.
class page_crawler(object):
    main_link = "https://www.yellowpages.com/search?search_terms=pizza&geo_location_terms=San%20Francisco%2C%20CA"
    base_link = "https://www.yellowpages.com"

    def __init__(self):
        self.links = []
        self.pages = []

    def crawler(self):
        for link in self.links:
            self.get_link(link)

    def get_link(self, link):
        print("Running page " + link)
        page = requests.get(link)
        tree = html.fromstring(page.text)
        item_links = tree.xpath('//h2[@class="n"]/a[@class="business-name"][not(@itemprop="name")]/@href')
        for item_link in item_links:
            if not self.base_link + item_link in self.links:
                self.links += [self.base_link + item_link]

    def get_pages(self, link):
        page = requests.get(link)
        tree = html.fromstring(page.text)
        links = tree.xpath('//div[@class="pagination"]//li/a/@href')
        for url in links:
            if not self.base_link + url in self.pages:
                self.pages += [self.base_link + url]

class Info_grabber(page_crawler):
    def __init__(self, plinks):
        page_crawler.__init__(self)
        self.links += [plinks]

    def passing_links(self):
        for nlink in self.links:
            print(nlink)
            self.crawling_deep(nlink)

    def crawling_deep(self, uurl):
        page = requests.get(uurl)
        tree = html.fromstring(page.text)
        name = tree.findtext('.//div[@class="sales-info"]/h1')
        phone = tree.findtext('.//p[@class="phone"]')
        try:
            email = tree.xpath('//div[@class="business-card-footer"]/a[@class="email-business"]/@href')[0]
        except IndexError:
            email = ""
        print(name, phone, email)

if __name__ == '__main__':
    url = page_crawler.main_link
    crawl = Info_grabber(url)
    crawl.crawler()
    crawl.passing_links()
You'll notice that I added a pages property and a get_pages method in page_crawler; I'll leave the implementation part to you.
You might need to add more methods to page_crawler later on, as they could be of use if you develop more child classes. Finally, consider looking into composition, as it is also a strong OOP feature.
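To make that last suggestion concrete, here is a minimal, hypothetical sketch of a composition-based design: instead of Info_grabber inheriting from page_crawler, it holds a crawler instance and delegates to it (the class name, method bodies, and usage lines are only illustrative):

class InfoGrabber(object):
    """Hypothetical composition-based variant: has a crawler instead of being one."""

    def __init__(self, crawler):
        self.crawler = crawler              # composition: keep a page_crawler instance

    def passing_links(self):
        for nlink in self.crawler.links:    # delegate link storage to the crawler
            print(nlink)
            self.crawling_deep(nlink)

    def crawling_deep(self, uurl):
        ...                                 # same per-page scraping as above


# Usage sketch:
# crawler = page_crawler()
# crawler.crawler()
# InfoGrabber(crawler).passing_links()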

Your crawl is an instance of the page_crawler class, but not of the Info_grabber class, which is the class that has the passing_links method. I think what you want to do is make crawl an instance of Info_grabber instead.
Then I believe that before doing self.crawling_deep you must do:
import re  # assumed to be imported at the top of the module

if n_link:
    page = requests.get(n_link).text
    tel = re.findall(r'\d{10}', page)[0] if re.findall(r'\d{10}', page) else ""
    print(tel)
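As a footnote on the MissingSchema error mentioned in the edit: requests raises it whenever the value handed to requests.get is not an absolute URL, which is what happens when Info_grabber(page_crawler) stores the class object itself (or a relative path) in the list of links. A small, hedged guard along these lines can skip such values before requesting; the helper name is my own:

from urllib.parse import urlparse

import requests


def fetch_if_url(candidate):
    """Illustrative helper: request candidate only if it is an absolute http(s) URL."""
    parsed = urlparse(str(candidate))
    if parsed.scheme in ("http", "https") and parsed.netloc:
        return requests.get(candidate)
    print("Skipping value that is not a URL:", candidate)
    return None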

Related

How can I get data from a website using BeautifulSoup and requests?

I am a beginner in web scraping, and I need help with this problem.
Allrecipes.com is a website where you can find recipes based on a search, which in this case is 'pie'.
Link to the HTML file:
'view-source:https://www.allrecipes.com/search/results/?wt=pie&sort=re'
(right click -> view page source)
I want to create a program that takes an input, searches for it on Allrecipes, and returns a list of tuples for the first five recipes, with data such as the time it takes to make, serving yield, ingredients, and more.
This is my program so far:
import requests
from bs4 import BeautifulSoup

def searchdata():
    inp = input('what recipe would you like to search')
    url = 'http://www.allrecipes.com/search/results/?wt=' + str(inp) + '&sort=re'
    r = requests.get(url)
    soup = BeautifulSoup(r.text, 'html.parser')
    links = []
    # fill in code for finding top 3 or five links
    for i in range(3):
        a = requests.get(links[i])
        soupa = BeautifulSoup(a.text, 'html.parser')
        # fill in code to find name, ingredients, time, and serving size with data from soupa
        names = []
        time = []
        servings = []
        ratings = []
        ingrediants = []

searchdata()
Yes, I know my code is very messy, but what should I fill in at the two fill-in areas?
Thanks
After searching for the recipe you have to get the links of each recipe and then request each of those links again, because the information you're looking for is not available on the search page. That would not look clean without OOP, so here's the class I wrote that does what you want.
import requests
from time import sleep
from bs4 import BeautifulSoup

class Scraper:
    links = []
    names = []

    def get_url(self, url):
        url = requests.get(url)
        self.soup = BeautifulSoup(url.content, 'html.parser')

    def print_info(self, name):
        self.get_url(f'https://www.allrecipes.com/search/results/?wt={name}&sort=re')
        if self.soup.find('span', class_='subtext').text.strip()[0] == '0':
            print(f'No recipes found for {name}')
            return
        results = self.soup.find('section', id='fixedGridSection')
        articles = results.find_all('article')
        texts = []
        for article in articles:
            txt = article.find('h3', class_='fixed-recipe-card__h3')
            if txt:
                if len(texts) < 5:
                    texts.append(txt)
                else:
                    break
        self.links = [txt.a['href'] for txt in texts]
        self.names = [txt.a.span.text for txt in texts]
        self.get_data()

    def get_data(self):
        for i, link in enumerate(self.links):
            self.get_url(link)
            print('-' * 4 + self.names[i] + '-' * 4)
            info_names = [div.text.strip() for div in self.soup.find_all(
                'div', class_='recipe-meta-item-header')]
            ingredient_spans = self.soup.find_all('span', class_='ingredients-item-name')
            ingredients = [span.text.strip() for span in ingredient_spans]
            for i, div in enumerate(self.soup.find_all('div', class_='recipe-meta-item-body')):
                print(info_names[i].capitalize(), div.text.strip())
            print()
            print('Ingredients'.center(len(ingredients[0]), ' '))
            print('\n'.join(ingredients))
            print()
            print('*' * 50, end='\n\n')

chrome = Scraper()
chrome.print_info(input('What recipe would you like to search: '))

returning a scraped variable from a function in python

I want to create a function that returns a variable I can write to a CSV.
If I write:
from makesoup import make_soup

def get_links(soupbowl):
    linkname = ""
    for boot in soupbowl.findAll('tbody'):
        for record in boot.findAll('tr', {"row0", "row1"}):
            for link in record.find_all('a'):
                if link.has_attr('href'):
                    linkname = linkname + "\n" + (link.attrs['href'])[1:]
    print(linkname)

soup = make_soup("https://www.footballdb.com/teams/index.html")
pyt = get_links(soup)
print(pyt)
It prints what I want (all the links on the page) inside the function, and None for print(pyt).
Instead of print(linkname) in the function, I want to return linkname.
But when I do, I only get the first link on the page. Is there a way to pass all the links to the variable pyt, which is outside of the function?
Thank you in advance
Try the following, to get all the links in one go:
from makesoup import make_soup

def get_links(soupbowl):
    links_found = []
    linkname = ""
    for boot in soupbowl.findAll('tbody'):
        for record in boot.findAll('tr', {"row0", "row1"}):
            for link in record.find_all('a'):
                if link.has_attr('href'):
                    linkname = linkname + "\n" + (link.attrs['href'])[1:]
                    links_found.append(linkname)
    return links_found

soup = make_soup("https://www.footballdb.com/teams/index.html")
pyt = get_links(soup)
print(pyt)
Or use yield, to return them one by one - while you process the output for something else:
from makesoup import make_soup

def get_links(soupbowl):
    linkname = ""
    for boot in soupbowl.findAll('tbody'):
        for record in boot.findAll('tr', {"row0", "row1"}):
            for link in record.find_all('a'):
                if link.has_attr('href'):
                    linkname = linkname + "\n" + (link.attrs['href'])[1:]
                    yield linkname

soup = make_soup("https://www.footballdb.com/teams/index.html")
pyt = get_links(soup)
for link in pyt:
    do_something()
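Since the original goal was a value that can be written to a CSV, the returned list (or the generator) can feed the csv module directly; a minimal, hedged example (the output filename is my own, and it writes one row per item that get_links produces):

import csv
from makesoup import make_soup

soup = make_soup("https://www.footballdb.com/teams/index.html")

with open("links.csv", "w", newline="") as f:   # hypothetical output file
    writer = csv.writer(f)
    for item in get_links(soup):                # works with the list or the generator version
        writer.writerow([item.strip()])         # one row per returned item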
Another option is to collect the links into a list inside the loops and return that list once they finish:

from makesoup import make_soup

def get_links(soupbowl):
    links = []
    linkname = ""
    for boot in soupbowl.findAll('tbody'):
        for record in boot.findAll('tr', {"row0", "row1"}):
            for link in record.find_all('a'):
                if link.has_attr('href'):
                    linkname = linkname + "\n" + (link.attrs['href'])[1:]
                    links.append(linkname)
    return links

soup = make_soup("https://www.footballdb.com/teams/index.html")
pyt = get_links(soup)
print(pyt)
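For completeness, the reason the original attempt returned only one link is that return leaves the function on the first pass through the innermost loop, whereas appending to a list (or yielding) lets the loops finish; a tiny illustration:

def first_only(items):
    for item in items:
        return item            # exits the function on the first iteration


def all_items(items):
    collected = []
    for item in items:
        collected.append(item)
    return collected           # returned only after the loop completes


print(first_only(["a", "b", "c"]))   # a
print(all_items(["a", "b", "c"]))    # ['a', 'b', 'c']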

Scrapy SitemapSpider not working

I'm trying to crawl the website of a prominent UK retailer and get an AttributeError as follows:

nl_env/lib/python3.6/site-packages/scrapy/spiders/sitemap.py", line 52, in _parse_sitemap
    for r, c in self._cbs:
AttributeError: 'NlSMCrawlerSpider' object has no attribute '_cbs'

It's probably me not fully understanding how a SitemapSpider operates - see my code below:
class NlSMCrawlerSpider(SitemapSpider):
    name = 'nl_smcrawler'
    allowed_domains = ['newlook.com']
    sitemap_urls = ['http://www.newlook.com/uk/sitemap/maps/sitemap_uk_product_en_1.xml']
    sitemap_follow = ['/uk/womens/clothing/']

    # sitemap_rules = [
    #     ('/uk/womens/clothing/', 'parse_product'),
    # ]

    def __init__(self):
        self.driver = webdriver.Safari()
        self.driver.set_window_size(800, 600)
        time.sleep(2)

    def parse_product(self, response):
        driver = self.driver
        driver.get(response.url)
        time.sleep(1)

        # Collect products
        itemDetails = driver.find_elements_by_class_name('product-details-page content')

        # Pull features
        desc = itemDetails[0].find_element_by_class_name('product-description__name').text
        href = driver.current_url

        # Generate a product identifier
        identifier = href.split('/p/')[1].split('?comp')[0]
        identifier = int(identifier)

        # datetime
        dt = date.today()
        dt = dt.isoformat()

        # Price symbol removal and integer conversion
        try:
            priceString = itemDetails[0].find_element_by_class_name('price product-description__price').text
        except:
            priceString = itemDetails[0].find_element_by_class_name('price--previous-price product-description__price--previous-price ng-scope').text
        priceInt = priceString.split('£')[1]
        originalPrice = float(priceInt)

        # discountedPrice logic
        try:
            discountedPriceString = itemDetails[0].find_element_by_class_name('price price--marked-down product-description__price').text
            discountedPriceInt = discountedPriceString.split('£')[1]
            discountedPrice = float(discountedPriceInt)
        except:
            discountedPrice = 'N/A'

        # NlScrapeItem
        item = NlScrapeItem()

        # Append product to NlScrapeItem
        item['identifier'] = identifier
        item['href'] = href
        item['description'] = desc
        item['originalPrice'] = originalPrice
        item['discountedPrice'] = discountedPrice
        item['firstSighted'] = dt
        item['lastSighted'] = dt
        yield item
Also, don't hesitate to ask for any further details; see the link to the sitemap above and the link to the actual file within the Scrapy package that throws the error (link - github). Your help would be sincerely appreciated.
Edit: one thought
Looking at the 2nd link (from the Scrapy package), I can see _cbs is initialised in the def __init__(self, *a, **kw): function - is the fact that I have my own init logic throwing it off?
There are two issues in your scraper. One is the __init__ method:
def __init__(self):
    self.driver = webdriver.Safari()
    self.driver.set_window_size(800, 600)
    time.sleep(2)
Now you have defined a new __init__ and overridden the base class __init__, which is never called by yours, and hence _cbs is not initialized. You can easily fix this by changing your __init__ method as below:
def __init__(self, *a, **kw):
    super(NlSMCrawlerSpider, self).__init__(*a, **kw)
    self.driver = webdriver.Safari()
    self.driver.set_window_size(800, 600)
    time.sleep(2)
Next, the SitemapSpider will always send the response to the parse method, and you have not defined a parse method at all. So I added a simple one that just prints the URLs:
def parse(self, response):
    print(response.url)
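If the goal is to route the product pages to parse_product instead, un-commenting sitemap_rules should achieve that, because SitemapSpider sends each sitemap URL to the callback of the first rule whose pattern matches it. A hedged sketch of the combined fix, reusing the imports and scraping logic from the question:

class NlSMCrawlerSpider(SitemapSpider):
    name = 'nl_smcrawler'
    allowed_domains = ['newlook.com']
    sitemap_urls = ['http://www.newlook.com/uk/sitemap/maps/sitemap_uk_product_en_1.xml']
    sitemap_follow = ['/uk/womens/clothing/']
    sitemap_rules = [
        ('/uk/womens/clothing/', 'parse_product'),   # matching URLs go to parse_product
    ]

    def __init__(self, *a, **kw):
        super(NlSMCrawlerSpider, self).__init__(*a, **kw)   # keeps _cbs initialised
        self.driver = webdriver.Safari()
        self.driver.set_window_size(800, 600)
        time.sleep(2)

    def parse_product(self, response):
        ...  # the Selenium scraping logic from the question goes here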

Web Crawler not working with Python

I'm having issues with a simple web crawler: when I run the following script, it does not iterate through the sites and it does not give me any results.
This is what I get:
1 Visiting: https://www.mongodb.com/
Word never found
Process finished with exit code 0
Any tips as to why this is not working correctly? I'm using the following example (http://www.netinstructions.com/how-to-make-a-web-crawler-in-under-50-lines-of-python-code/).
Here is the code:
from html.parser import HTMLParser
from urllib.request import urlopen
from urllib import parse

class LinkParser(HTMLParser):
    # This is a function that HTMLParser normally has,
    # but we are adding some functionality to it
    def handle_starttag(self, tag, attrs):
        """ We are looking for the beginning of a link.
        Links normally look like <a href="...">. """
        if tag == 'a':
            for (key, value) in attrs:
                if key == 'href':
                    # We are grabbing the new URL. We are also adding the
                    # base URL to it. For example:
                    # www.netinstructions.com is the base and
                    # somepage.html is the new URL (a relative URL)
                    #
                    # We combine a relative URL with the base URL to create
                    # an absolute URL like:
                    # www.netinstructions.com/somepage.html
                    newUrl = parse.urljoin(self.baseUrl, value)
                    # And add it to our collection of links:
                    self.links = self.links + [newUrl]

    def getLinks(self, url):
        self.links = []
        # Remember the base URL which will be important when creating
        # absolute URLs
        self.baseUrl = url
        # Use the urlopen function from the standard Python 3 library
        response = urlopen(url)
        # Make sure that we are looking at HTML and not other things that
        # are floating around on the internet (such as
        # JavaScript files, CSS, or .PDFs for example)
        if response.getheader('Content-Type') == 'text/html':
            htmlBytes = response.read()
            # Note that feed() handles Strings well, but not bytes
            # (A change from Python 2.x to Python 3.x)
            htmlString = htmlBytes.decode("utf-8")
            self.feed(htmlString)
            return htmlString, self.links
        else:
            return "", []

# And finally here is our spider. It takes in a URL, a word to find,
# and the number of pages to search through before giving up
def spider(url, word, maxPages):
    pagesToVisit = [url]
    numberVisited = 0
    foundWord = False
    # The main loop. Create a LinkParser and get all the links on the page.
    # Also search the page for the word or string
    # In our getLinks function we return the web page
    # (this is useful for searching for the word)
    # and we return a set of links from that web page
    # (this is useful for where to go next)
    while numberVisited < maxPages and pagesToVisit != [] and not foundWord:
        numberVisited = numberVisited + 1
        # Start from the beginning of our collection of pages to visit:
        url = pagesToVisit[0]
        pagesToVisit = pagesToVisit[1:]
        try:
            print(numberVisited, "Visiting:", url)
            parser = LinkParser()
            data, links = parser.getLinks(url)
            if data.find(word) > -1:
                foundWord = True
            # Add the pages that we visited to the end of our collection
            # of pages to visit:
            pagesToVisit = pagesToVisit + links
            print(" **Success!**")
        except:
            print(" **Failed!**")
    if foundWord:
        print("The word", word, "was found at", url)
    else:
        print("Word never found")

if __name__ == "__main__":
    spider("https://www.mongodb.com/", "MongoDB", 400)
First, edit the content-type checker line to:
if response.getheader('Content-Type') == 'text/html; charset=utf-8':
as suggested by @glibdud.
If you would like your program to check all links until maxPages is reached or pagesToVisit == [], simply remove the foundWord condition from the line:
while numberVisited < maxPages and pagesToVisit != [] and not foundWord:
to:
while numberVisited < maxPages and pagesToVisit != []:
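A slightly more forgiving variant, assuming you want any text/html response regardless of the charset suffix, is to compare only the media type inside LinkParser.getLinks; a minimal sketch:

# Inside LinkParser.getLinks: accept 'text/html' with or without a charset suffix.
content_type = response.getheader('Content-Type') or ''
if content_type.split(';')[0].strip() == 'text/html':
    ...  # read, decode and feed the page as before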

Gevent link crawler

Here I have written code using Python and Beautiful Soup to parse all the links on a page into a repository of links. Next, it fetches the contents of any URL from the repository just created, parses the links from this new content into the repository, and continues this process for all links in the repository until stopped or until a given number of links have been fetched.
But this code is very slow. How can I improve it by using asynchronous programming with gevent in Python?
Code
class Crawler(object):
    def __init__(self):
        self.soup = None                               # Beautiful Soup object
        self.current_page = "http://www.python.org/"   # Current page's address
        self.links = set()                             # Queue with every links fetched
        self.visited_links = set()
        self.counter = 0                               # Simple counter for debug purpose

    def open(self):
        # Open url
        print self.counter, ":", self.current_page
        res = urllib2.urlopen(self.current_page)
        html_code = res.read()
        self.visited_links.add(self.current_page)

        # Fetch every links
        self.soup = BeautifulSoup.BeautifulSoup(html_code)
        page_links = []
        try:
            page_links = itertools.ifilter(  # Only deal with absolute links
                lambda href: 'http://' in href,
                (a.get('href') for a in self.soup.findAll('a')))
        except Exception as e:  # Magnificent exception handling
            print 'Error: ', e
            pass

        # Update links
        self.links = self.links.union(set(page_links))

        # Choose a random url from non-visited set
        self.current_page = random.sample(self.links.difference(self.visited_links), 1)[0]
        self.counter += 1

    def run(self):
        # Crawl 3 webpages (or stop if all url has been fetched)
        while len(self.visited_links) < 3 or (self.visited_links == self.links):
            self.open()
        for link in self.links:
            print link

if __name__ == '__main__':
    C = Crawler()
    C.run()
Update 1
import gevent.monkey; gevent.monkey.patch_thread()
from bs4 import BeautifulSoup
import urllib2
import itertools
import random
import urlparse
import sys
import gevent.monkey; gevent.monkey.patch_all(thread=False)

class Crawler(object):
    def __init__(self):
        self.soup = None                               # Beautiful Soup object
        self.current_page = "http://www.python.org/"   # Current page's address
        self.links = set()                             # Queue with every links fetched
        self.visited_links = set()
        self.counter = 0                               # Simple counter for debug purpose

    def open(self):
        # Open url
        print self.counter, ":", self.current_page
        res = urllib2.urlopen(self.current_page)
        html_code = res.read()
        self.visited_links.add(self.current_page)

        # Fetch every links
        self.soup = BeautifulSoup(html_code)
        page_links = []
        try:
            for link in [h.get('href') for h in self.soup.find_all('a')]:
                print "Found link: '" + link + "'"
                if link.startswith('http'):
                    print 'entered in if link: ', link
                    page_links.append(link)
                    print "Adding link" + link + "\n"
                elif link.startswith('/'):
                    print 'entered in elif link: ', link
                    parts = urlparse.urlparse(self.current_page)
                    page_links.append(parts.scheme + '://' + parts.netloc + link)
                    print "Adding link " + parts.scheme + '://' + parts.netloc + link + "\n"
                else:
                    print 'entered in else link: ', link
                    page_links.append(self.current_page + link)
                    print "Adding link " + self.current_page + link + "\n"
        except Exception, ex:  # Magnificent exception handling
            print ex

        # Update links
        self.links = self.links.union(set(page_links))

        # Choose a random url from non-visited set
        self.current_page = random.sample(self.links.difference(self.visited_links), 1)[0]
        self.counter += 1

    def run(self):
        # Crawl 3 webpages (or stop if all url has been fetched)
        crawling_greenlets = []
        for i in range(3):
            crawling_greenlets.append(gevent.spawn(self.open))
        gevent.joinall(crawling_greenlets)

        # while len(self.visited_links) < 4 or (self.visited_links == self.links):
        #     self.open()

        for link in self.links:
            print link

if __name__ == '__main__':
    C = Crawler()
    C.run()
import gevent and make sure monkey-patching is done to make standard library calls non-blocking and aware of gevent:
import gevent
from gevent import monkey; monkey.patch_all()
(you can selectively decide what has to be monkey-patched, but let's say it is not
your problem at the moment)
In your run, make your open function be called inside a greenlet. run can return the greenlet object, so you can wait for it whenever you need the results, using gevent.joinall for example. Something like this:
def run(self):
    return gevent.spawn(self.open)

c1 = Crawler()
c2 = Crawler()
c3 = Crawler()
crawling_tasks = [c.run() for c in (c1, c2, c3)]
gevent.joinall(crawling_tasks)
print [c.links for c in (c1, c2, c3)]
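If you want to run more crawlers at once without spawning an unbounded number of greenlets, gevent.pool.Pool caps concurrency while keeping the same spawn/join pattern; a hedged sketch (the pool size and crawler count are arbitrary):

import gevent
from gevent import monkey; monkey.patch_all()
from gevent.pool import Pool

pool = Pool(5)                             # at most 5 crawlers fetch at once
crawlers = [Crawler() for _ in range(10)]  # hypothetical number of crawlers
for c in crawlers:
    pool.spawn(c.open)                     # one greenlet per crawler, as in run() above
pool.join()                                # wait for all of them to finish
print [c.links for c in crawlers]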
