Scrapy: scrape multiple items from 2 levels - Python

I'm fairly new to Scrapy and I'm looking for a solution for a personal exercise. What I'm trying to do is crawl the IMDB top chart movies to get the ranking, the title, the year, and the plot.
I manage to go through the links and crawl the movie pages, but I can't find a way to get the ranking for each movie.
Currently my code looks like this:
import scrapy
from tutorial.items import IMDB_dict  # We need this so that Python knows about the item object

class MppaddressesSpider(scrapy.Spider):
    name = "mppaddresses"  # The name of this spider
    # The allowed domain and the URLs where the spider should start crawling:
    allowed_domains = ["imdb.com"]
    start_urls = ['https://www.imdb.com/chart/top/']

    def parse(self, response):
        # The main method of the spider. It scrapes the URL(s) specified in the
        # 'start_urls' argument above. The content of the scraped URL is passed on
        # as the 'response' object.
        for rank in response.xpath(".//tbody[@class='lister-list']/tr/td[@class='titleColumn']/text()").extract():
            rank = " ".join(rank.split())
            item = IMDB_dict()
            item['rank'] = rank
        for url in response.xpath(".//tbody[@class='lister-list']/tr/td[@class='titleColumn']/a/@href").extract():
            # This loops through all the URLs found inside an element of class 'titleColumn'.
            # Constructs an absolute URL by combining the response's URL with a possible relative URL:
            full_url = response.urljoin(url)
            print("Found URL: " + full_url)
            # The following tells Scrapy to scrape the URL in the 'full_url' variable
            # and call the 'get_details()' method below with the content of this URL:
            yield scrapy.Request(full_url, callback=self.get_details)

    def get_details(self, response):
        # This method is called by the 'parse' method above. It scrapes the URLs
        # that have been extracted in the previous step.
        # Store scraped data into a new item:
        item = IMDB_dict()
        item['name'] = response.xpath(".//div[@class='title_bar_wrapper']/div[@class='titleBar']/div[@class='title_wrapper']/h1/text()").extract_first().strip("\t\r\n '\"")
        item['phone'] = response.xpath(".//div[@class='titleBar']/div[@class='title_wrapper']/h1/span[@id='titleYear']/a/text()").extract_first().strip("\t\r\n '\"")
        item['email'] = response.xpath(".//div[@class='plot_summary ']/div[@class='summary_text']/text()").extract_first().strip("\t\r\n '\"")
        # Return that item to the main spider method:
        yield item
Besides, my items.py has:
import scrapy

class IMDB_dict(scrapy.Item):
    # define the fields for your item here like:
    rank = scrapy.Field()
    name = scrapy.Field()
    phone = scrapy.Field()
    email = scrapy.Field()
Main question: How can I get the ranking associated with the title?
Last question (if possible): I can access URLs like I did when URLs are relative (with urljoin), but I can't find a way to access URLs when they are absolute...
Many thanks for your help.
Best,

You need to send rank to your get_details callback using meta:
def parse(self, response):
    for movie in response.xpath(".//tbody[@class='lister-list']/tr/td[@class='titleColumn']"):
        movie_rank = movie.xpath('./text()').re_first(r'(\d+)')
        movie_url = movie.xpath('./a/@href').extract_first()
        movie_full_url = response.urljoin(movie_url)
        print("Found URL: " + movie_full_url)
        yield scrapy.Request(movie_full_url, callback=self.get_details, meta={"rank": movie_rank})

def get_details(self, response):
    item = IMDB_dict()
    item['rank'] = response.meta["rank"]
    item['name'] = response.xpath(".//div[@class='title_bar_wrapper']/div[@class='titleBar']/div[@class='title_wrapper']/h1/text()").extract_first().strip("\t\r\n '\"")
    item['phone'] = response.xpath(".//div[@class='titleBar']/div[@class='title_wrapper']/h1/span[@id='titleYear']/a/text()").extract_first().strip("\t\r\n '\"")
    item['email'] = response.xpath(".//div[@class='plot_summary ']/div[@class='summary_text']/text()").extract_first().strip("\t\r\n '\"")
    # Return that item to the main spider method:
    yield item
UPDATE
If you check logs you'll find this error
AttributeError: 'NoneType' object has no attribute 'strip'
Sometimes .extract_first() returns None, and you can't call strip() on it. I recommend using Scrapy Item Loaders.
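For example, a minimal loader-based sketch of get_details (same IMDB_dict item as above; the XPaths are shortened versions of the ones in the question). MapCompose(str.strip) cleans each extracted string, and a field whose XPath matches nothing is simply left unset instead of raising AttributeError:

from scrapy.loader import ItemLoader
from scrapy.loader.processors import TakeFirst, MapCompose

def get_details(self, response):
    loader = ItemLoader(item=IMDB_dict(), response=response)
    loader.default_output_processor = TakeFirst()  # single value instead of a list
    loader.add_value('rank', response.meta['rank'])
    loader.add_xpath('name', "//div[@class='title_wrapper']/h1/text()", MapCompose(str.strip))
    loader.add_xpath('phone', "//span[@id='titleYear']/a/text()", MapCompose(str.strip))
    loader.add_xpath('email', "//div[@class='summary_text']/text()", MapCompose(str.strip))
    yield loader.load_item()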

Related

Trying to add multiple yields into a single json file using Scrapy

I am trying to figure out if my Scrapy spider is correctly hitting the product_link for the request callback - yield scrapy.Request(product_link, callback=self.parse_new_item).
product_link should be 'https://www.antaira.com/products/10-100Mbps/LNX-500A',
but I have not been able to confirm whether my program is jumping into the next step, so that I can retrieve the correct yield return. Thank you!
# Import the required libraries
import scrapy
# Import the Item class with fields
# mentioned in the items.py file
from ..items import AntairaItem

# Spider class name
class productJumper(scrapy.Spider):
    # Name of the spider
    name = 'productJumper'
    # The domain to be scraped
    allowed_domains = ['antaira.com']
    # The URLs to be scraped from the domain
    start_urls = ['https://www.antaira.com/products/10-100Mbps']
    #target_url = ['https://www.antaira.com/products/10-100Mbps/LNX-500A']

    # First Step: Find every div with the class 'product-container' and step into the links
    def parse(self, response):
        #product_link = response.urljoin(rel_product_link)
        # creating items dictionary
        items = AntairaItem()
        rel_product_link = response.css('div.center767')
        for url in rel_product_link:
            rel_product_link = response.xpath('//div[@class="product-container"]//a/@href').get(),
            product_link = response.urljoin('rel_product_link'),
            items['rel_product_link'] = rel_product_link,
            items['product_link'] = product_link
            #yield items
            # 2nd Step: Return a list of all the product links that will be scraped
            #yield {
            #    # take the first relative product link
            #    'rel_product_link' : rel_product_link,
            #    'product_link' : product_link,
            #}
            yield scrapy.Request(product_link, callback=self.parse_new_item)

    # Final Step: Run through each product and yield the results
    def parse_new_item(self, response):
        for product in response.css('main.products'):
            name = product.css(('h1.product-name::text').strip(' \t\n\r')).get()
            features = product.css('section.features h3 + ul').getall()
            overview = product.css('.products .product-overview::text').getall()
            main_image = product.css('div.selectors img::attr(src)').get()
            rel_links = product.xpath("//script/@src[contains(., '/app/site/hosting/scriptlet.nl')]").getall()
            items['name'] = name,
            items['features'] = features,
            items['overview'] = overview,
            items['main_image'] = main_image,
            items['rel_links'] = rel_links,
            yield items
You have a couple of issues:
- Scrapy items are essentially dictionaries and are therefore mutable. You need to create a unique item for each and every yield statement.
- Your second parse callback is referencing a variable items that it doesn't have access to, because it was defined in your first parse callback.
- In your urljoin method you are using a string literal instead of a variable for rel_product_link.
In the example below I fixed those issues and made some additional notes.
import scrapy
from ..items import AntairaItem

class ProductJumper(scrapy.Spider):  # classes should be TitleCase
    name = 'productJumper'
    allowed_domains = ['antaira.com']
    start_urls = ['https://www.antaira.com/products/10-100Mbps']

    def parse(self, response):
        # iterate through each of the relative urls
        for url in response.xpath('//div[@class="product-container"]//a/@href').getall():
            product_link = response.urljoin(url)  # use variable
            yield scrapy.Request(product_link, callback=self.parse_new_item)

    def parse_new_item(self, response):
        for product in response.css('main.products'):
            items = AntairaItem()  # Unique item for each iteration
            items['product_link'] = response.url  # get the product link from response
            name = product.css('h1.product-name::text').get().strip()
            features = product.css('section.features h3 + ul').getall()
            overview = product.css('.products .product-overview::text').getall()
            main_image = product.css('div.selectors img::attr(src)').get()
            rel_links = product.xpath("//script/@src[contains(., '/app/site/hosting/scriptlet.nl')]").getall()
            items['name'] = name  # no trailing commas here: they would wrap each value in a tuple
            items['features'] = features
            items['overview'] = overview
            items['main_image'] = main_image
            items['rel_links'] = rel_links
            yield items

How to scrape webpage of items, each item has link to new page

I'm creating a web scraper with Scrapy and Python. The page I'm scraping has each item structured as a card. I'm able to scrape some info from these cards (name, location), but I also want to get info that is reached by clicking on a card > new page > clicking a button on the new page that opens a form > scraping a value from the form. How should I structure the parse function? Do I need nested loops or separate functions?
class StackSpider(Spider):
    name = "stack"
    allowed_domains = ["example.com"]
    start_urls = ["example.com/page"]

    def parse(self, response):
        for page_url in response.css('a[class ~= search- card]::attr(href)').extract():
            page_url = response.urljoin(page_url)
            yield scrapy.Request(url=page_url, callback=self.parse)
        for vc in response.css('div#vc-profile.container').extract():
            item = StackItem()
            item['name'] = vc.xpath('//*[@id="vc-profile"]/div/div[2]/div[1]/div[1]/h1/text()').extract()
            item['firm'] = vc.expath('//*[@id="vc-profile"]/div/div[2]/div[1]/div[2]/h2/text()[1]').extract()
            item['pos'] = vc.expath('//*[@id="vc-profile"]/div/div[2]/div[1]/div[2]/h2/text()[2]').extract()
            em = vc.xpath('/*[@id="vc-profile"]/div/div[1]/div[2]/div[2]/div/div[1]/button').extract()
            item['email'] = em.xpath('//*[@id="email"]/value').extract()
            yield item
The scraper is crawling, but outputting nothing.
The best approach is to create an item object on the first page, scrape the needed data into it, then make a request to the new URL (card > new page > button that opens the form) and pass the same item along in meta. Yielding the item from the final callback will fix the issue.
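A rough sketch of that idea, with hypothetical selectors standing in for the real ones:

def parse(self, response):
    for card_url in response.css('a.search-card::attr(href)').extract():
        yield scrapy.Request(response.urljoin(card_url), callback=self.parse_card)

def parse_card(self, response):
    item = StackItem()
    item['name'] = response.css('h1::text').extract_first()  # hypothetical selector
    form_url = response.css('button.contact::attr(data-href)').extract_first()  # hypothetical
    # carry the half-filled item along to the form page
    yield scrapy.Request(response.urljoin(form_url), callback=self.parse_form,
                         meta={'item': item})

def parse_form(self, response):
    item = response.meta['item']  # same item, filled further
    item['email'] = response.css('#email::attr(value)').extract_first()  # hypothetical
    yield item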
You should probably split the scraper into 1 'parse' method and 1 'parse_item' method.
Your parse method goes through the page and yields the urls of the items for which you want to get the details. The parse_item method will get back the response from the parse function, and get the details for the specific item.
Difficult to say what it will look like without knowing the website, but it'll probably look more or less like this:
class StackSpider(Spider):
    name = "stack"
    allowed_domains = ["example.com"]
    start_urls = ["example.com/page"]

    def parse(self, response):
        for page_url in response.css('a[class ~= search- card]::attr(href)').extract():
            page_url = response.urljoin(page_url)
            yield scrapy.Request(url=page_url, callback=self.parse_item)

    def parse_item(self, response):
        item = StackItem()
        item['name'] = response.xpath('//*[@id="vc-profile"]/div/div[2]/div[1]/div[1]/h1/text()').extract()
        item['firm'] = response.xpath('//*[@id="vc-profile"]/div/div[2]/div[1]/div[2]/h2/text()[1]').extract()
        item['pos'] = response.xpath('//*[@id="vc-profile"]/div/div[2]/div[1]/div[2]/h2/text()[2]').extract()
        em = response.xpath('//*[@id="vc-profile"]/div/div[1]/div[2]/div[2]/div/div[1]/button')
        item['email'] = em.xpath('.//*[@id="email"]/@value').extract()
        yield item

IMDB Movie Scraping gives blank csv using scrapy

I am getting a blank CSV, though it's not showing any error in the code.
It is unable to crawl through the web page.
This is the code I have written, referring to a YouTube tutorial:
import scrapy
from Example.items import MovieItem

class ThirdSpider(scrapy.Spider):
    name = "imdbtestspider"
    allowed_domains = ["imdb.com"]
    start_url = ('http://www.imdb.com/chart/top',)

    def parse(self, response):
        links = response.xpath('//tbody[@class="lister-list"]/tr/td[@class="titleColumn"]/a/@href').extract()
        i = 1
        for link in links:
            abs_url = response.urljoin(link)
            url_next = '//*[@id="main"]/div/span/div/div/div[2]/table/tbody/tr[' + str(i) + ']/td[3]/strong/text()'
            rating = response.xpath(url_next).extact()
            if (i <= len(link)):
                i = i + 1
            yield scrapy.Request(abs_url, callback=self.parse_indetail, meta={'rating': rating})

    def parse_indetail(self, response):
        item = MovieItem()
        item['title'] = response.xpath('//div[@class="title_wrapper"])/h1/text()').extract[0][:-1]
        item['directors'] = response.xpath('//div[@class="credit_summary_items"]/span[@itemprop="director"]/a/span/text()').extract()[0]
        item['writers'] = response.xpath('//div[@class="credit_summary_items"]/span[@itemprop="creator"]/a/span/text()').extract()
        item['stars'] = response.xpath('//div[@class="credit_summary_items"]/span[@itemprop="actors"]/a/span/text()').extract()
        item['popularity'] = response.xpath('//div[@class="titleReviewBarSubItem"]/div/span/text()').extract()[2][21:-8]
        return item
This is the output I am getting when executing the code with
scrapy crawl imdbtestspider -o example.csv -t csv
2019-01-17 18:44:34 [scrapy.core.engine] INFO: Spider opened
2019-01-17 18:44:34 [scrapy.extensions.logstats] INFO: Crawled 0 pages (at 0 pages/min), scraped 0 items (at 0 items/min)
This is another way you might try. I used CSS selectors instead of XPath to make the script less verbose.
import scrapy

class ImbdsdpyderSpider(scrapy.Spider):
    name = 'imbdspider'
    start_urls = ['http://www.imdb.com/chart/top']

    def parse(self, response):
        for link in response.css(".titleColumn a[href^='/title/']::attr(href)").extract():
            yield scrapy.Request(response.urljoin(link), callback=self.get_info)

    def get_info(self, response):
        item = {}
        title = response.css(".title_wrapper h1::text").extract_first()
        item['title'] = ' '.join(title.split()) if title else None
        item['directors'] = response.css(".credit_summary_item h4:contains('Director') ~ a::text").extract()
        item['writers'] = response.css(".credit_summary_item h4:contains('Writer') ~ a::text").extract()
        item['stars'] = response.css(".credit_summary_item h4:contains('Stars') ~ a::text").extract()
        popularity = response.css(".titleReviewBarSubItem:contains('Popularity') .subText::text").extract_first()
        item['popularity'] = ' '.join(popularity.split()).strip("(") if popularity else None
        item['rating'] = response.css(".ratingValue span::text").extract_first()
        yield item
I have tested the XPaths you gave; I don't know whether they are mistakenly wrong or actually wrong. E.g.:
//*[@id="main"]/div/span/div/div/div[2]/table/tbody/tr[' + str(i) + ']/td[3]/strong/text()
# There is no table when you reach div[2]
//div[@class="title_wrapper"])/h1/text()
# There is an error after `]`: the `)` is bad syntax
Plus, your XPaths are not yielding any results.
As to why you are getting 0 pages crawled: without recreating your case, I have to assume that your method of page iteration is not building the page URLs correctly.
I'm having trouble understanding the point of creating the array of all the follow links and then using len to send them to parse_indetail(), but a couple of things to note.
When you use meta to pass items from one function to the next, though you have the right idea, you are missing some of the instantiation in the function you're passing to (you should also use a standard naming convention for simplicity).
Should be something like this...
def parse(self, response):
    # If you are going to capture an item at the first request, you must
    # instantiate your items class
    item = MovieItem()
    ....
    # You seem to want to pass ratings to the next function for itemization,
    # so make sure it is listed in your items.py file, then set it
    item['rating'] = response.xpath(PATH).extract()
    ....
    # Standard convention for passing meta in a callback looks like this;
    # this way the whole itemized item gets passed along
    yield scrapy.Request(abs_url, callback=self.parse_indetail, meta={'item': item})

def parse_indetail(self, response):
    # Then you must retrieve the item from meta in the function you passed it to
    item = response.meta['item']
    # Then you can continue your scraping
You should not complicate the page iteration logic. You seem to get how it works but need help fine-tuning this aspect. I have recreated your use case and optimized it.
# items.py file
import scrapy

class TestimbdItem(scrapy.Item):
    title = scrapy.Field()
    directors = scrapy.Field()
    writers = scrapy.Field()
    stars = scrapy.Field()
    popularity = scrapy.Field()
    rating = scrapy.Field()

# The spider file
import scrapy
from testimbd.items import TestimbdItem

class ImbdsdpyderSpider(scrapy.Spider):
    name = 'imbdsdpyder'
    allowed_domains = ['imdb.com']
    start_urls = ['http://www.imdb.com/chart/top']

    def parse(self, response):
        for href in response.css("td.titleColumn a::attr(href)").extract():
            yield scrapy.Request(response.urljoin(href), callback=self.parse_movie)

    def parse_movie(self, response):
        item = TestimbdItem()
        item['title'] = [x.replace('\xa0', '') for x in response.css(".title_wrapper h1::text").extract()][0]
        item['directors'] = response.xpath('//div[@class="credit_summary_item"]/h4[contains(., "Director")]/following-sibling::a/text()').extract()
        item['writers'] = response.xpath('//div[@class="credit_summary_item"]/h4[contains(., "Writers")]/following-sibling::a/text()').extract()
        item['stars'] = response.xpath('//div[@class="credit_summary_item"]/h4[contains(., "Stars")]/following-sibling::a/text()').extract()
        item['popularity'] = response.css(".titleReviewBarSubItem span.subText::text")[2].re('([0-9]+)')
        item['rating'] = response.css(".ratingValue span::text").extract_first()
        yield item
Notice two things:
First, the parse() function. All I'm doing here is looping through the links (each instance in the loop is referred to by href) and passing the urljoined href to the parser function. Given your use case, this is more than enough. In a situation where there is a next page, it's just a matter of creating a variable for the "next" page and calling back to parse; it will keep doing that until it can't find a "next" page.
Secondly, use XPath only when the HTML items have the same tag with different content. This is more of a personal opinion, but I tell people that XPath selectors are like a scalpel and CSS selectors are like a butcher knife. You can get damn accurate with a scalpel, but it takes more time, and in many cases it may just be easier to go with a CSS selector to get the same result.
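For instance, both of the following pull the director links from the movie page scraped above; the XPath version spells out the sibling relationship, while the CSS version leans on the non-standard :contains() pseudo-class that parsel supports:

# XPath: explicit about the h4 -> following-sibling <a> relationship
response.xpath('//div[@class="credit_summary_item"]/h4[contains(., "Director")]/following-sibling::a/text()').extract()
# CSS: same result, terser
response.css(".credit_summary_item h4:contains('Director') ~ a::text").extract()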

How to populate a scrapy.Field as a dictionary

I'm building a scraper for www.apkmirror.com using Scrapy (with the SitemapSpider spider). So far the following works:
DEBUG = True

from scrapy.spiders import SitemapSpider
from apkmirror_scraper.items import ApkmirrorScraperItem

class ApkmirrorSitemapSpider(SitemapSpider):
    name = 'apkmirror-spider'
    sitemap_urls = ['http://www.apkmirror.com/sitemap_index.xml']
    sitemap_rules = [(r'.*-android-apk-download/$', 'parse')]
    if DEBUG:
        custom_settings = {'CLOSESPIDER_PAGECOUNT': 20}

    def parse(self, response):
        item = ApkmirrorScraperItem()
        item['url'] = response.url
        item['title'] = response.xpath('//h1[@title]/text()').extract_first()
        item['developer'] = response.xpath('//h3[@title]/a/text()').extract_first()
        return item
where ApkmirrorScraperItem is defined in items.py as follows:
class ApkmirrorScraperItem(scrapy.Item):
    url = scrapy.Field()
    title = scrapy.Field()
    developer = scrapy.Field()
The resulting JSON output if I run it from the project directory using the command
scrapy crawl apkmirror-spider -o data.json
is an array of JSON dictionaries with keys url, title, and developer, and the corresponding strings as values. I would like to modify this, however, so that the value of developer is itself a dictionary with a name field, so that I can populate it like this:
item['developer']['name'] = response.xpath('//h3[#title]/a/text()').extract_first()
However, if I try this I get KeyErrors, even if I initialize the developer Field (which is a dict according to https://doc.scrapy.org/en/latest/topics/items.html#item-fields) as developer = scrapy.Field(name=None). How can I go about this?
Scrapy implements fields internally as dicts, but this does not mean they should be accessed as dicts. When you call item['developer'], what you are really doing is getting the value of the field, not the field itself. So, if the value has not been set yet, this will throw a KeyError.
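You can see this in a quick interactive session with the ApkmirrorScraperItem from the question:

>>> item = ApkmirrorScraperItem()
>>> item['developer']['name'] = 'foo'   # KeyError: no value set for 'developer' yet
>>> item['developer'] = {}              # set the field's value to a dict first...
>>> item['developer']['name'] = 'foo'   # ...and now the nested assignment works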
Considering this, there are two ways you could go about your problem.
First one, just set the developer field value to a dict:
def parse(self, response):
    item = ApkmirrorScraperItem()
    item['url'] = response.url
    item['title'] = response.xpath('//h1[@title]/text()').extract_first()
    item['developer'] = {'name': response.xpath('//h3[@title]/a/text()').extract_first()}
    return item
Second one, create a new Developer class and set the developer value to be an instance of this class:
# this can go in items.py
class Developer(scrapy.Item):
    name = scrapy.Field()

def parse(self, response):
    item = ApkmirrorScraperItem()
    item['url'] = response.url
    item['title'] = response.xpath('//h1[@title]/text()').extract_first()
    dev = Developer()
    dev['name'] = response.xpath('//h3[@title]/a/text()').extract_first()
    item['developer'] = dev
    return item
Hope it helps :)

Scrapy request does not callback

I am trying to create a spider that takes data from a csv (two links and a name per row), and scrapes a simple element (price) from each of those links, returning an item for each row, with the item's name being the name in the csv, and two scraped prices (one from each link).
Everything works as expected except that, instead of the prices that would be returned from the callback function of each request, I get a Request object like this:
<GET https://link.com>
The callback functions don't get called at all, why is that?
Here is the spider:
f = open('data.csv')
f_reader = csv.reader(f)
f_data = list(f_reader)

parsed_data = []
for product in f_data:
    product = product[0].split(';')
    parsed_data.append(product)
f.close()

class ProductSpider(scrapy.Spider):
    name = 'products'
    allowed_domains = ['domain1', 'domain2']
    start_urls = ["domain1_but_its_fairly_useless"]

    def parse(self, response):
        global parsed_data
        for product in parsed_data:
            item = Product()
            item['name'] = product[0]
            item['first_price'] = scrapy.Request(product[1], callback=self.parse_first)
            item['second_price'] = scrapy.Request(product[2], callback=self.parse_second)
            yield item

    def parse_first(self, response):
        digits = response.css('.price_info .price span').extract()
        decimals = response.css('.price_info .price .price_demicals').extract()
        yield float(str(digits) + '.' + str(decimals))

    def parse_second(self, response):
        digits = response.css('.lr-prod-pricebox-price .lr-prod-pricebox-price-primary span[itemprop="price"]').extract()
        yield digits
Thanks in advance for your help!
TL;DR: You are yielding an item with Request objects inside of it, when you should yield either an Item or a Request.
Long version:
Parse methods in your spider should return either a scrapy.Item, in which case the chain for that crawl will stop and Scrapy will put out an item, or a scrapy.Request, in which case Scrapy will schedule a request to continue the chain.
Scrapy is asynchronous, so to create an item from multiple requests you need to chain all of those requests while carrying your item through every one of them, filling it up little by little.
The Request object has a meta attribute where you can store anything you want (well, pretty much), and it will be carried to your callback function. It's very common to use it to chain requests for items that require multiple requests to form a single item.
Your spider should look something like this:
class ProductSpider(scrapy.Spider):
    # <...>
    def parse(self, response):
        for product in parsed_data:
            item = Product()
            item['name'] = product[0]
            # carry the next url you want to crawl in meta
            # and carry your item in meta
            yield Request(product[1], self.parse_first,
                          meta={"product3": product[2], "item": item})

    def parse_first(self, response):
        # retrieve the item that you made in the parse() func
        item = response.meta['item']
        # fill it up
        digits = response.css('.price_info .price span').extract()
        decimals = response.css('.price_info .price .price_demicals').extract()
        item['first_price'] = float(str(digits) + '.' + str(decimals))
        # retrieve the next url from meta
        # and carry over your item to the next url
        yield Request(response.meta['product3'], self.parse_second,
                      meta={"item": item})

    def parse_second(self, response):
        # again, retrieve your item
        item = response.meta['item']
        # fill it up
        digits = response.css('.lr-prod-pricebox-price .lr-prod-pricebox-price-primary span[itemprop="price"]').extract()
        item['second_price'] = digits
        # and finally return the item after 3 requests!
        yield item
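As a side note, on Scrapy 1.7+ the same chaining can be written with cb_kwargs, which hands the carried values to the callback as ordinary keyword arguments instead of going through response.meta. A brief sketch, with the selectors abbreviated from the answer above:

def parse(self, response):
    for product in parsed_data:
        item = Product()
        item['name'] = product[0]
        yield Request(product[1], self.parse_first,
                      cb_kwargs={"item": item, "next_url": product[2]})

def parse_first(self, response, item, next_url):
    # item and next_url arrive as plain function arguments
    item['first_price'] = response.css('.price_info .price span').extract()
    yield Request(next_url, self.parse_second, cb_kwargs={"item": item})

def parse_second(self, response, item):
    item['second_price'] = response.css('span[itemprop="price"]::text').extract()
    yield item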
