Scrapy Spider only generates one item per loop - python

Since I added another request at the end of the for loop to test a link, the spider only generates items for the first index of the loop.
def parse_product_page(self, response):
    products = response.xpath('//div[@class="content"]//div[@class="tov-rows"]//div[@class="t-row"]')
    for x, product in enumerate(products):  # ERROR: just gives an item for the first product
        product_loader = VerbraucherweltProdukt()
        product_loader['name'] = product.xpath(
            '//div[@class="t-center"]//div[@class="t-name"]/text()').extract_first()
        request = scrapy.Request(non_ref_link, callback=self.test_link, errback=self.test_link)
        request.meta['item'] = product_loader
        yield request
It all worked before when I just yielded the product item, but since the item is now returned in the callback, I don't know where my problem lies.
The callback is just:
def test_link(self, response):
    item = response.meta['item']
    item['link_fehlerhaft'] = response.status
    yield item
Here is the full code as well, in case the problem lies somewhere else:
http://pastebin.com/tgL38zpD

Here's your culprit:
link = product.xpath('//div[@class="t-right"]//a/@href').extract_first()
You're not anchoring that XPath to the product node you already have. To fix it, simply prepend a . to the XPath to indicate that the current node is the root:
link = product.xpath('.//div[@class="t-right"]//a/@href').extract_first()
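
For reference, a minimal sketch of the loop with every product XPath anchored to the current node. Field and item names are taken from the question; the exact selectors are assumptions based on the snippets above, and non_ref_link is taken directly from the extracted href here, whereas the full code may derive it differently:

def parse_product_page(self, response):
    products = response.xpath('//div[@class="content"]//div[@class="tov-rows"]//div[@class="t-row"]')
    for product in products:
        item = VerbraucherweltProdukt()
        # a leading "." anchors each query to the current product node
        item['name'] = product.xpath('.//div[@class="t-center"]//div[@class="t-name"]/text()').extract_first()
        non_ref_link = product.xpath('.//div[@class="t-right"]//a/@href').extract_first()
        request = scrapy.Request(non_ref_link, callback=self.test_link, errback=self.test_link)
        request.meta['item'] = item
        yield request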

Related

Using Scrapy to add up numbers across several pages

I am using Scrapy to go from page to page and collect numbers that are on a page. The pages are all similar in the way that I can use the same function to parse them. Simple enough, but I don't need each individual number on the pages, or even each number total from each page. I just need the total sum of all the numbers across all the pages I am visiting. The Scrapy documentation talks about using cb_kwargs to pass arguments, and this is what I have so far.
def parse(self, response):
    self.logger.info('A response from %s just arrived!', response.url)
    numbers_page = response.css('.numbers + a')
    yield from response.follow(numbers_page, callback=self.parse_numbers, cb_kwargs=dict(total_count=0))

def parse_numbers(self, response, total_count):
    yield {
        total_count = total_count,
    }

    def extract_with_css(query):
        return response.css(query).get(default='').strip()

    for number in response.css('div.numbers'):
        yield {
            'number': extract_with_css('span::text'),
            total_count = total_count + int(number.replace(',', ''))
        }

    next_page = response.css('li.next a::attr("href")').get()
    if next_page is not None:
        request = scrapy.Request(next_page,
                                 callback=self.parse_numbers,
                                 cb_kwargs=dict(total_count=total_count))
        yield request
I cut out things irrelevant to the question to make my code clearer. I feel like using a for loop to add up the numbers is okay, but how do I get that running total to the next page (if there is one) and then export it with the rest of the data at the end?
I don't see the need for passing data from one request to another.
The most obvious way I can think of to go about it would be as follows:
You collect the count of the page and yield the result as an item
You create an item pipeline that keeps track of the total count
When the scraping is finished, you have the total count in your item pipeline and you write it to a file, database, ...
Your spider would look something like this:
def parse(self, response):
    self.logger.info('A response from %s just arrived!', response.url)
    numbers_page = response.css('.numbers + a')
    yield from response.follow(numbers_page, callback=self.parse_numbers)

def parse_numbers(self, response):
    numbers = response.css('div.numbers')
    list_numbers = numbers.css('span::text').getall()
    page_sum = sum(int(number) for number in list_numbers if number.strip())
    yield {'page_sum': page_sum}

    next_page = response.css('li.next a::attr("href")').get()
    if next_page:
        request = scrapy.Request(next_page,
                                 callback=self.parse_numbers)
        yield request
For the item pipeline you can use logic like this:
import json


class TotalCountPipeline(object):
    def __init__(self):
        # initialize the variable that keeps track of the total count
        self.total_count = 0

    def process_item(self, item, spider):
        # every page_sum yielded from your spider will be added to the current total count
        page_sum = item['page_sum']
        self.total_count += page_sum
        return item

    def close_spider(self, spider):
        # write the final count to a file
        output = json.dumps(self.total_count)
        with open('test_count_file.jl', 'w') as output_file:
            output_file.write(output + '\n')
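The pipeline also has to be enabled in the project settings; a minimal sketch, assuming it lives in a module reachable as myproject.pipelines (the project and module names are placeholders):

# settings.py
ITEM_PIPELINES = {
    'myproject.pipelines.TotalCountPipeline': 300,
}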

Scraper collecting the content of first page only

I've written a scraper in Python to scrape movie names from yiffy torrents. The content is spread across around 12 pages. If I run my crawler using a print statement, it gives me all the results from all the pages. However, when I run the same thing using return, it gives me the content from the first page only and does not go on to the next page to process the rest. Since I'm having a hard time understanding the behavior of the return statement, I'd be very happy if somebody could point out where I'm going wrong and give me a workaround. Thanks in advance.
This is what I'm trying with (the full code):
import requests
from urllib.request import urljoin
from lxml.html import fromstring

main_link = "https://www.yify-torrent.org/search/western/"

# film_storage = []  # I tried like this as well (keeping the list storage outside the function)

def get_links(link):
    root = fromstring(requests.get(link).text)
    film_storage = []
    for item in root.cssselect(".mv"):
        name = item.cssselect("h3 a")[0].text
        film_storage.append(name)
    return film_storage
    next_page = root.cssselect(".pager a:contains('Next')")[0].attrib['href'] if root.cssselect(".pager a:contains('Next')") else ""
    if next_page:
        full_link = urljoin(link, next_page)
        get_links(full_link)

if __name__ == '__main__':
    items = get_links(main_link)
    for item in items:
        print(item)
But when I do it like below, I get all the results (only the relevant portion pasted):
def get_links(link):
    root = fromstring(requests.get(link).text)
    for item in root.cssselect(".mv"):
        name = item.cssselect("h3 a")[0].text
        print(name)  # using print I get all the results from all the pages
    next_page = root.cssselect(".pager a:contains('Next')")[0].attrib['href'] if root.cssselect(".pager a:contains('Next')") else ""
    if next_page:
        full_link = urljoin(link, next_page)
        get_links(full_link)
Your return statement prematurely terminates your get_links() function, meaning this part
next_page = root.cssselect(".pager a:contains('Next')")[0].attrib['href'] if root.cssselect(".pager a:contains('Next')") else ""
if next_page:
    full_link = urljoin(link, next_page)
    get_links(full_link)
is never executed.
A quick fix would be to put the return statement at the end of your function, but then you have to make film_storage global (defined outside the get_links() function).
Edit:
Just realized, since you will be making your film_storage global, there is no need for the return statement.
Your code in main would just look like this:
get_links(main_link)
for item in film_storage:
    print(item)
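
For completeness, a sketch of that quick fix with film_storage defined at module level (selectors copied from the question):

film_storage = []  # module-level list, shared across the recursive calls

def get_links(link):
    root = fromstring(requests.get(link).text)
    for item in root.cssselect(".mv"):
        film_storage.append(item.cssselect("h3 a")[0].text)
    next_page = root.cssselect(".pager a:contains('Next')")
    if next_page:
        get_links(urljoin(link, next_page[0].attrib['href']))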
Your film_storage results list is local to the function get_links(), which is called recursively for the next page. After the recursive calls (for all the following pages), the initial (entry) call returns results only for the first page.
You'll have to either (1) unwrap the tail recursion into a loop, (2) make the results list global, (3) use a callback (the way you call print), or, the best option, (4) turn the get_links function into a generator that yields results for all pages.
Generator version:
def get_links(link):
    root = fromstring(requests.get(link).text)
    for item in root.cssselect(".mv"):
        name = item.cssselect("h3 a")[0].text
        yield name
    next_page = root.cssselect(".pager a:contains('Next')")[0].attrib['href'] if root.cssselect(".pager a:contains('Next')") else ""
    if next_page:
        full_link = urljoin(link, next_page)
        for name in get_links(full_link):
            yield name
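
The calling code then simply iterates over the generator, which walks every page lazily; a minimal sketch:

if __name__ == '__main__':
    for name in get_links(main_link):
        print(name)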

Scrapy: How do I prevent a yield request with a conditional item value?

I'm parsing a list of URLs, and I want to avoid saving the item produced for a URL depending on one of its values. My code is something like this:
start_urls = [www.rootpage.com]

def parse(self, response):
    item = CreatedItem()
    url_list = response.xpath('somepath').extract()
    for url in url_list:
        request = scrapy.Request(item['url'], callback=self.parse_article)
        request.meta['item'] = item
        yield request

def parse_article(self, response):
    item = response.meta['item']
    item['parameterA'] = response.xpath('somepath').extract()
    yield item
Now, in case item['parameterA'] meets a certain condition, I want to skip the yield (so that nothing is saved for this URL). I tried adding a conditional like:
if item['parameterA'] == 0:
    continue
else:
    yield item
but, as expected, it does not work, because Scrapy continues the loop even before the request is performed.
From what I understand, you should make the decision inside the parse_article method:
def parse_article(self, response):
    item = response.meta['item']
    item['parameterA'] = response.xpath('somepath').extract_first()
    if item['parameterA'] != "0":
        yield item
Note the use of the extract_first() and the quotes around 0.
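For completeness, a minimal sketch of how the two callbacks might fit together, assuming the loop variable url (rather than item['url'], which is never populated in the question's snippet) is what should actually be requested:

def parse(self, response):
    url_list = response.xpath('somepath').extract()
    for url in url_list:
        item = CreatedItem()  # a fresh item per URL
        request = scrapy.Request(url, callback=self.parse_article)
        request.meta['item'] = item
        yield request

def parse_article(self, response):
    item = response.meta['item']
    item['parameterA'] = response.xpath('somepath').extract_first()
    if item['parameterA'] != "0":  # items failing the condition are simply never yielded
        yield item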

Passing an argument to a callback function [duplicate]

This question already has answers here:
Python Argument Binders
(7 answers)
Closed last month.
def parse(self, response):
    for sel in response.xpath('//tbody/tr'):
        item = HeroItem()
        item['hclass'] = response.request.url.split("/")[8].split('-')[-1]
        item['server'] = response.request.url.split('/')[2].split('.')[0]
        item['hardcore'] = len(response.request.url.split("/")[8].split('-')) == 3
        item['seasonal'] = response.request.url.split("/")[6] == 'season'
        item['rank'] = sel.xpath('td[@class="cell-Rank"]/text()').extract()[0].strip()
        item['battle_tag'] = sel.xpath('td[@class="cell-BattleTag"]//a/text()').extract()[1].strip()
        item['grift'] = sel.xpath('td[@class="cell-RiftLevel"]/text()').extract()[0].strip()
        item['time'] = sel.xpath('td[@class="cell-RiftTime"]/text()').extract()[0].strip()
        item['date'] = sel.xpath('td[@class="cell-RiftTime"]/text()').extract()[0].strip()
        url = 'https://' + item['server'] + '.battle.net/' + sel.xpath('td[@class="cell-BattleTag"]//a/@href').extract()[0].strip()
        yield Request(url, callback=self.parse_profile)

def parse_profile(self, response):
    sel = Selector(response)
    item = HeroItem()
    item['weapon'] = sel.xpath('//li[@class="slot-mainHand"]/a[@class="slot-link"]/@href').extract()[0].split('/')[4]
    return item
Well, I'm scraping a whole table in the main parse method and I have taken several fields from that table. One of these fields is a URL, and I want to explore it to get a whole new bunch of fields. How can I pass my already created ITEM object to the callback function so the final item keeps all the fields?
As shown in the code above, I'm able to save the fields found at that URL (the code as it is now) or only the ones from the table (by simply writing yield item),
but I can't yield a single object with all the fields together.
I have tried this, but obviously, it doesn't work.
yield Request(url, callback=self.parse_profile(item))

def parse_profile(self, response, item):
    sel = Selector(response)
    item['weapon'] = sel.xpath('//li[@class="slot-mainHand"]/a[@class="slot-link"]/@href').extract()[0].split('/')[4]
    return item
This is what you'd use the meta keyword for.
def parse(self, response):
    for sel in response.xpath('//tbody/tr'):
        item = HeroItem()
        # item assignment here
        url = 'https://' + item['server'] + '.battle.net/' + sel.xpath('td[@class="cell-BattleTag"]//a/@href').extract()[0].strip()
        yield Request(url, callback=self.parse_profile, meta={'hero_item': item})

def parse_profile(self, response):
    item = response.meta.get('hero_item')
    item['weapon'] = response.xpath('//li[@class="slot-mainHand"]/a[@class="slot-link"]/@href').extract()[0].split('/')[4]
    yield item
Also note that doing sel = Selector(response) is a waste of resources and differs from what you did earlier, so I changed it. The selector is automatically mapped in the response as response.selector, which also has the convenience shortcut response.xpath.
Here's a better way to pass args to callback function:
def parse(self, response):
    request = scrapy.Request('http://www.example.com/index.html',
                             callback=self.parse_page2,
                             cb_kwargs=dict(main_url=response.url))
    request.cb_kwargs['foo'] = 'bar'  # add more arguments for the callback
    yield request

def parse_page2(self, response, main_url, foo):
    yield dict(
        main_url=main_url,
        other_url=response.url,
        foo=foo,
    )
source: https://docs.scrapy.org/en/latest/topics/request-response.html#topics-request-response-ref-request-callback-arguments
I had a similar issue with Tkinter's extra argument passing, and found this solution to work (here: http://infohost.nmt.edu/tcc/help/pubs/tkinter/web/extra-args.html), converted to your problem:
def parse(self, response):
    item = HeroItem()
    [...]
    def handler(response, self=self, item=item):
        """ self and item are passed in as default argument values;
        Scrapy supplies the new response as the positional argument """
        return self.parse_profile(response, item)
    yield Request(url, callback=handler)
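The duplicate target (Python Argument Binders) suggests functools.partial for the same purpose; a minimal sketch, assuming parse_profile is adapted to accept the extra argument:

from functools import partial

def parse(self, response):
    item = HeroItem()
    [...]  # populate item and compute url as above
    yield Request(url, callback=partial(self.parse_profile, item=item))

def parse_profile(self, response, item):
    item['weapon'] = response.xpath('//li[@class="slot-mainHand"]/a[@class="slot-link"]/@href').extract()[0].split('/')[4]
    yield item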
@peduDev
I tried your approach, but something failed due to an unexpected keyword argument.
scrapy_req = scrapy.Request(url=url,
                            callback=self.parseDetailPage,
                            cb_kwargs=dict(participant_id=nParticipantId))

def parseDetailPage(self, response, participant_id):
    # .. some code here ..
    yield MyParseResult(
        # .. some code here ..
        participant_id=participant_id
    )
Error reported:
, cb_kwargs=dict(participant_id=nParticipantId)
TypeError: __init__() got an unexpected keyword argument 'cb_kwargs'
Any idea what caused the unexpected keyword argument, other than perhaps a too old Scrapy version?
Yep. I verified my own suggestion, and after an upgrade it all worked as expected (cb_kwargs was only added in Scrapy 1.7, so older versions raise this error).
sudo pip install --upgrade scrapy

Scrapy not passing correct item through meta attribute

I'm having a similar problem to the one the individual below had. I'm trying to pass an item using the meta attribute. I'm seeing the correct number of items outputted, but they are duplicates of a single item. Could someone help? I'm guessing, by the response to the previous individual's post, that this should be an obvious fix.
https://github.com/scrapy/scrapy/issues/1257
def parse(self, response):
    # some treatment
    # a loop
    request = scrapy.Request(url=<calculated_url>, callback=parseChapter)
    request.meta['item'] = # a dictionary containing some data of the just parsed page
    yield request

def parseChapter(self, response):
    # some treatment
    # a loop
    request = scrapy.Request(url=<calculated_url>, callback=parseCategory)
    request.meta['item'] = # a dictionary containing some data of the just parsed page
    # print request.meta['item'] is good and different in every iteration
    yield request

def parseCategory(self, response):
    # print response.meta['item'] is not good because it displays the same value many times
    # for every new call of parseChapter, meta['item'] received is always the same
    # some treatment
Most likely, you are modifying the item at each iteration of the for loop instead of creating a new one.
As a consequence, all requests are being sent with the same value, i.e. the last value of the item variable.
def parseChapter(self, response):
    # some treatment
    # a loop
    request = scrapy.Request(url=<calculated_url>, callback=parseCategory)
    request.meta['item'] = my_item_dict.copy()
    # print request.meta['item'] is good and different in every iteration
    yield request
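
Alternatively, building a fresh dictionary inside the loop avoids the shared-state problem without any copying; a minimal sketch (the XPath and field name are placeholders):

def parseChapter(self, response):
    for link in response.xpath('//some/loop/path/@href').extract():
        # a new dict is created on every iteration instead of reusing a single object
        item = {'chapter_url': link}
        request = scrapy.Request(url=response.urljoin(link), callback=self.parseCategory)
        request.meta['item'] = item
        yield request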
