Python Scrapy: How to get the second image on the page with Scrapy?

I want to extract exactly one image from every page that Scrapy visits. For example, I want to extract http://eshop.erhanteknik.com.tr/photo/foto_w720_604e44853371a920a52b0a31a3548b8b.jpg from the http://eshop.erhanteknik.com.tr/tos_svitavy/tos_svitavy/uc_ayakli_aynalar_t0803?DS7641935 page, which is the first page Scrapy looks at. With the code below I currently get all of the images via the .getall() call, but I cannot figure out how to get one specific image.
from scrapy import Spider
from scrapy.http import Request


class BooksSpider(Spider):
    name = 'books'
    allowed_domains = ['eshop.erhanteknik.com.tr']
    start_urls = ['http://eshop.erhanteknik.com.tr/urunlerimiz?categoryId=1']

    def parse(self, response):
        books = response.xpath('//h3/a/@href').extract()
        for book in books:
            absolute_url = response.urljoin(book)
            yield Request(absolute_url, callback=self.parse_book)
        # process next page
        next_page_url = response.xpath('//a[@rel="next"]/@href').extract_first()
        absolute_next_page_url = response.urljoin(next_page_url)
        yield Request(absolute_next_page_url)

    def parse_book(self, response):
        title = response.css('h1::text').extract_first()
        image_url = response.xpath('//img/@src').getall()
        yield {
            'title': title,
            'image_url': image_url,
        }

You need to target the src of the images under the slide class.
image_url = response.css('.slide img::attr(src)').extract_first()
extract_first() will grab the first item of the list.
If you use extract(), you will get a list.
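If you actually need the second image rather than the first (as the question title asks), you can take the whole list and index into it. A minimal sketch of parse_book, reusing the .slide selector from the answer above and assuming the page exposes at least two images under it:
def parse_book(self, response):
    title = response.css('h1::text').extract_first()
    # all image URLs under the slider, as a list of strings
    image_urls = response.css('.slide img::attr(src)').getall()
    # index 1 is the second image; guard against pages with fewer images
    second_image = image_urls[1] if len(image_urls) > 1 else None
    yield {
        'title': title,
        'image_url': response.urljoin(second_image) if second_image else None,
    }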

Related

Extracting next page and setting a break

I'm trying to extract webpage data and want to take the next few pages as well, but only up to a limit that I can alter. However, when I test whether I can at least extract the next few web pages using Scrapy (I'm trying to figure this out in Scrapy to learn it), it only returns the items within the first page.
How do I extract the next pages while setting a limit, i.e. 5 pages?
For example, here's what I have tried:
import scrapy
from scrapy.item import Field
from itemloaders.processors import TakeFirst
from scrapy.crawler import CrawlerProcess


class StatisticsItem(scrapy.Item):
    ebay_div = Field(output_processor=TakeFirst())
    url = Field(output_processor=TakeFirst())


class StatisticsSpider(scrapy.Spider):
    name = 'ebay'
    start_urls = ['https://www.ebay.com/b/Collectible-Card-Games-Accessories/2536/bn_1852210?rt=nc&LH_BIN=1' +
                  '&LH_PrefLoc=2&mag=1&_sop=16']

    def start_requests(self):
        for url in self.start_urls:
            yield scrapy.Request(url)

    def parse(self, response):
        all_cards = response.xpath('//div[@class="s-item__wrapper clearfix"]')
        for card in all_cards:
            name = card.xpath('.//h3/text()').get()  # get name of product
            price = card.xpath('.//span[@class="s-item__price"]//text()').get()  # price
            product_url = card.xpath('.//a[@class="s-item__link"]//@href').get()  # link to product
            # now do whatever you want, append to dictionary, yield as item...
            summary_data = {
                "Name": name,
                "Price": price,
                "URL": product_url
            }
            data = {'summary_data': summary_data}
            yield scrapy.Request(product_url, meta=data, callback=self.parse_product_details)

            # get the next page
            next_page_url = card.xpath('.//a[@class="pagination__next icon-link"]/@href').extract_first()
            # The last page does not have a valid url and ends with '#'
            if next_page_url is None or str(next_page_url).endswith("#"):
                self.log("eBay products collected successfully !!!")
            else:
                print('\n' + '-' * 30)
                print('Next page: {}'.format(next_page_url))
                yield scrapy.Request(next_page_url, callback=self.parse)

    def parse_product_details(self, response):
        # Get the summary data
        data = response.meta['summary_data']
        data['location'] = response.xpath('//span[@itemprop="availableAtOrFrom"]/text()').extract_first()
        yield data


process = CrawlerProcess(
    settings={
        'FEED_URI': 'collectible_cards.json',
        'FEED_FORMAT': 'jsonlines'
    }
)

process.crawl(StatisticsSpider)
process.start()
You can try it like this: build the page URLs up front using the _pgn query parameter, then let start_requests iterate over them. Note that range(1, 5) generates pages 1-4; use range(1, 6) if you want 5 pages.
start_urls = ["https://www.ebay.com/b/Collectible-Card-Games-Accessories/2536/bn_1852210?LH_BIN=1&LH_PrefLoc=2&mag=1&rt=nc&_pgn={}&_sop=16".format(i) for i in range(1, 5)]

How Do I Set Up Pagination correctly?

I'm currently working on a Scrapy spider that extracts three pieces of data for each product: title, price, and UPC. My program scrapes the title and price correctly for each product, but I am having trouble scraping the UPC, since the UPC is on another page.
For each product, I want the program to extract the title and price on the main page, then follow the link to the product's own page to extract the UPC code. Once it has the UPC code, the program should move on to the next product on the main page and repeat the same steps for the remaining products.
Here is my code.
import scrapy
from scrapy.utils.response import open_in_browser
from ..items import QuotetutorialItem

data = {hidden}
headers = {hidden}


class BrickseekSpider(scrapy.Spider):
    name = 'brickseek1'
    allowed_domains = ['brickseek.com']

    def start_requests(self):
        dont_filter = True
        yield scrapy.http.FormRequest(url='https://brickseek.com/login/', headers=headers, formdata=data,
                                      callback=self.parse)

    def parse(self, response):
        items = QuotetutorialItem()
        products = response.css('div.item-list__tile')
        for product in products:
            title = product.css('.item-list__title span::text').extract()
            price = product.css('.item-list__price-column--highlighted .price-formatted__dollars::text').extract()
            # another_page = response.css('div.item-list__tile a::attr(href)').get()
            # if another_page:
            #     upc = product.css('div.item-overview__meta-item::text').extract()[6]
            #     yield response.follow(another_page, callback=self.parse)
            items['title'] = title
            items['price'] = price
            # items['upc'] = upc
            yield items
All you need to do is put your item (after filling in title and price) in meta when you visit the next page (assuming your CSS selectors are correct):
def parse(self, response):
    products = response.css('div.item-list__tile')
    for product in products:
        item = QuotetutorialItem()
        item['title'] = product.css('.item-list__title span::text').extract()
        item['price'] = product.css('.item-list__price-column--highlighted .price-formatted__dollars::text').extract()
        # link to this particular product's detail page
        another_page = product.css('a::attr(href)').get()
        if another_page:
            yield response.follow(another_page, callback=self.parse_upc, meta={'item': item})
        else:
            yield item

def parse_upc(self, response):
    item = response.meta['item']
    item['upc'] = response.css('div.item-overview__meta-item::text').extract()[6]
    yield item
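As a side note, Scrapy 1.7+ also supports cb_kwargs, which passes data to the callback as ordinary keyword arguments instead of going through meta. A minimal sketch of the same two methods using it (selectors unchanged from the answer above):
def parse(self, response):
    for product in response.css('div.item-list__tile'):
        item = QuotetutorialItem()
        item['title'] = product.css('.item-list__title span::text').extract()
        item['price'] = product.css('.item-list__price-column--highlighted .price-formatted__dollars::text').extract()
        another_page = product.css('a::attr(href)').get()
        if another_page:
            # the item arrives in parse_upc as a regular keyword argument
            yield response.follow(another_page, callback=self.parse_upc, cb_kwargs={'item': item})
        else:
            yield item

def parse_upc(self, response, item):
    item['upc'] = response.css('div.item-overview__meta-item::text').extract()[6]
    yield item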

Web Scraping all Urls from a website with Scrapy and Python

I am writing a web scraper to fetch a group of links
(located at tree.xpath('//div[@class="work_area_content"]/a/@href'))
from a website and return the Title and URL of all the leaves, sectioned by each leaf's parent. I have two scrapers: one in plain Python and one in Scrapy. What is the purpose of callbacks in the Scrapy Request method? Should the information be in a multidimensional or single-dimension list (I believe multidimensional, but that adds complication)? Which of the code below is better? If the Scrapy code is better, how do I migrate the plain Python code to the Scrapy code?
From what I understand, a callback passes a function's arguments to another function; however, if the callback refers to itself, the data gets overwritten and therefore lost, and you're unable to go back to the root data. Is this correct?
python:
url_storage = [[[[]]]]
page = requests.get('http://1.1.1.1:1234/TestSuites')
tree = html.fromstring(page.content)
urls = tree.xpath('//div[@class="work_area_content"]/a/@href').extract()
i = 0
j = 0
k = 0
for i, url in enumerate(urls):
    absolute_url = "".join(['http://1.1.1.1:1234/', url])
    url_storage[i][j][k].append(absolute_url)
    print(url_storage)
    # url_storage.insert(i, absolute_url)
    page = requests.get(url_storage[i][j][k])
    tree2 = html.fromstring(page.content)
    urls2 = tree2.xpath('//div[@class="work_area_content"]/a/@href').extract()
    for j, url2 in enumerate(urls2):
        absolute_url = "".join(['http://1.1.1.1:1234/', url2])
        url_storage[i][j][k].append(absolute_url)
        page = requests.get(url_storage[i][j][k])
        tree3 = html.fromstring(page.content)
        urls3 = tree3.xpath('//div[@class="work_area_content"]/a/@href').extract()
        for k, url3 in enumerate(urls3):
            absolute_url = "".join(['http://1.1.1.1:1234/', url3])
            url_storage[i][j][k].append(absolute_url)
            page = requests.get(url_storage[i][j][k])
            tree4 = html.fromstring(page.content)
            urls3 = tree4.xpath('//div[@class="work_area_content"]/a/@href').extract()
            title = tree4.xpath('//span[@class="page_title"]/text()').extract()
            yield Request(url_storage[i][j][k], callback=self.end_page_parse_TS, meta={"Title": title, "URL": urls3})
            # yield Request(absolute_url, callback=self.end_page_parse_TC, meta={"Title": title, "URL": urls3})


def end_page_parse_TS(self, response):
    print(response.body)
    url = response.meta.get('URL')
    title = response.meta.get('Title')
    yield {'URL': url, 'Title': title}


def end_page_parse_TC(self, response):
    url = response.meta.get('URL')
    title = response.meta.get('Title')
    description = response.meta.get('Description')
    description = response.xpath('//table[@class="wiki_table"]/tbody[contains(td/text(), "description")]/parent::*').extract()
    yield {'URL': url, 'Title': title, 'Description': description}
Scrapy:
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import Rule, CrawlSpider
from datablogger_scraper.items import DatabloggerScraperItem


class DatabloggerSpider(CrawlSpider):
    # The name of the spider
    name = "datablogger"

    # The domains that are allowed (links to other domains are skipped)
    allowed_domains = ['http://1.1.1.1:1234/']

    # The URLs to start with
    start_urls = ['http://1.1.1.1:1234/TestSuites']

    # This spider has one rule: extract all (unique and canonicalized) links,
    # follow them and parse them using the parse_items method
    rules = [
        Rule(
            LinkExtractor(
                canonicalize=True,
                unique=True
            ),
            follow=True,
            callback="parse_items"
        )
    ]

    # Method which starts the requests by visiting all URLs specified in start_urls
    def start_requests(self):
        for url in self.start_urls:
            yield scrapy.Request(url, callback=self.parse, dont_filter=True)

    # Method for parsing items
    def parse_items(self, response):
        # The list of items that are found on the particular page
        items = []
        # Only extract canonicalized and unique links (with respect to the current page)
        links = LinkExtractor(canonicalize=True, unique=True).extract_links(response)
        # Now go through all the found links
        item = DatabloggerScraperItem()
        item['url_from'] = response.url
        for link in links:
            item['url_to'] = link.url
            items.append(item)
        # Return all the found items
        return items
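On the callback question itself: a callback is simply the method Scrapy calls with the downloaded response, and nothing is overwritten as long as each piece of state travels with its own request (for example via meta) rather than living in one shared nested list. A minimal sketch of the nested crawl written that way, using the question's selectors but otherwise illustrative names:
import scrapy


class TestSuiteSpider(scrapy.Spider):
    name = 'testsuites'  # illustrative name
    start_urls = ['http://1.1.1.1:1234/TestSuites']

    def parse(self, response):
        # level 1: follow every link in the work area; each request carries its own meta
        for href in response.xpath('//div[@class="work_area_content"]/a/@href').extract():
            yield scrapy.Request(response.urljoin(href), callback=self.parse_branch,
                                 meta={'parent': response.url})

    def parse_branch(self, response):
        # every deeper level reuses the same callback; no shared mutable storage is needed
        title = response.xpath('//span[@class="page_title"]/text()').extract_first()
        yield {'URL': response.url, 'Title': title, 'Parent': response.meta['parent']}
        for href in response.xpath('//div[@class="work_area_content"]/a/@href').extract():
            yield scrapy.Request(response.urljoin(href), callback=self.parse_branch,
                                 meta={'parent': response.url})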

Scrapy returns repeated out of order results when using a for loop, but not when going link by link

I am attempting to use Scrapy to crawl a site. Here is my code:
import scrapy


class ArticleSpider(scrapy.Spider):
    name = "article"
    start_urls = [
        'http://www.irna.ir/en/services/161',
    ]

    def parse(self, response):
        for linknum in range(1, 15):
            next_article = response.xpath('//*[@id="NewsImageVerticalItems"]/div[%d]/div[2]/h3/a/@href' % linknum).extract_first()
            next_article = response.urljoin(next_article)
            yield scrapy.Request(next_article)

        for text in response.xpath('//*[@id="ctl00_ctl00_ContentPlaceHolder_ContentPlaceHolder_NewsContent4_BodyLabel"]'):
            yield {
                'article': text.xpath('./text()').extract()
            }

        for tag in response.xpath('//*[@id="ctl00_ctl00_ContentPlaceHolder_ContentPlaceHolder_NewsContent4_bodytext"]'):
            yield {
                'tag1': tag.xpath('./div[3]/p[1]/a/text()').extract(),
                'tag2': tag.xpath('./div[3]/p[2]/a/text()').extract(),
                'tag3': tag.xpath('./div[3]/p[3]/a/text()').extract(),
                'tag4': tag.xpath('./div[3]/p[4]/a/text()').extract()
            }

        yield response.follow('http://www.irna.ir/en/services/161', callback=self.parse)
However, this returns a weird mixture of repeated items in the JSON, out of order and often skipping links: https://pastebin.com/LVkjHrRt
When I set linknum to a single number instead, the code works fine.
Why does iterating change my results?
As @TarunLalwani already stated, your current approach is not right. Basically you should:
In the parse method, extract the links to all articles on a page and yield requests to scrape them with a callback named e.g. parse_article.
Still in the parse method, check whether the button for loading more articles is present and, if so, yield a request for a URL of the pattern http://www.irna.ir/en/services/161/pageN. (This pattern can be found in the browser's developer tools, under XHR requests on the Network tab.)
Define a parse_article method where you extract the article text and tags from the details page and finally yield them as an item.
Below is the final spider:
import scrapy


class IrnaSpider(scrapy.Spider):
    name = 'irna'
    base_url = 'http://www.irna.ir/en/services/161'

    def start_requests(self):
        yield scrapy.Request(self.base_url, meta={'page_number': 1})

    def parse(self, response):
        for article_url in response.css('.DataListContainer h3 a::attr(href)').extract():
            yield scrapy.Request(response.urljoin(article_url), callback=self.parse_article)

        page_number = response.meta['page_number'] + 1
        if response.css('#MoreButton'):
            yield scrapy.Request('{}/page{}'.format(self.base_url, page_number),
                                 callback=self.parse, meta={'page_number': page_number})

    def parse_article(self, response):
        yield {
            'text': ' '.join(response.xpath('//p[@id="ctl00_ctl00_ContentPlaceHolder_ContentPlaceHolder_NewsContent4_BodyLabel"]/text()').extract()),
            'tags': [tag.strip() for tag in response.xpath('//div[@class="Tags"]/p/a/text()').extract() if tag.strip()]
        }
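To run this spider and collect the items into a file, you can either call scrapy crawl irna -o articles.jl from the command line or drive it from a script, much like the eBay example earlier on this page; a minimal sketch (the feed settings are illustrative, and newer Scrapy versions prefer the FEEDS setting):
from scrapy.crawler import CrawlerProcess

process = CrawlerProcess(
    settings={
        'FEED_URI': 'irna_articles.jl',
        'FEED_FORMAT': 'jsonlines'
    }
)
process.crawl(IrnaSpider)
process.start()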

In Scrapy, how do I pass the urls generated in one class to the next class in the script?

The following is my spider's code:
import scrapy


class ProductMainPageSpider(scrapy.Spider):
    name = 'ProductMainPageSpider'
    start_urls = ['http://domain.com/main-product-page']

    def parse(self, response):
        for product in response.css('article.isotopeItem'):
            yield {
                'title': product.css('h3 a::text').extract_first().encode("utf-8"),
                'category': product.css('h6 a::text').extract_first(),
                'img': product.css('figure a img::attr("src")').extract_first(),
                'url': product.css('h3 a::attr("href")').extract_first()
            }


class ProductSecondaryPageSpider(scrapy.Spider):
    name = 'ProductSecondaryPageSpider'
    start_urls = """ URLS IN product['url'] FROM PREVIOUS CLASS """

    def parse(self, response):
        for product in response.css('article.isotopeItem'):
            yield {
                'title': product.css('h3 a::text').extract_first().encode("utf-8"),
                'thumbnail': product.css('figure a img::attr("src")').extract_first(),
                'short_description': product.css('div.summary').extract_first(),
                'description': product.css('div.description').extract_first(),
                'gallery_images': product.css('figure a img.gallery-item ::attr("src")').extract_first()
            }
The first class/part works correctly if I remove the second class/part: it generates my JSON file correctly with the requested items in it. However, the website I need to crawl is a two-parter. It has a product archive page that shows products as a thumbnail, title, and category (and this info is not on the product's own page). Then, if you click on one of the thumbnails or titles, you get sent to a single product page with specific info on the product.
There are a lot of products, so I would like to pipe (yield?) the URLs in product['url'] to the second class as its start_urls list. But I simply don't know how to do that. My knowledge doesn't go far enough to even know what I'm missing or what is going wrong, so that I can find a solution.
The start_urls placeholder in the second class (line 20 of my code) shows what I want to do.
You don't have to create two spiders for this; you can simply go to the next URL and carry over your item, i.e.:
def parse(self, response):
    item = MyItem()
    item['name'] = response.xpath("//name/text()").extract()
    next_page_url = response.xpath("//a[@class='next']/@href").extract_first()
    yield Request(next_page_url,
                  self.parse_next,
                  meta={'item': item}  # carry over our item
                  )

def parse_next(self, response):
    # get our carried item from response meta
    item = response.meta['item']
    item['description'] = response.xpath("//description/text()").extract()
    yield item
However, if for some reason you really want to split the logic of these two steps, you can simply save the results to a file (JSON, for example: scrapy crawl first_spider -o results.json) and open/iterate through it in your second spider's start_requests() method, which would yield the URLs, i.e.:
import json
from scrapy import Spider, Request


class MySecondSpider(Spider):
    name = 'second_spider'

    def start_requests(self):
        # this overrides `start_urls` logic
        with open('results.json', 'r') as f:
            data = json.loads(f.read())
            for item in data:
                yield Request(item['url'])
