I built a simple crawler application using urllib2 and BeautifulSoup. Now I am planning to convert it into a Scrapy spider, but how can I handle errors while the crawler is running?
My current application have some code like this,
error_file = open('errors.txt','a')
finish_file = open('finishlink.txt','a')
try:
#Code for process each links
#if sucessfully finished link store into 'finish.txt' file
except Exception as e:
#link write into 'errors.txt' file with error code
So when I process thousands of links, the successfully processed ones are stored in finish.txt and the errors in errors.txt, so I can re-run the links from errors.txt later until they are processed successfully.
How can I accomplish this in the following code?
class DmozSpider(scrapy.Spider):
    """Spider that saves the raw HTML body of each start URL to a local file
    named after the last meaningful path segment of the URL."""

    name = "dmoz"
    allowed_domains = ["dmoz.org"]
    start_urls = [
        "http://www.dmoz.org/Computers/Programming/Languages/Python/Books/",
        "http://www.dmoz.org/Computers/Programming/Languages/Python/Resources/"
    ]

    def parse(self, response):
        # e.g. ".../Python/Books/" -> "Books"
        filename = response.url.split("/")[-2]
        # FIX: original read open('filename+'.txt', 'wb') — the mismatched
        # quotes made the variable part of a literal; concatenate instead.
        with open(filename + '.txt', 'wb') as f:
            f.write(response.body)
You can create a spider middleware and override the process_spider_exception() method, saving the links in a file there.
A spider middleware is just a way for you to extend Scrapy's behavior.
Here is a full example that you can modify as needed for your purpose:
from scrapy import signals
class SaveErrorsMiddleware(object):
    """Spider middleware that appends the URL of every response whose
    callback raised an exception to 'somefile.txt', one URL per line."""

    def __init__(self, crawler):
        # Tie the output file's lifetime to the spider's via signals.
        crawler.signals.connect(self.close_spider, signals.spider_closed)
        crawler.signals.connect(self.open_spider, signals.spider_opened)

    # FIX: was `#classmethod` (Stack Overflow markdown mangling) — without
    # the real decorator, Scrapy would call this as a plain function and
    # `cls` would receive the crawler object.
    @classmethod
    def from_crawler(cls, crawler):
        return cls(crawler)

    def open_spider(self, spider):
        self.output_file = open('somefile.txt', 'a')

    def close_spider(self, spider):
        self.output_file.close()

    def process_spider_exception(self, response, exception, spider):
        # Called whenever a spider callback raises: record the failing URL.
        self.output_file.write(response.url + '\n')
Put this in a module and set it up in settings.py:
# settings.py: register the middleware; order 1000 runs it closest to the spider.
SPIDER_MIDDLEWARES = {
'myproject.middleware.SaveErrorsMiddleware': 1000,
}
This code will run together with your spider, triggering the open_spider(), close_spider() and process_spider_exception() methods when appropriate.
Read more:
Spider Middlewares
Signals in Scrapy
Example middleware in Scrapy source code
Related
I'm new to webscraping/scrapy and python
Scrapy version: Scrapy 2.5.1
OS: windows
IDE: pycharm
I am trying to use FEEDS option in scrapy to automatically export the scrapped data from a website to download into excel
I tried the following Stack Overflow solution, but it didn't work. I am not sure what I'm doing wrong — am I missing something?
i also tried to add the same in my settings.py file after commenting custom_settings in my spider class as per example provided in documentation: https://docs.scrapy.org/en/latest/topics/feed-exports.html?highlight=feed#feeds
for now i achieved my requirement using spider_closed (signal) to write data to CSV by storing all the scraped items data in a array called result
class SpiderFC(scrapy.Spider):
    """Spider fragment as posted in the question — names such as `url`,
    `notice_href_follow`, `item`, `index`, `next_page` and `result` are
    defined elsewhere in the asker's project (TODO: confirm against the
    full source)."""

    name = "FC"
    start_urls = [
        url,
    ]
    # FIX: was misspelled `custom_setting` (singular). Scrapy only reads
    # `custom_settings`, so the FEEDS export was silently ignored — this is
    # why the CSV was never produced.
    custom_settings = {"FEEDS": {r"C:\Users\rreddy\PycharmProjects\fcdc\webscrp\outputfinal.csv": {"format": "csv", "overwrite": True}}}

    # FIX: was `#classmethod` (markdown mangling) — the decorator is
    # required for Scrapy's from_crawler hook to work.
    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        spider = super(SpiderFC, cls).from_crawler(crawler, *args, **kwargs)
        crawler.signals.connect(spider.spider_closed, signal=signals.spider_closed)
        return spider

    def __init__(self, name=None):
        super().__init__(name)
        self.count = None

    def parse(self, response, **kwargs):
        # Each item scraped from the parent page has a link where the actual
        # data lives, so follow each link and scrape the data there.
        yield response.follow(notice_href_follow, callback=self.parse_item,
                              meta={'item': item, 'index': index, 'next_page': next_page})

    def parse_item(self, response):
        # Item-scraping logic goes here; scraped values land in a temp list,
        # are appended to the module-level `result` array, then cleared.
        result.append(it)  # `result` is written to CSV when the spider closes
        item.clear()
        if next_page:
            yield next(self.follow_next(response, next_page))

    def follow_next(self, response, next_page):
        next_page_url = urljoin(url, next_page[0])
        yield response.follow(next_page_url, callback=self.parse)
The spider_closed signal handler:
def spider_closed(self, spider):
    # Signal handler (method of SpiderFC): once the crawl finishes, append
    # every accumulated row's "city" value as a one-column CSV line to the
    # file at `output_path` (`output_path` and `result` live elsewhere).
    with open(output_path, mode="a", newline='') as csv_file:
        csv.writer(csv_file).writerows([entry["city"]] for entry in result)
when all data is scraped and all requests are completed spider_closed signal will write the data to a csv but i'm trying to avoid this logic or code and use inbuilt exporter from scrapy but I'm having trouble in exporting the data
Check your path. If you are on windows then provide the full path in the custom_settings e.g. as below
# Windows: raw string so backslashes in the absolute path are not escapes.
custom_settings = {
"FEEDS":{r"C:\Users\Name\Path\To\outputfinal.csv" : {"format" : "csv", "overwrite":True}}
}
If you are on linux or MAC then provide the path as below:
# Linux / macOS: absolute POSIX path.
custom_settings = {
"FEEDS":{r"/Path/to/folder/fcdc/webscrp/outputfinal.csv" : {"format" : "csv", "overwrite":True}}
}
Alternatively provide the relative path as below which will create a folder structure of fcdc>>webscrp>>outputfinal.csv in the directory from which the spider is run from.
# Relative path: creates fcdc/webscrp/outputfinal.csv under the run directory.
custom_settings = {
"FEEDS":{r"./fcdc/webscrp/outputfinal.csv" : {"format" : "csv", "overwrite":True}}
}
Total noob just getting started with scrapy.
In my directory structure I have like this...
#FYI: running on Scrapy 2.4.1
WebScraper/
Webscraper/
spiders/
spider.py # (NOTE: contains spider1 and spider2 classes.)
items.py
middlewares.py
pipelines.py # (NOTE: contains spider1Pipeline and spider2Pipeline)
settings.py # (NOTE: I wrote here:
#ITEM_PIPELINES = {
# 'WebScraper.pipelines.spider1_pipelines': 300,
# 'WebScraper.pipelines.spider2_pipelines': 300,
#}
scrapy.cfg
And spider2.py resembles...
class OneSpider(scrapy.Spider):
    """First example spider: fetches url1.com and yields one dict per page."""

    name = "spider1"

    def start_requests(self):
        # FIX: removed the unused local `urls = ["url1.com",]` — the request
        # URL was hard-coded anyway, so the list was dead code.
        yield scrapy.Request(
            url="http://url1.com",
            callback=self.parse
        )

    def parse(self, response):
        # Scrape stuff, put it in a dict (placeholder from the question).
        yield dictOfScrapedStuff
class TwoSpider(scrapy.Spider):
    """Second example spider: fetches url2.com and yields one dict per page."""

    name = "spider2"

    def start_requests(self):
        # FIX: removed the unused local `urls = ["url2.com",]` — dead code,
        # mirroring the same cleanup as OneSpider.
        yield scrapy.Request(
            url="http://url2.com",
            callback=self.parse
        )

    def parse(self, response):
        # Scrape stuff, put it in a dict (placeholder from the question).
        yield dictOfScrapedStuff
With pipelines.py looking like...
class spider1_pipelines(object):
    """Pipeline for spider1: writes items to spider1.csv with a fixed
    two-column header.

    NOTE(review): the file handle opened in __init__ is never closed —
    consider open_spider/close_spider or Scrapy's built-in feed exports.
    """

    def __init__(self):
        self.csvwriter = csv.writer(open('spider1.csv', 'w', newline=''))
        self.csvwriter.writerow(['header1', 'header2'])

    def process_item(self, item, spider):
        # FIX: was `self.csvwrite.writerow(...)` — the attribute is named
        # `csvwriter`, so every item raised AttributeError.
        self.csvwriter.writerow([item['header1'], item['header2']])
        # FIX: pipelines must return the item so later pipelines receive it.
        return item
class spider2_pipelines(object):
    """Pipeline for spider2: writes items to spider2.csv with its own
    header columns (header_a/header_b, distinct from spider1's).

    NOTE(review): the file handle opened in __init__ is never closed —
    consider open_spider/close_spider or Scrapy's built-in feed exports.
    """

    def __init__(self):
        self.csvwriter = csv.writer(open('spider2.csv', 'w', newline=''))
        self.csvwriter.writerow(['header_a', 'header_b'])

    def process_item(self, item, spider):
        # FIX: was `self.csvwrite.writerow(...)` — the attribute is named
        # `csvwriter`, so every item raised AttributeError.
        self.csvwriter.writerow([item['header_a'], item['header_b']])
        # FIX: pipelines must return the item for downstream pipelines.
        return item
I have a question about running spider1 and spider2 on different urls with one terminal command:
nohup scrapy crawl spider1 -o spider1_output.csv --logfile spider1.log & scrapy crawl spider2 -o spider2_output.csv --logfile spider2.log
Note: this is an extension of a previous question specific to this stack overflow post (2018).
Desired result: spider1.csv with data from spider1, spider2.csv with data from spider2.
Current result: spider1.csv with data from spider1, spider2.csv BREAKS but error log contains spider2 data, and that there was a keyerror ['header1'], even though the item for spider2 does not include header1, it only includes header_a.
Does anyone know how to run one spider after the other on different urls, and plug data fetched by spider1, spider2, etc. into pipelines specific to that spider, as in spider1 -> spider1Pipeline -> spider1.csv, spider2 -> spider2Pipelines -> spider2.csv.
Or perhaps this is a matter of specifying the spider1_item and spider2_item from items.py? I wonder if I can specify where to insert spider2's data that way.
Thank you!
You can implement this using custom_settings spider attribute to set settings individually per spider
#spider2.py
# Example (elided with `...` in the original answer — the dicts are shown
# unclosed, so this is illustrative pseudocode, not runnable as-is): give
# each spider its own ITEM_PIPELINES via the custom_settings attribute.
class OneSpider(scrapy.Spider):
name = "spider1"
custom_settings = {
'ITEM_PIPELINES': {'WebScraper.pipelines.spider1_pipelines': 300}
...
class TwoSpider(scrapy.Spider):
name = "spider2"
custom_settings = {
'ITEM_PIPELINES': {'WebScraper.pipelines.spider2_pipelines': 300}
...
I'm using Scrapy to craw data from website, and this is my code at file spider.py in folder spider of Scrapy
class ThumbSpider(scrapy.Spider):
    """Follow every link matched by the user-supplied CSS selectors and dump
    the article text of each target page to result/page_content.txt."""

    # User configuration (CSS selectors etc.) loaded once at class creation.
    userInput = readInputData('input/user_input.json')
    name = 'thumb'
    # start_urls = ['https://vietnamnews.vn/politics-laws', 'https://vietnamnews.vn/society']

    def __init__(self, *args, **kwargs):
        super(ThumbSpider, self).__init__(*args, **kwargs)
        # FIX: fall back to an empty list — kwargs.get('start_urls') returns
        # None when the argument is not passed, and iterating None in
        # start_requests raises TypeError.
        self.start_urls = kwargs.get('start_urls') or []

    def start_requests(self):
        for url in self.start_urls:
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        for cssThumb in self.userInput['cssThumb']:  # browse each cssThumb which user provides
            items = response.css('{0}::attr(href)'.format(cssThumb)).getall()
            for item in items:
                # Resolve relative hrefs against the current page's URL.
                item = response.urljoin(item)
                yield scrapy.Request(url=item, callback=self.parse_details)

    def parse_details(self, response):
        data = response.css('div.vnnews-text-post p span::text').extract()
        # NOTE(review): appending one json.dump per page produces concatenated
        # JSON documents, not a single valid JSON file — confirm the reader.
        with open('result/page_content.txt', 'a') as outfile:
            json.dump(data, outfile)
        yield data
I call class ThumbSpider in file main.py and run this file in terminal
import json
import os
import modules.misc as msc
from scrapy.crawler import CrawlerProcess
from week_7.spiders.spider import NaviSpider, ThumbSpider
process2 = CrawlerProcess()
process2.crawl(ThumbSpider, start_urls=['https://vietnamnews.vn/politics-laws', 'https://vietnamnews.vn/society'])
process2.start()
My program doesn't get anything from the 2 URLs. However, when I uncomment start_urls = ['https://vietnamnews.vn/politics-laws', 'https://vietnamnews.vn/society'], delete the __init__ and start_requests methods from ThumbSpider, and change process2.crawl(ThumbSpider, start_urls=msc.getUserChoices()) to process2.crawl(ThumbSpider) in main.py, it works well. I don't know what is happening. Can anyone help me? Thank you so much.
I wrote a code in scrapy to scrape coffee shops from yellowpage. The total data is around 870 but I'm getting around 1200 with a minimum number of duplicates. Moreover, in the csv output the data are getting placed in every alternate row. Expecting someone to provide me with a workaround. Thanks in advance.
Folder Name "yellpg" and "items.py" contains
from scrapy.item import Item, Field
class YellpgItem(Item):
# One Yellow Pages listing: business name, street address and phone number.
name= Field()
address = Field()
phone= Field()
Spider Name "yellsp.py" which contains:
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from yellpg.items import YellpgItem
class YellspSpider(CrawlSpider):
    """Crawl Yellow Pages coffee-shop search results, following every
    pagination link and yielding one YellpgItem per listing."""

    name = "yellsp"
    allowed_domains = ["yellowpages.com"]
    start_urls = (
        'https://www.yellowpages.com/search?search_terms=Coffee%20Shops&geo_location_terms=Los%20Angeles%2C%20CA&page=1',
    )
    # Follow every link whose URL matches "&page=..." and parse each page.
    rules = (Rule(LinkExtractor(allow=(r'\&page=.*',)), callback='parse_item', follow=True),)

    def parse_item(self, response):
        # FIX: the XPath attribute axis is `@class` / `@itemprop`, not
        # `#class` — the `#` forms (markdown-mangled `@`) are invalid XPath
        # and raise a ValueError at runtime.
        page = response.xpath('//div[@class="info"]')
        for titles in page:
            item = YellpgItem()
            item["name"] = titles.xpath('.//span[@itemprop="name"]/text()').extract()
            item["address"] = titles.xpath('.//span[@itemprop="streetAddress" and @class="street-address"]/text()').extract()
            item["phone"] = titles.xpath('.//div[@itemprop="telephone" and @class="phones phone primary"]/text()').extract()
            yield item
To get the CSV output, the command line I'm using:
scrapy crawl yellsp -o items.csv
I could recommend creating a pipeline that stores items to later check if the new ones are duplicates, but that isn't a real solution here, as it could create memory problems.
The real solution here is that you should avoid "storing" duplicates in your final database.
Define what field of your item is going to behave as the index in your database and everything should be working.
The best way would be to use CSVItemExporter in your pipeline. Create a file named pipeline.py inside your scrapy project and add below code lines.
from scrapy import signals
from scrapy.exporters import CsvItemExporter
class CSVExportPipeline(object):
    """Export every item to '<spidername>_coffer_shops.csv' using Scrapy's
    CsvItemExporter, opening/closing the file with the spider lifecycle."""

    def __init__(self):
        # One open file per spider instance.
        self.files = {}

    # FIX: was `#classmethod` (Stack Overflow markdown mangling) — without
    # the decorator, Scrapy calls this as a plain function and `cls`
    # receives the crawler instead of the class.
    @classmethod
    def from_crawler(cls, crawler):
        pipeline = cls()
        crawler.signals.connect(pipeline.spider_opened, signals.spider_opened)
        crawler.signals.connect(pipeline.spider_closed, signals.spider_closed)
        return pipeline

    def spider_opened(self, spider):
        file = open('%s_coffer_shops.csv' % spider.name, 'w+b')  # hard coded filename, not a good idea
        self.files[spider] = file
        self.exporter = CsvItemExporter(file)
        self.exporter.start_exporting()

    def spider_closed(self, spider):
        self.exporter.finish_exporting()
        file = self.files.pop(spider)
        file.close()

    def process_item(self, item, spider):
        self.exporter.export_item(item)
        return item
Now add these lines in setting.py
# settings.py: enable the exporter pipeline; replace your_project_name
# with your actual package name.
ITEM_PIPELINES = {
'your_project_name.pipelines.CSVExportPipeline': 300
}
This custom CSVItemExporter will export your data in CSV styles. If you are not getting the data as expected you can modify process_item method to suits your need.
I have written two spiders in single file. When I ran scrapy runspider two_spiders.py, only the first Spider was executed. How can I run both of them without splitting the file into two files.
two_spiders.py:
import scrapy
# Both spider classes live in one file; `scrapy runspider` only runs the first.
class MySpider1(scrapy.Spider):
# first spider definition
...
class MySpider2(scrapy.Spider):
# second spider definition
...
Let's read the documentation:
Running multiple spiders in the same process
By default, Scrapy runs a
single spider per process when you run scrapy crawl. However, Scrapy
supports running multiple spiders per process using the internal API.
Here is an example that runs multiple spiders simultaneously:
import scrapy
from scrapy.crawler import CrawlerProcess
class MySpider1(scrapy.Spider):
# Your first spider definition
...
class MySpider2(scrapy.Spider):
# Your second spider definition
...
# Run both spiders in the same process via Scrapy's internal API.
process = CrawlerProcess()
process.crawl(MySpider1)
process.crawl(MySpider2)
process.start() # the script will block here until all crawling jobs are finished
(there are few more examples in the documentation)
From your question it is not clear how you have put the two spiders into one file; simply concatenating the contents of two single-spider files is not enough.
Try to do what is written in the documentation, or at least show us your code — without it we can't help you.
Here is a full Scrapy project with 2 spiders in one file.
# quote_spiders.py
import json
import string
import scrapy
from scrapy.crawler import CrawlerProcess
from scrapy.item import Item, Field
class TextCleaningPipeline(object):
    """Normalise each item's quote text: drop curly quotes, strip all ASCII
    punctuation, and lowercase the result."""

    # IMPROVED: built once at class creation instead of on every item —
    # str.maketrans was previously rebuilt per call. Maps every ASCII
    # punctuation character to None (i.e. deletes it).
    _PUNCTUATION_TABLE = str.maketrans({key: None for key in string.punctuation})

    def _clean_text(self, text):
        # Curly quotes are not in string.punctuation, so strip them first.
        text = text.replace('“', '').replace('”', '')
        clean_text = text.translate(self._PUNCTUATION_TABLE)
        return clean_text.lower()

    def process_item(self, item, spider):
        item['text'] = self._clean_text(item['text'])
        return item
class JsonWriterPipeline(object):
    """Append each item as one JSON object per line (JSON Lines format) to
    the file named by the spider's JSON_FILE setting."""

    def open_spider(self, spider):
        # Output path comes from the crawler settings, opened in append mode.
        self.file = open(spider.settings['JSON_FILE'], 'a')

    def close_spider(self, spider):
        self.file.close()

    def process_item(self, item, spider):
        # Serialise and write in one step; a trailing newline keeps the
        # file valid JSON Lines.
        self.file.write(json.dumps(dict(item)) + "\n")
        return item
class QuoteItem(Item):
# A single scraped quote plus the name of the spider that collected it.
text = Field()
author = Field()
tags = Field()
spider = Field()
class QuotesSpiderOne(scrapy.Spider):
    """Scrape all quotes from page 1 of quotes.toscrape.com."""

    name = "quotes1"

    def start_requests(self):
        # Single seed page; kept as a loop so more pages are easy to add.
        for target in ['http://quotes.toscrape.com/page/1/', ]:
            yield scrapy.Request(url=target, callback=self.parse)

    def parse(self, response):
        # One QuoteItem per quote <div>, tagged with this spider's name so
        # the output of both spiders can be told apart.
        for quote in response.css('div.quote'):
            entry = QuoteItem()
            entry['text'] = quote.css('span.text::text').get()
            entry['author'] = quote.css('small.author::text').get()
            entry['tags'] = quote.css('div.tags a.tag::text').getall()
            entry['spider'] = self.name
            yield entry
class QuotesSpiderTwo(scrapy.Spider):
    """Scrape all quotes from page 2 of quotes.toscrape.com."""

    name = "quotes2"

    def start_requests(self):
        # Single seed page; kept as a loop so more pages are easy to add.
        for target in ['http://quotes.toscrape.com/page/2/', ]:
            yield scrapy.Request(url=target, callback=self.parse)

    def parse(self, response):
        # One QuoteItem per quote <div>, tagged with this spider's name so
        # the output of both spiders can be told apart.
        for quote in response.css('div.quote'):
            entry = QuoteItem()
            entry['text'] = quote.css('span.text::text').get()
            entry['author'] = quote.css('small.author::text').get()
            entry['tags'] = quote.css('div.tags a.tag::text').getall()
            entry['spider'] = self.name
            yield entry
if __name__ == '__main__':
    # Shared settings for both spiders. The pipelines are referenced through
    # '__main__' because the whole project lives in this single file.
    settings = {
        'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)',
        'HTTPCACHE_ENABLED': True,
        'JSON_FILE': 'items.jl',
        'ITEM_PIPELINES': {
            '__main__.TextCleaningPipeline': 800,
            '__main__.JsonWriterPipeline': 801,
        },
    }

    # Queue both spiders in one process; start() blocks until both finish.
    process = CrawlerProcess(settings=settings)
    process.crawl(QuotesSpiderOne)
    process.crawl(QuotesSpiderTwo)
    process.start()
Install Scrapy and run the script
$ pip install Scrapy
$ python quote_spiders.py
No other file is needed.
This example coupled with graphical debugger of pycharm/vscode can help understand scrapy workflow and make debugging easier.