I am new to Scrapy.
Is it possible to change the CLOSESPIDER_ITEMCOUNT setting while the spider is running?
class TestSpider(scrapy.Spider):
    """Question code: illustrates wanting to raise CLOSESPIDER_ITEMCOUNT
    from inside parse() while the crawl is running."""
    name = 'tester'
    # Initial item limit; the question asks how to raise this to 300 at runtime.
    custom_settings = {'CLOSESPIDER_ITEMCOUNT': 100,}
    def start_requests(self):
        urls = ['https://google.com', 'https://amazon.com']
        for url in urls:
            yield scrapy.Request(url, callback=self.parse)
    def parse(self, response):
        # NOTE(review): '[id="content"]' probably means '[@id="content"]' —
        # without '@' this does not test the attribute; confirm intent.
        if response.xpath('//*[id="content"]') or True: # only for testing
            # set CLOSESPIDER_ITEMCOUNT to 300
            # rest of code
I want to be able to change the value on an "if condition" in the parse method
You can get access to the crawler settings object, unfreeze the settings, change the value and then freeze the settings object again. Please note that since this is not documented in the docs, it may have unexpected effects.
class TestSpider(scrapy.Spider):
    """Spider that raises its own CLOSESPIDER_ITEMCOUNT limit at runtime.

    The crawler's settings object is briefly unfrozen so the value can be
    replaced, then refrozen. This relies on undocumented behaviour and may
    have unexpected effects.
    """

    name = 'tester'
    custom_settings = {'CLOSESPIDER_ITEMCOUNT': 100,}

    def start_requests(self):
        # One request per seed URL, all handled by parse().
        seeds = ['https://google.com', 'https://amazon.com']
        yield from (scrapy.Request(seed, callback=self.parse) for seed in seeds)

    def parse(self, response):
        if response.xpath('//*[id="content"]') or True: # only for testing
            # Temporarily unfreeze the settings, bump the limit, refreeze.
            settings = self.crawler.settings
            settings.frozen = False
            settings.set("CLOSESPIDER_ITEMCOUNT", 300)
            settings.frozen = True
            # add the rest of the code
Related
I am practicing web scraping. I am trying to scrape websites and want to include allowed_domains so that the spider does not scrape other URLs.
import scrapy
class SeleniumSpider(scrapy.Spider):
    """Scrape quote boxes from quotes.toscrape.com and print each record."""

    name = 'test_selenium'
    allowed_domains=['quotes.toscrape.com']
    start_urls = ['https://quotes.toscrape.com/page/1/']

    def parse(self, response):
        # Walk every quote box on the page and print its extracted fields.
        for box in response.css('div.quote'):
            text = box.css('span.text::text').get()
            author = box.css('small.author::text').get()
            tags = box.css('div.tags a.tag::text').getall()
            print({'text': text, 'author': author, 'tags': tags})
So, I want the allowed domain to change as the URLs in start_urls change — not only for the same domain, but for different domains as well.
Thank you.
I don't know if I understand your problem, because it is not a big problem to manually add new allowed_domains when you manually add new start_urls.
But if you want to create automatically allowed_domains based on start_urls
then you can use __init__ to get domains from self.start_urls and add to self.allowed_domains.
import urllib.parse
class SeleniumSpider(scrapy.Spider):
    """Spider whose allowed_domains is derived from start_urls at init time."""

    name = 'test_selenium'
    start_urls = ['https://quotes.toscrape.com/page/1/']

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Set comprehension keeps every domain only once.
        unique_domains = {urllib.parse.urlparse(link).netloc for link in self.start_urls}
        self.allowed_domains = list(unique_domains)
You may use __init__ to set other values automatically - i.e. to read values from file or database or get from command line.
Full working example
import scrapy
import urllib.parse
class SeleniumSpider(scrapy.Spider):
    """Crawl quotes.toscrape.com, restricting follow-up requests to the
    domains of the start URLs.

    allowed_domains is built in __init__ from the netloc of every start
    URL, so adding a start URL automatically allows its domain.
    """

    name = 'test_selenium'
    #allowed_domains=['quotes.toscrape.com']   # now derived automatically
    start_urls = ['https://quotes.toscrape.com/page/1/']

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        allowed = set()  # `set()` to keep every domain only once
        for url in self.start_urls:
            parts = urllib.parse.urlparse(url)
            #print(parts)
            allowed.add( parts.netloc )
        self.allowed_domains = list(allowed)
        for domain in self.allowed_domains:
            print("allowed:", domain)

    def parse(self, response):
        print('parse url:', response.url)
        # BUG FIX: the XPath attribute axis is `@href`, not `#href` —
        # '#' is invalid in XPath and the original expression raises an error.
        for a in response.xpath('//a/@href'):
            yield response.follow(a)
# --- run without project ---
from scrapy.crawler import CrawlerProcess
# Run the spider standalone, without a generated Scrapy project.
c = CrawlerProcess()
c.crawl(SeleniumSpider)
c.start()  # blocks until the crawl finishes
I am trying to parse a list from a javascript website. When I run it, it only gives me back one entry on each column and then the spider shuts down. I have already set up my middleware settings. I am not sure what is going wrong. Thanks in advance!
import scrapy
from scrapy_splash import SplashRequest
class MalrusSpider(scrapy.Spider):
    """Scrape a paginated JavaScript-rendered table via Splash.

    Each page is rendered with Splash's render.html endpoint; a Lua script
    then clicks the "next" pagination button and re-renders so the
    following page can be parsed with the same callback.
    """

    name = 'malrus'
    allowed_domains = ['backgroundscreeninginrussia.com']
    start_urls = ['http://www.backgroundscreeninginrussia.com/publications/new-citizens-of-malta-since-january-2015-till-december-2017/']

    def start_requests(self):
        for url in self.start_urls:
            yield SplashRequest(url=url,
                                callback=self.parse,
                                endpoint='render.html')

    def parse(self, response):
        # BUG FIX: XPath attribute tests use '@', not '#'
        # ('[#id=...]' / '[#class=...]' are invalid XPath expressions).
        russians = response.xpath('//table[@id="tablepress-8"]')
        for russian in russians:
            # NOTE(review): a leading '//' restarts from the document root,
            # so every iteration matches the same first node; './/*' would
            # select relative to `russian` — confirm intended behaviour.
            yield {'name': russian.xpath('//*[@class="column-1"]/text()').extract_first(),
                   'source': russian.xpath('//*[@class="column-2"]/text()').extract_first()}
        # Lua script: click the "next" pagination button, wait, and return
        # the resulting URL and HTML for another pass through parse().
        script = """function main(splash)
assert(splash:go(splash.args.url))
splash:wait(0.3)
button = splash:select("a[class=paginate_button next] a")
splash:set_viewport_full()
splash:wait(0.1)
button:mouse_click()
splash:wait(1)
return {url = splash:url(),
html = splash:html()}
end"""
        yield SplashRequest(url=response.url,
                            callback=self.parse,
                            endpoint='execute',
                            args={'lua_source': script})
The .extract_first() (now .get()) you used will always return the first result. It's not an iterator so there is no sense to call it several times. You should try the .getall() method. That will be something like:
# BUG FIX: XPath attribute selectors use '@' (e.g. [@id=...]); '#' is invalid XPath.
names = response.xpath('//table[@id="tablepress-8"]').xpath('//*[@class="column-1"]/text()').getall()
sources = response.xpath('//table[@id="tablepress-8"]').xpath('//*[@class="column-2"]/text()').getall()
I have the following code:
#FirstSpider.py
class FirstSpider(scrapy.Spider):
    """Question code: collects item links, then tries to hand follow-up
    requests to a *different* spider class by using its method as callback."""
    name = 'first'
    start_urls = ['https://www.basesite.com']
    # Class-level list, shared by all instances of this spider.
    next_urls = []
    def parse(self, response):
        for url in response.css('bunch > of > css > here'):
            self.next_urls.append(url.css('more > css > here'))
            l = Loader(item=Item(), selector=url.css('more > css'))
            l.add_css('add', 'more > css')
            ...
            ...
            yield l.load_item()
        # NOTE(review): SecondSpider.parse_url is an unbound method, so when
        # Scrapy invokes callback(response), the response lands in its `self`
        # parameter — this is exactly the behaviour the question asks about.
        for url in self.next_urls:
            new_urls = self.start_urls[0] + url
            yield scrapy.Request(new_urls, callback=SecondSpider.parse_url)
#SecondSpider.py
class SecondSpider(scrapy.Spider):
    """Question code: intended to receive responses produced by FirstSpider."""
    name = 'second'
    start_urls = ['https://www.basesite.com']
    def parse_url(self):
        """Parse team data."""
        # When used as FirstSpider's callback, `self` here is actually the
        # response object, not a SecondSpider instance (see note below).
        return self
        # self is a HtmlResponse not a 'response' object
    def parse(self, response):
        """Parse all."""
        # NOTE(review): passes an extra argument although parse_url only
        # accepts `self` — this would raise TypeError if executed.
        summary = self.parse_url(response)
        return summary
#ThirdSpider.py
class ThirdSpider(scrapy.Spider):
    """Stub from the question: would take links from the second spider."""
    # take links from second spider, continue:
I want to be able to pass the URL scraped in Spider 1 to Spider 2 (in a different script). I'm curious as to why, when I do, the 'response' is an HtmlResponse and not a response object. (When doing something similar with a method in the same class as Spider 1, I don't have this issue.)
What am I missing here? How do I just pass the original response(s) to the second spider? (And from the second onto the third, etc.?)
You could use Redis as shared resource between all spiders https://github.com/rmax/scrapy-redis
Run all N spiders (don't close on idle state), so each of them will be connected to same Redis and waiting tasks(url, request headers) from there;
As the side-effect push task data to Redis from X_spider with specific key (Y_spider name).
What about using inheritance? "parse" function names should be different.
If your first spider inherits from the second, it will be able to set the callback to self.parse_function_spider2
I'm interested in using Scrapy-Redis to store scraped items in Redis. In particular, the Redis-based request duplicates filter seems like a useful feature.
To start off, I adapted the spider at https://doc.scrapy.org/en/latest/intro/tutorial.html#extracting-data-in-our-spider as follows:
import scrapy
from tutorial.items import QuoteItem
class QuotesSpider(scrapy.Spider):
    """Scrape quotes while routing scheduling, duplicate filtering and item
    storage through scrapy-redis."""

    name = "quotes"
    start_urls = [
        'http://quotes.toscrape.com/page/1/',
        'http://quotes.toscrape.com/page/2/',
    ]
    # Per-spider settings enabling the Redis-backed components.
    custom_settings = {'SCHEDULER': 'scrapy_redis.scheduler.Scheduler',
                       'DUPEFILTER_CLASS': 'scrapy_redis.dupefilter.RFPDupeFilter',
                       'ITEM_PIPELINES': {'scrapy_redis.pipelines.RedisPipeline': 300}}

    def parse(self, response):
        # Yield one QuoteItem per quote box found on the page.
        for box in response.css('div.quote'):
            item = QuoteItem()
            item['text'] = box.css('span.text::text').extract_first()
            item['author'] = box.css('small.author::text').extract_first()
            item['tags'] = box.css('div.tags a.tag::text').extract()
            yield item
where I generated the project using scrapy startproject tutorial at the command line and defined QuoteItem in items.py as
import scrapy
class QuoteItem(scrapy.Item):
    """Container for one scraped quote: its text, author and tag list."""
    text = scrapy.Field()
    author = scrapy.Field()
    tags = scrapy.Field()
Basically, I've implemented the settings in the "Usage" section of the README in the settings per-spider and made the spider yield an Item object instead of a regular Python dictionary. (I figured this would be necessary to trigger the Item Pipeline).
Now, if I crawl the spider using scrapy crawl quotes from the command line and then do redis-cli, I see a quotes:items key:
127.0.0.1:6379> keys *
1) "quotes:items"
which is a list of length 20:
127.0.0.1:6379> llen quotes:items
(integer) 20
If I run scrapy crawl quotes again, the length of the list doubles to 40:
127.0.0.1:6379> llen quotes:items
(integer) 40
However, I would expect the length of quotes:items to still be 20, since I have simply re-scraped the same pages. Am I doing something wrong here?
Scrapy-redis doesn't filter duplicate items automatically.
The (requests) dupefilter is about the requests in a crawl. What you want seems to be something similar to the deltafetch middleware: https://github.com/scrapy-plugins/scrapy-deltafetch
You would need to adapt deltafetch to work with a distributed storage, perhaps redis' bitmap feature will fit this case.
Here is how I fixed the problem in the end. First of all, as pointed out to me in a separate question, How to implement a custom dupefilter in Scrapy?, using the start_urls class variable results in an implementation of start_requests in which the yielded Request objects have dont_filter=True. To disable this and use the default dont_filter=False instead, I implemented start_requests directly:
import scrapy
from tutorial.items import QuoteItem
class QuotesSpider(scrapy.Spider):
    """Quotes spider whose requests pass through a custom Redis dupefilter.

    start_requests is implemented explicitly so the seed requests are
    created with the default dont_filter=False and are therefore subject
    to duplicate filtering.
    """

    name = "quotes"
    custom_settings = {
        'SCHEDULER': 'scrapy_redis.scheduler.Scheduler',
        'DUPEFILTER_CLASS': 'tutorial.dupefilter.RedisDupeFilter',
        'ITEM_PIPELINES': {'scrapy_redis.pipelines.RedisPipeline': 300}
    }

    def start_requests(self):
        seed_urls = [
            'http://quotes.toscrape.com/page/1/',
            'http://quotes.toscrape.com/page/2/',
        ]
        yield from (scrapy.Request(url=seed, callback=self.parse) for seed in seed_urls)

    def parse(self, response):
        # One QuoteItem per quote box.
        for box in response.css('div.quote'):
            item = QuoteItem()
            item['text'] = box.css('span.text::text').extract_first()
            item['author'] = box.css('small.author::text').extract_first()
            item['tags'] = box.css('div.tags a.tag::text').extract()
            yield item
Secondly, as pointed out by Rolando, the fingerprints aren't by default persisted across different crawls. To implement this, I subclassed Scrapy-Redis' RFPDupeFilter class:
import scrapy_redis.dupefilter
from scrapy_redis.connection import get_redis_from_settings
class RedisDupeFilter(scrapy_redis.dupefilter.RFPDupeFilter):
    """Dupefilter that persists seen URLs across crawls in one Redis set.

    Differences from scrapy-redis' RFPDupeFilter:
      * the Redis key is fixed ("URLs_seen") instead of timestamped, so
        every crawl shares the same set of fingerprints;
      * clear() is a no-op, so the set survives spider close.
    """

    # BUG FIX: this must be the `@classmethod` decorator, not a comment —
    # without it, from_settings is an instance method and Scrapy's
    # `cls.from_settings(settings)` construction call breaks.
    @classmethod
    def from_settings(cls, settings):
        server = get_redis_from_settings(settings)
        key = "URLs_seen"  # Use a fixed key instead of one containing a timestamp
        debug = settings.getbool('DUPEFILTER_DEBUG')
        return cls(server=server, key=key, debug=debug)

    def request_seen(self, request):
        # SADD returns 0 when the URL was already in the set -> duplicate.
        added = self.server.sadd(self.key, request.url)
        return added == 0

    def clear(self):
        pass  # Don't delete the key from Redis
The main differences are (1) the key is set to a fixed value (not one containing a time stamp) and (2) the clear method, which in Scrapy-Redis' implementation deletes the key from Redis, is effectively disabled.
Now, when I run scrapy crawl quotes the second time, I see the expected log output
2017-05-05 15:13:46 [scrapy_redis.dupefilter] DEBUG: Filtered duplicate request <GET http://quotes.toscrape.com/page/1/> - no more duplicates will be shown (see DUPEFILTER_DEBUG to show all duplicates)
and no items are scraped.
I'm crawling around 20 million URLs. But before the requests are actually made, the process gets killed due to excessive memory usage (4 GB RAM). How can I handle this in Scrapy so that the process doesn't get killed?
class MySpider(Spider):
    """Question code: builds a 20-million-element URL list at class
    definition time — this is what exhausts the 4 GB of RAM."""
    name = "mydomain"
    allowed_domains = ["mydomain.com"]
    # This loop runs when the class body is executed and materialises all
    # 20,000,000 URL strings in memory before a single request is made.
    urls = []
    for d in range(0,20000000):
        link = "http://example.com/"+str(d)
        urls.append(link)
    start_urls = urls
    def parse(self, response):
        yield response
I think I found the workaround.
Add this method to your spider.
def start_requests(self):
    """Lazily yield one request per numeric id — no URL list is pre-built."""
    for page_id in range(1, 26999999):
        yield scrapy.Request("http://example.com/" + str(page_id), self.parse)
You don't have to specify start_urls at the start.
Scrapy will start generating URLs and sending asynchronous requests, and the callback will be called when it gets a response. At the start the memory usage will be higher, but later on it will stay constant.
Along with this you can use
scrapy crawl somespider -s JOBDIR=crawls/somespider-1
By using this you can pause the spider and resume it any time by using the same command
and in order to save CPU (and log storage requirements)
use
LOG_LEVEL = 'INFO'
in settings.py of the scrapy project.
I believe creating a big list of urls to use as start_urls may be causing the problem.
How about doing this instead?
class MySpider(Spider):
    """Generate the ~20M follow-up requests lazily from parse() instead of
    materialising them all in start_urls."""
    name = "mydomain"
    allowed_domains = ["mydomain.com"]
    # A single seed URL; the remaining URLs are yielded on demand below.
    start_urls = ["http://example.com/0"]

    def parse(self, response):
        # BUG FIX: `xrange` does not exist in Python 3 (other snippets in
        # this file are Python 3, e.g. argument-less super()); `range` is
        # lazy in Python 3, so memory use stays constant.
        for d in range(1, 20000000):
            link = "http://example.com/" + str(d)
            yield Request(url=link, callback=self.parse_link)

    def parse_link(self, response):
        yield response