I am trying to get the last row of the 'plate' column and append data after it, but it gives a corrupt-file error even though the Scrapy part works properly.
I guess the error is due to the lines below, where I first create a pandas ExcelWriter object and then use a DataFrame to find the last row.
with pd.ExcelWriter('output_res.xlsx', mode='r+', if_sheet_exists='overlay') as writer:
    df_last = pd.DataFrame('output_res.xlsx')
    lastRow = df_last['plate'].iget(-1)
    df_output = pd.DataFrame(itemList)
    df_output.to_excel(writer, sheet_name='result', index=False, header=True, startrow=lastRow)
My guess is that lastRow never gets a valid value, which is why nothing usable is passed to the to_excel method.
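What I am trying to end up with is roughly the lookup sketched below (just an illustration, assuming the sheet is named 'result' and already contains a 'plate' column):

df_last = pd.read_excel('output_res.xlsx', sheet_name='result')
last_plate = df_last['plate'].iloc[-1]   # value in the last filled row
next_row = len(df_last) + 1              # first empty row below the header row

The full spider is below: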
import scrapy
from scrapy.crawler import CrawlerProcess
import pandas as pd


class plateScraper(scrapy.Spider):
    name = 'scrapePlate'
    allowed_domains = ['dvlaregistrations.dvla.gov.uk']

    def start_requests(self):
        df = pd.read_excel('data.xlsx')
        columnA_values = df['PLATE']
        for row in columnA_values:
            global plate_num_xlsx
            plate_num_xlsx = row
            base_url = f"https://dvlaregistrations.dvla.gov.uk/search/results.html?search={plate_num_xlsx}&action=index&pricefrom=0&priceto=&prefixmatches=&currentmatches=&limitprefix=&limitcurrent=&limitauction=&searched=true&openoption=&language=en&prefix2=Search&super=&super_pricefrom=&super_priceto="
            url = base_url
            yield scrapy.Request(url)

    def parse(self, response):
        itemList = []
        for row in response.css('div.resultsstrip'):
            plate = row.css('a::text').get()
            price = row.css('p::text').get()

            if plate_num_xlsx == plate.replace(" ", "").strip():
                item = {"plate": plate.strip(), "price": price.strip()}
                itemList.append(item)
                yield item
            else:
                item = {"plate": plate.strip(), "price": "-"}
                itemList.append(item)
                yield item

        with pd.ExcelWriter('output_res.xlsx', mode='r+', if_sheet_exists='overlay') as writer:
            df_last = pd.DataFrame('output_res.xlsx')
            lastRow = df_last['plate'].iget(-1)
            df_output = pd.DataFrame(itemList)
            df_output.to_excel(writer, sheet_name='result', index=False, header=True, startrow=lastRow)


process = CrawlerProcess()
process.crawl(plateScraper)
process.start()
Running this gives an error:
Traceback (most recent call last):
File "C:\Users\Admin\AppData\Local\Programs\Python\Python310\lib\site-packages\scrapy\utils\defer.py", line 240, in iter_errback
yield next(it)
File "C:\Users\Admin\AppData\Local\Programs\Python\Python310\lib\site-packages\scrapy\utils\python.py", line 338, in __next__
return next(self.data)
File "C:\Users\Admin\AppData\Local\Programs\Python\Python310\lib\site-packages\scrapy\utils\python.py", line 338, in __next__
return next(self.data)
File "C:\Users\Admin\AppData\Local\Programs\Python\Python310\lib\site-packages\scrapy\core\spidermw.py", line 79, in process_sync
for r in iterable:
File "C:\Users\Admin\AppData\Local\Programs\Python\Python310\lib\site-packages\scrapy\spidermiddlewares\offsite.py", line 29, in <genexpr>
return (r for r in result or () if self._filter(r, spider))
File "C:\Users\Admin\AppData\Local\Programs\Python\Python310\lib\site-packages\scrapy\core\spidermw.py", line 79, in process_sync
for r in iterable:
File "C:\Users\Admin\AppData\Local\Programs\Python\Python310\lib\site-packages\scrapy\spidermiddlewares\referer.py", line 336, in <genexpr>
return (self._set_referer(r, response) for r in result or ())
File "C:\Users\Admin\AppData\Local\Programs\Python\Python310\lib\site-packages\scrapy\core\spidermw.py", line 79, in process_sync
for r in iterable:
File "C:\Users\Admin\AppData\Local\Programs\Python\Python310\lib\site-packages\scrapy\spidermiddlewares\urllength.py", line 28, in <genexpr>
return (r for r in result or () if self._filter(r, spider))
File "C:\Users\Admin\AppData\Local\Programs\Python\Python310\lib\site-packages\scrapy\core\spidermw.py", line 79, in process_sync
for r in iterable:
File "C:\Users\Admin\AppData\Local\Programs\Python\Python310\lib\site-packages\scrapy\spidermiddlewares\depth.py", line 32, in <genexpr>
return (r for r in result or () if self._filter(r, response, spider))
File "C:\Users\Admin\AppData\Local\Programs\Python\Python310\lib\site-packages\scrapy\core\spidermw.py", line 79, in process_sync
for r in iterable:
File "C:\pythonPro\w_crawl\SimonDarak\scrpy_00.py", line 33, in parse
with pd.ExcelWriter('output_res.xlsx', mode='a',if_sheet_exists='overlay') as writer:
File "C:\Users\Admin\AppData\Local\Programs\Python\Python310\lib\site-packages\pandas\io\excel\_openpyxl.py", line 73, in __init__
self._book = load_workbook(self._handles.handle, **engine_kwargs)
File "C:\Users\Admin\AppData\Local\Programs\Python\Python310\lib\site-packages\openpyxl\reader\excel.py", line 317, in load_workbook
reader.read()
File "C:\Users\Admin\AppData\Local\Programs\Python\Python310\lib\site-packages\openpyxl\reader\excel.py", line 282, in read
self.read_worksheets()
File "C:\Users\Admin\AppData\Local\Programs\Python\Python310\lib\site-packages\openpyxl\reader\excel.py", line 228, in read_worksheets
ws_parser.bind_all()
File "C:\Users\Admin\AppData\Local\Programs\Python\Python310\lib\site-packages\openpyxl\worksheet\_reader.py", line 448, in bind_all
self.bind_cells()
File "C:\Users\Admin\AppData\Local\Programs\Python\Python310\lib\site-packages\openpyxl\worksheet\_reader.py", line 351, in bind_cells
for idx, row in self.parser.parse():
File "C:\Users\Admin\AppData\Local\Programs\Python\Python310\lib\site-packages\openpyxl\worksheet\_reader.py", line 144, in parse
for _, element in it:
File "C:\Users\Admin\AppData\Local\Programs\Python\Python310\lib\xml\etree\ElementTree.py", line 1255, in iterator
data = source.read(16 * 1024)
File "C:\Users\Admin\AppData\Local\Programs\Python\Python310\lib\zipfile.py", line 925, in read
data = self._read1(n)
File "C:\Users\Admin\AppData\Local\Programs\Python\Python310\lib\zipfile.py", line 1015, in _read1
self._update_crc(data)
File "C:\Users\Admin\AppData\Local\Programs\Python\Python310\lib\zipfile.py", line 943, in _update_crc
raise BadZipFile("Bad CRC-32 for file %r" % self.name)
zipfile.BadZipFile: Bad CRC-32 for file 'xl/worksheets/sheet1.xml'
Process finished with exit code -1
I also tried taking the list out of the parse method and putting it outside the class:
itemList = []

def parse(self, response):
    for row in response.css('div.resultsstrip'):
        plate = row.css('a::text').get()
        price = row.css('p::text').get()

        a = plate.replace(" ", "").strip()
        print(plate_num_xlsx, a, a == plate_num_xlsx)
        if plate_num_xlsx == plate.replace(" ", "").strip():
            item = {"plate": plate.strip(), "price": price.strip()}
            itemList.append(item)
            yield item
        else:
            item = {"plate": plate_num_xlsx, "price": "-"}
            itemList.append(item)
            yield item

    with pd.ExcelWriter('output_res.xlsx', mode='r+', if_sheet_exists='overlay') as writer:
        df_output = pd.DataFrame(itemList)
        df_output.to_excel(writer, sheet_name='result', index=False, header=True)
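For reference, the append step I was expecting would look roughly like the sketch below (an illustration only, assuming the workbook already exists with a 'result' sheet and that pandas is recent enough to support if_sheet_exists with the openpyxl engine):

df_last = pd.read_excel('output_res.xlsx', sheet_name='result')   # rows already written
startrow = len(df_last) + 1                                       # row 0 is the header

with pd.ExcelWriter('output_res.xlsx', mode='a', engine='openpyxl', if_sheet_exists='overlay') as writer:
    pd.DataFrame(itemList).to_excel(writer, sheet_name='result',
                                    index=False, header=False, startrow=startrow)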
I'm working on an OCR project using Scrapy, with the OCR API at https://ocr.space/ocrapi . I have some code that works successfully using requests:
file_string = ctypes.string_at(image_data_pointer, length.value)
payload_filename = 'my_hires_image.jpg'

# Post payload as multipart encoded image file with filename.
# requests.post(THE_URL, files={'file': (payload_filename, payload)})
payload = {'isOverlayRequired': overlay,
           'apikey': api_key,
           'language': language,
           }
r = requests.post('https://api.ocr.space/parse/image',
                  files={payload_filename: file_string},
                  data=payload,
                  )
return r.content.decode()
I'm now trying to turn this into a Scrapy POST request. I have:
payload_filename = 'my_hires_image.jpg'

# Post payload as multipart encoded image file with filename.
# requests.post(THE_URL, files={'file': (payload_filename, payload)})
body = {'file': file_string,
        'isOverlayRequired': True,
        'apikey': 'mykey',
        'language': 'eng',
        }
files = {payload_filename: file_string}

yield FormRequest(url='https://api.ocr.space/parse/image', headers=headers2, formdata=body, callback=self.ocr_space, meta={'row': row, 'cookiejar': i}, dont_filter=True)
Please note that file_string is a byte string. The code is giving me:
File "/\lib\site-packages\scrapy\utils\defer.py", line 102, in iter_errback
yield next(it)
File "/\lib\site-packages\scrapy\spidermiddlewares\offsite.py", line 29, in process_spider_output
for x in result:
File "/\lib\site-packages\scrapy\spidermiddlewares\referer.py", line 339, in <genexpr>
return (_set_referer(r) for r in result or ())
File "/\lib\site-packages\scrapy\spidermiddlewares\urllength.py", line 37, in <genexpr>
return (r for r in result or () if _filter(r))
File "/\lib\site-packages\scrapy\spidermiddlewares\depth.py", line 58, in <genexpr>
return (r for r in result or () if _filter(r))
File "Emy_PROject/spiders\ocr_spider.py", line 148, in get_PDF
yield FormRequest(url='https://api.ocr.space/parse/image', headers=headers2, body=body, callback=self.ocr_space, meta={'row': row, 'cookiejar': i}, dont_filter=True)
File "/\lib\site-packages\scrapy\http\request\form.py", line 27, in __init__
super(FormRequest, self).__init__(*args, **kwargs)
File "/\lib\site-packages\scrapy\http\request\__init__.py", line 26, in __init__
self._set_body(body)
File "/\lib\site-packages\scrapy\http\request\__init__.py", line 69, in _set_body
self._body = to_bytes(body, self.encoding)
File "/\lib\site-packages\scrapy\utils\python.py", line 117, in to_bytes
'object, got %s' % type(text).__name__)
TypeError: to_bytes must receive a unicode, str or bytes object, got dict
How can I get this working?
Edit:
body = {'files': file_string,
        'isOverlayRequired': True,
        'apikey': '*******',
        'language': 'eng',
        }
body = urllib.parse.urlencode(body)
x = FormRequest('https://api.ocr.space/parse/image', headers=headers2, formdata=body, callback=self.ocr_space, meta={'row': row, 'cookiejar': i}, dont_filter=True)
yields:
File "....\scrapy\utils\defer.py", line 102, in iter_errback
yield next(it)
File "....\scrapy\spidermiddlewares\offsite.py", line 29, in process_spider_output
for x in result:
File "....\scrapy\spidermiddlewares\referer.py", line 339, in <genexpr>
return (_set_referer(r) for r in result or ())
File "....\scrapy\spidermiddlewares\urllength.py", line 37, in <genexpr>
return (r for r in result or () if _filter(r))
File "....\scrapy\spidermiddlewares\depth.py", line 58, in <genexpr>
return (r for r in result or () if _filter(r))
File "myproject\spiders\ocr_spider.py", line 151, in get_PDF
x = FormRequest('https://api.ocr.space/parse/image', headers=headers2, formdata=body, callback=self.ocr_space, meta={'row': row, 'cookiejar': i}, dont_filter=True)
File "....\scrapy\http\request\form.py", line 31, in __init__
querystr = _urlencode(items, self.encoding)
File "....\scrapy\http\request\form.py", line 66, in _urlencode
for k, vs in seq
File "....\scrapy\http\request\form.py", line 65, in <listcomp>
values = [(to_bytes(k, enc), to_bytes(v, enc))
ValueError: not enough values to unpack (expected 2, got 1)
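A pattern I could try instead (a sketch, not verified against this API) is to skip FormRequest and build the multipart body manually, for example with urllib3's encode_multipart_formdata, then send it with a plain scrapy.Request. The 'file' field name and the header handling below are assumptions based on the working requests call:

from urllib3.filepost import encode_multipart_formdata
import scrapy

# Inside the spider callback (e.g. get_PDF). FormRequest's formdata only handles
# plain text fields, so the file has to go into a hand-built multipart body.
fields = {
    'file': (payload_filename, file_string),  # (filename, raw bytes)
    'isOverlayRequired': 'true',              # form fields are sent as strings
    'apikey': 'mykey',
    'language': 'eng',
}
body, content_type = encode_multipart_formdata(fields)

yield scrapy.Request(
    url='https://api.ocr.space/parse/image',
    method='POST',
    body=body,
    headers={'Content-Type': content_type},
    callback=self.ocr_space,
    meta={'row': row, 'cookiejar': i},
    dont_filter=True,
)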
Intention / expected behaviour
Return the text of the links from page: https://www.bezrealitky.cz/vypis/nabidka-prodej/byt/praha
In CSV format and in the shell.
Error
I get a KeyError: 'title', even though I have defined the key in the item loader in Item.py.
Full Traceback
Traceback (most recent call last):
File "C:\Users\phili\Anaconda3\envs\py35\lib\site-packages\scrapy\utils\defer.py", line 102, in iter_errback
yield next(it)
File "C:\Users\phili\Anaconda3\envs\py35\lib\site-packages\scrapy\spidermiddlewares\offsite.py", line 29, in process_spider_output
for x in result:
File "C:\Users\phili\Anaconda3\envs\py35\lib\site-packages\scrapy\spidermiddlewares\referer.py", line 22, in <genexpr>
return (_set_referer(r) for r in result or ())
File "C:\Users\phili\Anaconda3\envs\py35\lib\site-packages\scrapy\spidermiddlewares\urllength.py", line 37, in <genexpr>
return (r for r in result or () if _filter(r))
File "C:\Users\phili\Anaconda3\envs\py35\lib\site-packages\scrapy\spidermiddlewares\depth.py", line 58, in <genexpr>
return (r for r in result or () if _filter(r))
File "C:\Users\phili\Documents\Python Scripts\Scrapy Spiders\bezrealitky\bezrealitky\spiders\bezrealitky_spider.py", line 33, in parse
yield loader.load_item()
File "C:\Users\phili\Anaconda3\envs\py35\lib\site-packages\scrapy\loader\__init__.py", line 115, in load_item
value = self.get_output_value(field_name)
File "C:\Users\phili\Anaconda3\envs\py35\lib\site-packages\scrapy\loader\__init__.py", line 122, in get_output_value
proc = self.get_output_processor(field_name)
File "C:\Users\phili\Anaconda3\envs\py35\lib\site-packages\scrapy\loader\__init__.py", line 144, in get_output_processor
self.default_output_processor)
File "C:\Users\phili\Anaconda3\envs\py35\lib\site-packages\scrapy\loader\__init__.py", line 154, in _get_item_field_attr
value = self.item.fields[field_name].get(key, default)
KeyError: 'title'
Spider.py
def parse(self, response):
    for records in response.xpath('//*[starts-with(@class,"record")]'):
        loader = BaseItemLoader(selector=records)
        loader.add_xpath('title', './/div[@class="details"]/h2/a[@href]/text()')
        yield loader.load_item()
Item.py - ItemLoader
class BaseItemLoader(ItemLoader):
    title_in = MapCompose(unidecode)
Conclusion
I am a bit at a loss, as I think I followed the Scrapy manual: I defined the item loader and the field via "title_in", but when I yield the item I get the KeyError. I checked in the shell that the XPath returns the text I want, so at least that part works. Hoping to get some help!
Even if you use an ItemLoader, you should define the Item class first and then pass it to the item loader, either by defining it as the loader's property:
class CustomItemLoader(ItemLoader):
    default_item_class = MyItem
or by passing an instance of it to the loader's constructor:
l = CustomItemLoader(item=Item())
Otherwise the item loader knows nothing about the item and its fields.
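For example, a minimal sketch for this case (the BaseItem name is just a placeholder; title_in is taken from the question) could be:

from scrapy import Item, Field
from scrapy.loader import ItemLoader
from scrapy.loader.processors import MapCompose
from unidecode import unidecode

class BaseItem(Item):
    title = Field()  # the field the KeyError complains about

class BaseItemLoader(ItemLoader):
    default_item_class = BaseItem   # tell the loader which Item class to build
    title_in = MapCompose(unidecode)

With default_item_class set (or with BaseItemLoader(item=BaseItem(), selector=records) in the spider), load_item() can resolve the 'title' field and the KeyError should go away.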
I've been using Scrapy for over a year now with a script that someone else wrote for me. It worked great until 6-8 weeks ago, when it started giving me the following error whenever I try to download. Does anyone have any ideas?
I am running this on Ubuntu 14.04 LTS.
Command: scrapy crawl googleplay
2015-08-30 13:10:31-0400 [googleplay] ERROR: Spider error processing <GET https://accounts.google.com/ServiceLogin?continue=https%3A%2F%2Fplay.google.com%2Fstore%2Fapps%2Fcategory%2FGAME&followup=https%3A%2F%2Fplay.google.com%2Fstore%2Fapps%2Fcategory%2FGAME&passive=1209600&service=googleplay>
Traceback (most recent call last):
File "/usr/lib/python2.7/dist-packages/twisted/internet/base.py", line 800, in runUntilCurrent
call.func(*call.args, **call.kw)
File "/usr/lib/python2.7/dist-packages/twisted/internet/task.py", line 595, in _tick
taskObj._oneWorkUnit()
File "/usr/lib/python2.7/dist-packages/twisted/internet/task.py", line 472, in _oneWorkUnit
result = self._iterator.next()
File "/usr/lib/pymodules/python2.7/scrapy/utils/defer.py", line 57, in <genexpr>
work = (callable(elem, *args, **named) for elem in iterable)
--- <exception caught here> ---
File "/usr/lib/pymodules/python2.7/scrapy/utils/defer.py", line 96, in iter_errback
yield next(it)
File "/usr/lib/pymodules/python2.7/scrapy/contrib/spidermiddleware/offsite.py", line 23, in process_spider_output
for x in result:
File "/usr/lib/pymodules/python2.7/scrapy/contrib/spidermiddleware/referer.py", line 22, in <genexpr>
return (_set_referer(r) for r in result or ())
File "/usr/lib/pymodules/python2.7/scrapy/contrib/spidermiddleware/urllength.py", line 33, in <genexpr>
return (r for r in result or () if _filter(r))
File "/usr/lib/pymodules/python2.7/scrapy/contrib/spidermiddleware/depth.py", line 50, in <genexpr>
return (r for r in result or () if _filter(r))
File "/usr/lib/pymodules/python2.7/scrapy/contrib/spiders/crawl.py", line 73, in _parse_response
for request_or_item in self._requests_to_follow(response):
File "/usr/lib/pymodules/python2.7/scrapy/contrib/spiders/crawl.py", line 52, in _requests_to_follow
links = [l for l in rule.link_extractor.extract_links(response) if l not in seen]
File "/usr/lib/pymodules/python2.7/scrapy/contrib/linkextractors/sgml.py", line 129, in extract_links
links = self._extract_links(body, response.url, response.encoding, base_url)
File "/usr/lib/pymodules/python2.7/scrapy/contrib/linkextractors/sgml.py", line 29, in _extract_links
self.feed(response_text)
File "/usr/lib/python2.7/sgmllib.py", line 104, in feed
self.goahead(0)
File "/usr/lib/python2.7/sgmllib.py", line 174, in goahead
k = self.parse_declaration(i)
File "/usr/lib/python2.7/markupbase.py", line 98, in parse_declaration
decltype, j = self._scan_name(j, i)
File "/usr/lib/python2.7/markupbase.py", line 392, in _scan_name
% rawdata[declstartpos:declstartpos+20])
File "/usr/lib/python2.7/sgmllib.py", line 111, in error
raise SGMLParseError(message)
sgmllib.SGMLParseError: expected name token at '<!\\\\])/g,"\\\\$1").rep'
Here is my GooglePlay spider (after the update), along with the error message I am now receiving:
import string
import requests

from scrapy import log
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.htmlparser import HtmlParserLinkExtractor
from scrapy.selector import Selector
from scrapy.http import Request

from scraper.items import ApkItem
from play import parse_app


class GooglePlaySpider(CrawlSpider):
    name = 'googleplay'
    start_urls = [
        'https://play.google.com/store/apps'
    ]

    rules = (
        Rule(SgmlLinkExtractor(allow=('/store/apps$', )), callback='parse_category_group', follow=True),
        Rule(SgmlLinkExtractor(allow=('/store/apps/category/.*', )), callback='parse_category', follow=True),
        Rule(SgmlLinkExtractor(allow=('/store/search\?.*', )), callback='parse_search', follow=True),
    )

    def parse_category_group(self, response):
        sel = Selector(response)
        category_groups = sel.xpath('//div[@class="padded-content3 app-home-nav"]')

        for category_group in category_groups:
            category_group_name = category_group.xpath('h2/a/text()').extract()
            categories = category_group.xpath('ul/li')
            for category in categories:
                category_name = category.xpath('a/text()').extract()
                category_url = category.xpath('a/@href').extract()[0]

        chars = string.ascii_uppercase + string.digits
        for x in chars:
            yield Request('https://play.google.com/store/search?q=' + x + '&c=apps', callback=self.parse_search)

        for x in chars:
            for y in chars:
                yield Request('https://play.google.com/store/search?q=' + x + y + '&c=apps', callback=self.parse_search)

        for x in chars:
            for y in chars:
                for z in chars:
                    yield Request('https://play.google.com/store/search?q=' + x + y + z + '&c=apps', callback=self.parse_search)

        return

    def parse_category(self, response):
        base_path = response.url.split('?')[0]

        if '/collection/' in response.url:
            sel = Selector(response)
            apps = sel.xpath('//a[@class="title"]')
            has_app = False

            for app in apps:
                has_app = True
                app_name = app.xpath('text()').extract()
                app_url = app.xpath('@href').extract()
                yield Request('https://play.google.com' + app_url[0], meta={'come_from': self.name}, callback=parse_app)

            if has_app:
                m = re.match(r'(.*)\?start=(\d+)&num=24', response.url)
                if m is None:
                    start_number = 24
                else:
                    start_number = int(m.group(2)) + 24
                yield Request(base_path + '?start=' + str(start_number) + '&num=24', callback=self.parse_category)

        return

    def parse_search(self, response):
        m = re.match(r'(.*)&start=(\d+)&num=24', response.url)
        if m is None:
            base_path = response.url
            start_number = 24
        else:
            start_number = int(m.group(2)) + 24
            base_path = m.group(1)

        sel = Selector(response)
        apps = sel.xpath('//a[contains(@href,"/store/apps/details")]')
        has_app = False

        for app in apps:
            has_app = True
            app_url = app.xpath('@href').extract()
            yield Request('https://play.google.com' + app_url[0], meta={'come_from': self.name}, callback=parse_app)

        if has_app:
            yield Request(base_path + '&start=' + str(start_number) + '&num=24', callback=self.parse_search)

        return
**** Error ****
Traceback (most recent call last):
File "/usr/bin/scrapy", line 4, in <module>
execute()
File "/usr/lib/pymodules/python2.7/scrapy/cmdline.py", line 143, in execute
_run_print_help(parser, _run_command, cmd, args, opts)
File "/usr/lib/pymodules/python2.7/scrapy/cmdline.py", line 89, in _run_print_help
func(*a, **kw)
File "/usr/lib/pymodules/python2.7/scrapy/cmdline.py", line 150, in _run_command
cmd.run(args, opts)
File "/usr/lib/pymodules/python2.7/scrapy/commands/crawl.py", line 47, in run
crawler = self.crawler_process.create_crawler()
File "/usr/lib/pymodules/python2.7/scrapy/crawler.py", line 87, in create_crawler
self.crawlers[name] = Crawler(self.settings)
File "/usr/lib/pymodules/python2.7/scrapy/crawler.py", line 25, in __init__
self.spiders = spman_cls.from_crawler(self)
File "/usr/lib/pymodules/python2.7/scrapy/spidermanager.py", line 35, in from_crawler
sm = cls.from_settings(crawler.settings)
File "/usr/lib/pymodules/python2.7/scrapy/spidermanager.py", line 31, in from_settings
return cls(settings.getlist('SPIDER_MODULES'))
File "/usr/lib/pymodules/python2.7/scrapy/spidermanager.py", line 22, in __init__
for module in walk_modules(name):
File "/usr/lib/pymodules/python2.7/scrapy/utils/misc.py", line 68, in walk_modules
submod = import_module(fullpath)
File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/home/darwin/ProjectKrutz/scraper/scraper/spiders/googlePlaySpider.py", line 12, in <module>
class GooglePlaySpider(CrawlSpider):
File "/home/darwin/ProjectKrutz/scraper/scraper/spiders/googlePlaySpider.py", line 18, in GooglePlaySpider
Rule(SgmlLinkExtractor(allow=('/store/apps$', )), callback='parse_category_group', follow=True),
NameError: name 'SgmlLinkExtractor' is not defined
The problem is that SgmlLinkExtractor has problems with comments, and the error message tells you that there is a comment: <!.
So the solution would be to change your spider and replace SgmlLinkExtractor with either
from scrapy.contrib.linkextractors.htmlparser import HtmlParserLinkExtractor
or
from scrapy.contrib.linkextractors.lxmlhtml import LxmlParserLinkExtractor
Naturally, these are only the import statements; you also have to change the Rule definitions where the link extractor is used so that they use one of these extractors.
Without the code I cannot give you more specific advice on where to change things.
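For what it is worth, the NameError in your update happens because the rules still reference SgmlLinkExtractor while only the import line was changed. A rough sketch of the rules with an lxml-based extractor that accepts the same allow patterns (assuming your Scrapy version ships LxmlLinkExtractor, the filtering variant of LxmlParserLinkExtractor) could be:

from scrapy.contrib.linkextractors.lxmlhtml import LxmlLinkExtractor

rules = (
    Rule(LxmlLinkExtractor(allow=('/store/apps$', )), callback='parse_category_group', follow=True),
    Rule(LxmlLinkExtractor(allow=('/store/apps/category/.*', )), callback='parse_category', follow=True),
    Rule(LxmlLinkExtractor(allow=('/store/search\?.*', )), callback='parse_search', follow=True),
)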
I added restrict_xpaths rules to my scrapy spider and now it immediately fails with:
2015-03-16 15:46:53+0000 [tsr] ERROR: Spider error processing <GET http://www.thestudentroom.co.uk/forumdisplay.php?f=143>
Traceback (most recent call last):
File "/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python/twisted/internet/base.py", line 800, in runUntilCurrent
call.func(*call.args, **call.kw)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python/twisted/internet/task.py", line 602, in _tick
taskObj._oneWorkUnit()
File "/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python/twisted/internet/task.py", line 479, in _oneWorkUnit
result = self._iterator.next()
File "/Library/Python/2.7/site-packages/scrapy/utils/defer.py", line 57, in <genexpr>
work = (callable(elem, *args, **named) for elem in iterable)
--- <exception caught here> ---
File "/Library/Python/2.7/site-packages/scrapy/utils/defer.py", line 96, in iter_errback
yield next(it)
File "/Library/Python/2.7/site-packages/scrapy/contrib/spidermiddleware/offsite.py", line 26, in process_spider_output
for x in result:
File "/Library/Python/2.7/site-packages/scrapy/contrib/spidermiddleware/referer.py", line 22, in <genexpr>
return (_set_referer(r) for r in result or ())
File "/Library/Python/2.7/site-packages/scrapy/contrib/spidermiddleware/urllength.py", line 33, in <genexpr>
return (r for r in result or () if _filter(r))
File "/Library/Python/2.7/site-packages/scrapy/contrib/spidermiddleware/depth.py", line 50, in <genexpr>
return (r for r in result or () if _filter(r))
File "/Library/Python/2.7/site-packages/scrapy/contrib/spiders/crawl.py", line 73, in _parse_response
for request_or_item in self._requests_to_follow(response):
File "/Library/Python/2.7/site-packages/scrapy/contrib/spiders/crawl.py", line 52, in _requests_to_follow
links = [l for l in rule.link_extractor.extract_links(response) if l not in seen]
File "/Library/Python/2.7/site-packages/scrapy/contrib/linkextractors/lxmlhtml.py", line 107, in extract_links
links = self._extract_links(doc, response.url, response.encoding, base_url)
File "/Library/Python/2.7/site-packages/scrapy/linkextractor.py", line 94, in _extract_links
return self.link_extractor._extract_links(*args, **kwargs)
File "/Library/Python/2.7/site-packages/scrapy/contrib/linkextractors/lxmlhtml.py", line 50, in _extract_links
for el, attr, attr_val in self._iter_links(selector._root):
**File "/Library/Python/2.7/site-packages/scrapy/contrib/linkextractors/lxmlhtml.py", line 38, in _iter_links
for el in document.iter(etree.Element):
exceptions.AttributeError: 'str' object has no attribute 'iter'**
I cannot understand why this error is happening.
Here is my short Spider:
import scrapy

from tutorial.items import DmozItem
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors import LinkExtractor


class TsrSpider(CrawlSpider):
    name = 'tsr'
    allowed_domains = ['thestudentroom.co.uk']
    start_urls = ['http://www.thestudentroom.co.uk/forumdisplay.php?f=143']

    download_delay = 4
    user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:35.0) Gecko/20100101 Firefox/35.0'

    rules = (
        Rule(
            LinkExtractor(
                allow=('forumdisplay\.php\?f=143\&page=\d',),
                restrict_xpaths=("//li[@class='pager-page_numbers']/a/@href",))),

        Rule(
            LinkExtractor(
                allow=('showthread\.php\?t=\d+\&page=\d+',),
                restrict_xpaths=("//li[@class='pager-page_numbers']/a/@href",)),
            callback='parse_link'),

        Rule(
            LinkExtractor(
                allow=('showthread\.php\?t=\d+',),
                restrict_xpaths=("//tr[@class='thread unread ']",)),
            callback='parse_link'),
    )

    def parse_link(self, response):
        # Iterate over posts.
        for sel in response.xpath("//li[@class='post threadpost old ']"):
            rating = sel.xpath(
                "div[@class='post-footer']//span[@class='score']/text()").extract()
            if not rating:
                rating = 0
            else:
                rating = rating[0]
            item = DmozItem()
            item['post'] = sel.xpath(
                "div[@class='post-content']/blockquote[@class='postcontent restore']/text()").extract()
            item['link'] = response.url
            item['topic'] = response.xpath(
                "//div[@class='forum-header section-header']/h1/span/text()").extract()
            item['rating'] = rating
            yield item
source: http://pastebin.com/YXdWvPgX
Can someone help me out? Where is the mistake? I've been searching for days!
The problem is that restrict_xpaths should point to elements - either the links themselves or containers holding the links - not to attributes:
rules = [
    Rule(LinkExtractor(allow='forumdisplay\.php\?f=143\&page=\d',
                       restrict_xpaths="//li[@class='pager-page_numbers']/a")),

    Rule(LinkExtractor(allow='showthread\.php\?t=\d+\&page=\d+',
                       restrict_xpaths="//li[@class='pager-page_numbers']/a"),
         callback='parse_link'),

    Rule(LinkExtractor(allow='showthread\.php\?t=\d+',
                       restrict_xpaths="//tr[@class='thread unread ']"),
         callback='parse_link'),
]
Tested (worked for me).
FYI, Scrapy defines restrict_xpaths as "expressions pointing to regions":
restrict_xpaths (str or list) – is a XPath (or list of XPath’s) which
defines regions inside the response where links should be extracted
from. If given, only the text selected by those XPath will be scanned
for links. See examples below.