I am on this page: http://www.metacritic.com/browse/games/title/ps4/a?view=condensed
And I want to go into each item and get the Developer and Genre, but my code doesn't seem to work.
For example, I want to go into this page: http://www.metacritic.com/game/playstation-4/angry-birds-star-wars
Then leave it and continue through the rest, doing the same and adding the results to a database. What can I change in my code to make it work? Right now the dev and genre columns in the database are null, but it gets the rest of the data, so it's as if the spider never enters parseGame.
I also added print statements to parseGame, and none of them print.
from scrapy.spider import BaseSpider
from scrapy.selector import Selector
from scrapy.http import Request
from scrapy.selector import HtmlXPathSelector
from metacritic.items import MetacriticItem
import MySQLdb
import re
from string import lowercase
class MetacriticSpider(BaseSpider):
    def start_requests(self):
        # iterate through ps4 pages
        for c in lowercase:
            for i in range(self.max_id):
                yield Request('http://www.metacritic.com/browse/games/title/ps4/{0}?page={1}'.format(c, i), callback=self.parseps4)

    # gets the developer and genre of a game
    def parseGame(self, response):
        print("Here")
        item = response.meta['item']
        db1 = MySQLdb.connect("localhost", "root", "andy", "metacritic")
        cursor = db1.cursor()
        hxs = HtmlXPathSelector(response)
        sites = hxs.select('//div[@class="product_wrap"]')
        items = []
        item['dev'] = site.xpath('.//span[contains(@class, "summary_detail developer")]/span[1]/text()').extract()
        item['genre'] = site.xpath('.//span[contains(@class, "summary_detail product_genre")]/span[1]/text()').extract()
        cursor.execute("INSERT INTO ps4 (dev, genre) VALUES (%s,%s)", [item['dev'][0], item['genre'][0]])
        items.append(item)
        print item['dev']
        print item['genre']

    def parseps4(self, response):
        # some local variables
        db1 = MySQLdb.connect("localhost", "root", "andy", "metacritic")
        cursor = db1.cursor()
        hxs = HtmlXPathSelector(response)
        sites = hxs.select('//div[@class="product_wrap"]')
        items = []
        # iterate through each product block
        for site in sites:
            with db1:
                item = MetacriticItem()
                # set the item fields
                item['title'] = site.xpath('.//div[contains(@class, "basic_stat product_title")]/a/text()').extract()
                item['cscore'] = site.xpath('.//div[contains(@class, "basic_stat product_score brief_metascore")]/div[1]/text()').extract()
                item['uscore'] = site.xpath('.//div/ul/li/span[contains(@class, "data textscore")]/text()').extract()
                item['release'] = site.xpath('.//li[contains(@class, "stat release_date full_release_date")]/span[2]/text()').extract()
                # check whether a score is attached; if so, add it to the database
                if ("tbd" in item['cscore'][0] and "tbd" not in item['uscore'][0]) or ("tbd" not in item['cscore'][0] and "tbd" in item['uscore'][0]) or ("tbd" not in item['cscore'][0] and "tbd" not in item['uscore'][0]):
                    cursor.execute("INSERT INTO ps4 (title, criticalscore, userscore, releasedate) VALUES (%s,%s,%s,%s)", [(' '.join(item['title'][0].split())).replace("(PS4)", "", 1), item['cscore'][0], item['uscore'][0], item['release'][0]])
                items.append(item)
                itemLink = site.xpath('.//div[contains(@class, "basic_stat product_title")]/a/@href').extract()
                req = Request('http://www.metacritic.com' + itemLink[0], callback=self.parseGame)
                req.meta['item'] = item
Several problems in the code:
the meta argument should contain a dictionary: {'item': item}
HtmlXPathSelector is deprecated - use Selector instead
you shouldn't do MySQL inserts inside the spider - use an item pipeline instead (see Writing items to a MySQL database in Scrapy)
you need to take the first element of the extract() call and strip() it (this way the Fields hold clean strings rather than lists, without leading and trailing spaces and newlines)
Here's the code without the MySQL-related calls:
from string import lowercase

from scrapy.item import Field, Item
from scrapy.spider import BaseSpider
from scrapy.http import Request
from scrapy.selector import HtmlXPathSelector, Selector

from metacritic.items import MetacriticItem


class MetacriticSpider(BaseSpider):
    name = 'metacritic'
    allowed_domains = ['metacritic.com']
    max_id = 1  # your max_id value goes here!!!

    def start_requests(self):
        for c in lowercase:
            for i in range(self.max_id):
                yield Request('http://www.metacritic.com/browse/games/title/ps4/{0}?page={1}'.format(c, i), callback=self.parseps4)

    def parseGame(self, response):
        item = response.meta['item']

        hxs = HtmlXPathSelector(response)
        site = hxs.select('//div[@class="product_wrap"]')

        # get additional data!!!

        yield item

    def parseps4(self, response):
        hxs = Selector(response)
        sites = hxs.select('//div[@class="product_wrap"]')

        for site in sites:
            item = MetacriticItem()
            item['title'] = site.xpath('.//div[contains(@class, "basic_stat product_title")]/a/text()').extract()[0].strip()
            item['cscore'] = site.xpath('.//div[contains(@class, "basic_stat product_score brief_metascore")]/div[1]/text()').extract()[0].strip()
            item['uscore'] = site.xpath('.//div/ul/li/span[contains(@class, "data textscore")]/text()').extract()[0].strip()
            item['release'] = site.xpath('.//li[contains(@class, "stat release_date full_release_date")]/span[2]/text()').extract()[0].strip()

            link = site.xpath('.//div[contains(@class, "basic_stat product_title")]/a/@href').extract()[0]
            yield Request('http://www.metacritic.com/' + link, meta={'item': item}, callback=self.parseGame)
It works for me - I see the yielded items from parseGame() on the console.
Make sure it yields items first, then look at the !!! comments and fill in those lines accordingly; a sketch of one way to fill in parseGame() follows.
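For example, reusing the developer/genre XPaths from the original spider (this is only a sketch; it assumes the game page's markup still matches those selectors and that MetacriticItem defines dev and genre fields):

    def parseGame(self, response):
        item = response.meta['item']

        hxs = Selector(response)
        # XPaths taken from the question; adjust them if the game page's markup differs
        dev = hxs.xpath('//span[contains(@class, "summary_detail developer")]/span[1]/text()').extract()
        genre = hxs.xpath('//span[contains(@class, "summary_detail product_genre")]/span[1]/text()').extract()

        # keep clean strings, and None if the page doesn't have the field
        item['dev'] = dev[0].strip() if dev else None
        item['genre'] = genre[0].strip() if genre else None
        yield item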
Once you see items on the console, try creating a database pipeline to write them to MySQL; a minimal sketch is below.
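A minimal item pipeline sketch, assuming the same local MySQL credentials from the question and a ps4 table that already has the matching columns (the class name here is illustrative):

import MySQLdb


class MySQLStorePipeline(object):

    def open_spider(self, spider):
        # connection details copied from the question; adjust as needed
        self.db = MySQLdb.connect("localhost", "root", "andy", "metacritic")
        self.cursor = self.db.cursor()

    def close_spider(self, spider):
        self.db.commit()
        self.db.close()

    def process_item(self, item, spider):
        # column names follow the INSERT statements used in the question
        self.cursor.execute(
            "INSERT INTO ps4 (title, criticalscore, userscore, releasedate, dev, genre) VALUES (%s, %s, %s, %s, %s, %s)",
            [item.get('title'), item.get('cscore'), item.get('uscore'),
             item.get('release'), item.get('dev'), item.get('genre')])
        return item

Then enable it in settings.py, e.g. ITEM_PIPELINES = ['metacritic.pipelines.MySQLStorePipeline'] (the module path is an assumption about your project layout), and Scrapy will route every yielded item through process_item().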
I'm trying to scrape this site using Scrapy, but it returns all the values in a single cell; I expect each value in a different row.
example:
milage: 25
milage: 377
milage: 247433
milage: 464130
but I'm getting the data like this
example:
milage:[u'25',
u'377',
u'247433',
u'399109',
u'464130',
u'399631',
u'435238',
u'285000',
u'287470',
u'280000']
Here is my code:
import scrapy
from ..items import ExampleItem
from scrapy.selector import HtmlXPathSelector
url = 'https://example.com'
class Example(scrapy.Spider):
    name = 'example'
    allowed_domains = ['www.example.com']
    start_urls = [url]

    def parse(self, response):
        hxs = HtmlXPathSelector(response)
        item_selector = hxs.select('//div[@class="listing_format card5 relative"]')
        for fields in item_selector:
            item = ExampleItem()
            item['Mileage'] = fields.select('//li[strong="Mileage"]/span/text()').extract()
            yield item
You didn't show your site, but maybe you need a relative XPath:
item['Mileage'] = fields.select('.//li[strong="Mileage"]/span/text()').extract_first()
It sounds like you need to iterate over your mileages.
for fields in item_selector:
    milages = fields.select('//li[strong="Mileage"]/span/text()').extract()
    for milage in milages:
        item = CommercialtrucktraderItem()
        item['Mileage'] = milage
        yield item
Also consider making the fields.select('//li[strong="Mileage"]/span/text()').extract() call more specific, so it only matches entries inside the current listing.
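Putting the two suggestions together, a sketch of the loop could look like this (the container XPath is the one from the question; extract_first() and response.xpath() assume a reasonably recent Scrapy version):

def parse(self, response):
    # one listing per iteration, then a relative query inside that listing
    for fields in response.xpath('//div[@class="listing_format card5 relative"]'):
        item = ExampleItem()
        item['Mileage'] = fields.xpath('.//li[strong="Mileage"]/span/text()').extract_first()
        yield item

With the relative .// prefix, each item gets only its own mileage value instead of the whole page's list.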
I'm trying to scrape details from a subsite and merge them with the details scraped from the main site. I've been researching through Stack Overflow as well as the documentation, but I still can't get my code to work. It seems that my function to extract additional details from the subsite does not work. If anyone could take a look, I would be very grateful.
# -*- coding: utf-8 -*-
from scrapy.spiders import Spider
from scrapy.selector import Selector
from scrapeInfo.items import infoItem
import pyodbc
class scrapeInfo(Spider):
name = "info"
allowed_domains = ["http://www.nevermind.com"]
start_urls = []
def start_requests(self):
#Get infoID and Type from database
self.conn = pyodbc.connect('DRIVER={SQL Server};SERVER=server;DATABASE=dbname;UID=user;PWD=password')
self.cursor = self.conn.cursor()
self.cursor.execute("SELECT InfoID, category FROM dbo.StageItem")
rows = self.cursor.fetchall()
for row in rows:
url = 'http://www.nevermind.com/info/'
InfoID = row[0]
category = row[1]
yield self.make_requests_from_url(url+InfoID, InfoID, category, self.parse)
def make_requests_from_url(self, url, InfoID, category, callback):
request = Request(url, callback)
request.meta['InfoID'] = InfoID
request.meta['category'] = category
return request
def parse(self, response):
hxs = Selector(response)
infodata = hxs.xpath('div[2]/div[2]') # input item path
itemPool = []
InfoID = response.meta['InfoID']
category = response.meta['category']
for info in infodata:
item = infoItem()
item_cur, item_hist = InfoItemSubSite()
# Stem Details
item['id'] = InfoID
item['field'] = info.xpath('tr[1]/td[2]/p/b/text()').extract()
item['field2'] = info.xpath('tr[2]/td[2]/p/b/text()').extract()
item['field3'] = info.xpath('tr[3]/td[2]/p/b/text()').extract()
item_cur['field4'] = info.xpath('tr[4]/td[2]/p/b/text()').extract()
item_cur['field5'] = info.xpath('tr[5]/td[2]/p/b/text()').extract()
item_cur['field6'] = info.xpath('tr[6]/td[2]/p/b/#href').extract()
# Extract additional information about item_cur from refering site
# This part does not work
if item_cur['field6'] = info.xpath('tr[6]/td[2]/p/b/#href').extract():
url = 'http://www.nevermind.com/info/sub/' + item_cur['field6'] = info.xpath('tr[6]/td[2]/p/b/#href').extract()[0]
request = Request(url, housingtype, self.parse_item_sub)
request.meta['category'] = category
yield self.parse_item_sub(url, category)
item_his['field5'] = info.xpath('tr[5]/td[2]/p/b/text()').extract()
item_his['field6'] = info.xpath('tr[6]/td[2]/p/b/text()').extract()
item_his['field7'] = info.xpath('tr[7]/td[2]/p/b/#href').extract()
item['subsite_dic'] = [dict(item_cur), dict(item_his)]
itemPool.append(item)
yield item
pass
# Function to extract additional info from the subsite, and return it to the original item.
def parse_item_sub(self, response, category):
hxs = Selector(response)
subsite = hxs.xpath('div/div[2]') # input base path
category = response.meta['category']
for i in subsite:
item = InfoItemSubSite()
if (category == 'first'):
item['subsite_field1'] = i.xpath('/td[2]/span/#title').extract()
item['subsite_field2'] = i.xpath('/tr[4]/td[2]/text()').extract()
item['subsite_field3'] = i.xpath('/div[5]/a[1]/#href').extract()
else:
item['subsite_field1'] = i.xpath('/tr[10]/td[3]/span/#title').extract()
item['subsite_field2'] = i.xpath('/tr[4]/td[1]/text()').extract()
item['subsite_field3'] = i.xpath('/div[7]/a[1]/#href').extract()
return item
pass
I've been looking at these examples together with a lot of other examples (Stack Overflow is great for that!), as well as the Scrapy documentation, but I'm still unable to understand how to get the details sent from one function merged with the scraped items from the original function.
how do i merge results from target page to current page in scrapy?
How can i use multiple requests and pass items in between them in scrapy python
What you are looking for here is called request chaining. Your problem is yielding one item built from several requests. The solution is to chain the requests while carrying your item in the request's meta attribute.
Example:
def parse(self, response):
    item = MyItem()
    item['name'] = response.xpath("//div[@id='name']/text()").extract()
    more_page = # some page that offers more details
    # go to the more page and take your item with you
    yield Request(more_page,
                  self.parse_more,
                  meta={'item': item})

def parse_more(self, response):
    # get your item from the meta
    item = response.meta['item']
    # fill it in with more data and yield!
    item['last_name'] = response.xpath("//div[@id='lastname']/text()").extract()
    yield item
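If the item needs data from more than one extra page (as in the question above, where current and historical details live on separate sub-pages), the same pattern can be chained again. This is only a sketch; the second-page selector and the history field are hypothetical:

def parse_more(self, response):
    item = response.meta['item']
    item['last_name'] = response.xpath("//div[@id='lastname']/text()").extract()
    # hypothetical link to yet another detail page
    next_page = response.xpath("//a[@id='history']/@href").extract_first()
    # keep carrying the item instead of yielding it here
    yield Request(response.urljoin(next_page),
                  self.parse_history,
                  meta={'item': item})

def parse_history(self, response):
    item = response.meta['item']
    # 'history' is a hypothetical field added for illustration
    item['history'] = response.xpath("//div[@id='history']/text()").extract()
    # the item is complete only after the last request in the chain
    yield item

(extract_first() and response.urljoin() assume Scrapy 1.0 or newer; on older versions use extract()[0] and urlparse.urljoin() instead.)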
I'm using the below Scrapy code, which is fully functioning, to scrape data from a website. The scraper reads a text list of product IDs, which are built into URLs in start_urls. How can I add the current start_url as an additional element to my item array?
from scrapy.spider import Spider
from scrapy.selector import Selector
from site_scraper.items import SiteScraperItem
class MySpider(Spider):
name = "product"
allowed_domains = ["site.com"]
url_list = open("productIDs.txt")
base_url = "http://www.site.com/p/"
start_urls = [base_url + url.strip() for url in url_list.readlines()]
url_list.close()
def parse(self, response):
hxs = Selector(response)
titles = hxs.xpath("//span[#itemprop='name']")
items = []
item = SiteScraperItem()
item ["Classification"] = titles.xpath("//div[#class='productSoldMessage']/text()").extract()[1:]
item ["Price"] = titles.xpath("//span[#class='pReg']/text()").extract()
item ["Name"] = titles.xpath("//span[#itemprop='name']/text()").extract()
try:
titles.xpath("//link[#itemprop='availability']/#href").extract()[0] == 'http://schema.org/InStock'
item ["Availability"] = 'In Stock'
except:
item ["Availability"] = 'Out of Stock'
if len(item ["Name"]) == 0:
item ["OnlineStatus"] = 'Offline'
item ["Availability"] = ''
else:
item ["OnlineStatus"] = 'Online'
items.append(item)
return items
I am exporting this data to CSV using the below command line code and would like the URL to be an additional value in my CSV file.
scrapy crawl product -o items.csv -t csv
Thanks in advance for your help!
Add a new Field to your SiteScraperItem Item class and set it to response.url in the parse() method.
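A minimal sketch of that change, keeping the field names from the question (the new URL field name is just an illustration):

# items.py
from scrapy.item import Item, Field

class SiteScraperItem(Item):
    Classification = Field()
    Price = Field()
    Name = Field()
    Availability = Field()
    OnlineStatus = Field()
    URL = Field()  # new field for the request URL

# in MySpider.parse(), alongside the other assignments
item["URL"] = response.url

The new value should then appear as an additional column when you run the same scrapy crawl product -o items.csv -t csv command.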
I'm trying to extract job offer information from this website, and this is my code:
from scrapy.spider import Spider
from scrapy.selector import Selector
from tutorial.items import DmozItem
class DmozSpider(Spider):
name = "myspider"
allowed_domains =["tanitjobs.com/"]
start_urls =["http://tanitjobs.com/search-results-jobs/"]
def parse(self, response):
sel = Selector(response)
sites = sel.xpath('//div[#class="offre"]/div[#class="detail"]')
items = []
item = DmozItem()
for site in sites:
item['title'] = site.xpath('a/text()').extract()
item['link'] = site.xpath('a/#href').extract()
item['desc'] = site.xpath('div[#class="descriptionjob"]/text()').extract()
items.append(item)
return items
but the result is incorrect (the title and desc fields come back empty):
{'desc': [],
'link': [u'lien'],
'title': []}
and many blocks like this ...
item = DmozItem() should be called on each loop iteration; otherwise you are overwriting the same item and appending that same object to the items list over and over.
It should look like:
from scrapy.spider import Spider
from scrapy.selector import Selector
from tutorial.items import DmozItem
class DmozSpider(Spider):
name = "myspider"
allowed_domains =["tanitjobs.com/"]
start_urls =["http://tanitjobs.com/search-results-jobs/"]
def parse(self, response):
sel = Selector(response)
sites = sel.xpath('//div[#class="offre"]/div[#class="detail"]')
items = []
for site in sites:
item = DmozItem()
item['title'] = site.xpath('a/text()').extract()
item['link'] = site.xpath('a/#href').extract()
item['desc'] = site.xpath('div[#class="descriptionjob"]/text()').extract()
items.append(item)
return items
Your title XPath didn't take into account the <strong> tags wrapping the text, and your desc XPath needs to go down another div to retrieve the required information.
I just noticed that the xpath for job description varies. The xpath in the code below returns job descriptions for the first three results but not subsequent ones. You would need to examine subsequent results to determine how the xpath changes to retrieve descriptions for those jobs.
def parse(self, response):
    sel = Selector(response)
    sites = sel.xpath('//div[@class="offre"]/div[@class="detail"]')
    items = []
    for site in sites:
        item = DmozItem()
        item['title'] = site.xpath('normalize-space(a/strong/text())').extract()
        item['link'] = site.xpath('a/@href').extract()
        item['desc'] = site.xpath('normalize-space(./div/div[@class="descriptionjob"]/text())').extract()
        items.append(item)
    return items
I have a spider written as below, but it doesn't seem to be getting to the function parse. Could someone take a quick look and let me know if I'm missing something? Am I implementing the SgmlLinkExtractor properly?
The spider should pick out all the links from the left sidebar, create a request from them, then parse the next page for a facebook link. It should also do this for other pages as specified in the SgmlLinkExtractor. At the moment, the spider is running, but not parsing any pages.
class PrinzSpider(CrawlSpider):
name = "prinz"
allowed_domains = ["prinzwilly.de"]
start_urls = ["http://www.prinzwilly.de/"]
rules = (
Rule(
SgmlLinkExtractor(
allow=(r'veranstaltungen-(.*)', ),
),
callback='parse'
),
)
def parse(self, response):
hxs = HtmlXPathSelector(response)
startlinks = hxs.select("//ul[#id='mainNav2']/li/a")
print startlinks
for link in startlinks:
giglink = link.select('#href').extract()
item = GigItem()
item['gig_link'] = giglink
request = Request(item['gig_link'], callback='parse_gig_page')
item.meta['item'] = item
yield request
def parse_gig_page(self, response):
hxs = HtmlXPathSelector(response)
item = response.meta['item']
gig_content = hxs.select("//div[#class='n']/table/tbody").extract()
fb_link = re.findall(r'(?:www.facebook.com/)(.*)', gig_content)
print '********** FB LINK ********', fb_link
return item
EDIT:
settings.py
BOT_NAME = 'gigscraper'
SPIDER_MODULES = ['gigscraper.spiders']
NEWSPIDER_MODULE = 'gigscraper.spiders'
ITEM_PIPLINES = ['gigscraper.pipelines.GigscraperPipeline']
items.py
from scrapy.item import Item, Field
class GigItem(Item):
    gig_link = Field()
pipelines.py
class GigscraperPipeline(object):
    def process_item(self, item, spider):
        print 'here I am in the pipeline'
        return item
Two problems:
extract() returns a list - you are missing [0]
Request's callback should not be a string - use self.parse_gig_page
Here's the modified code (working):
import re
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.http import Request
from scrapy.item import Item, Field
from scrapy.selector import HtmlXPathSelector
class GigItem(Item):
    gig_link = Field()


class PrinzSpider(CrawlSpider):
    name = "prinz"
    allowed_domains = ["prinzwilly.de"]
    start_urls = ["http://www.prinzwilly.de/"]

    rules = (Rule(SgmlLinkExtractor(allow=(r'veranstaltungen-(.*)',)), callback='parse'),)

    def parse(self, response):
        hxs = HtmlXPathSelector(response)
        startlinks = hxs.select("//ul[@id='mainNav2']/li/a")
        for link in startlinks:
            item = GigItem()
            item['gig_link'] = link.select('@href').extract()[0]
            yield Request(item['gig_link'], callback=self.parse_gig_page, meta={'item': item})

    def parse_gig_page(self, response):
        hxs = HtmlXPathSelector(response)
        item = response.meta['item']
        gig_content = hxs.select("//div[@class='n']/table/tbody").extract()[0]
        fb_link = re.findall(r'(?:www.facebook.com/)(.*)', gig_content)
        print '********** FB LINK ********', fb_link
        return item
Hope that helps.