Why is Scrapy not giving all the results, and why is the rules part not working either?
This script only gives me the first result with .extract()[0]; if I change the 0 to a 1, I get the next item. Why isn't it iterating over the whole XPath? The rules part isn't working either. I know the problem lies in response.xpath, but how do I deal with it? My other scripts run fine, but this one doesn't:
import scrapy
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.selector import Selector
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from urlparse import urljoin


class CompItem(scrapy.Item):
    title = scrapy.Field()
    link = scrapy.Field()
    data = scrapy.Field()
    name = scrapy.Field()
    date = scrapy.Field()


class criticspider(CrawlSpider):
    name = "hand"
    allowed_domains = ["consumercomplaints.in"]
    start_urls = ["http://www.consumercomplaints.in/bysubcategory/mobile-handsets/page/1"]
    rules = (
        Rule(
            SgmlLinkExtractor(allow=('"/bysubcategory/mobile-handsets/page/1/+"',)),
            callback="parse_start_url",
            follow=True),
    )

    def parse(self, response):
        sites = response.xpath('//table[@width="100%"]')
        items = []
        for site in sites:
            item = CompItem()
            item['date'] = site.xpath('.//td[@class="small"]/text()').extract()[1]
            item['name'] = site.xpath('.//td[@class="small"]//a/text()').extract()[0]
            item['title'] = site.xpath('.//td[@class="complaint"]/h4/a/text()').extract()[0]
            item['link'] = site.xpath('.//td[@class="complaint"]/h4/a/@href').extract()[0]
            if item['link']:
                if 'http://' not in item['link']:
                    item['link'] = urljoin(response.url, item['link'])
                yield scrapy.Request(item['link'],
                                     meta={'item': item},
                                     callback=self.anchor_page)
            items.append(item)

    def anchor_page(self, response):
        old_item = response.request.meta['item']
        old_item['data'] = response.xpath('.//td[@class="compl-text"]/div/text()').extract()
        yield old_item
The problem is in how you define sites. Currently, //table[@width="100%"] just matches the complete table, so the loop body runs once and your .extract()[0] calls always pick the first match from the whole page. Instead, find all of the div elements that have an id attribute directly inside td tags:
sites = response.xpath("//td/div[@id]")
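As a quick sanity check, you can compare the two expressions in scrapy shell before wiring the new one into the spider (a sketch; the exact node counts depend on the live page):

scrapy shell "http://www.consumercomplaints.in/bysubcategory/mobile-handsets"
>>> len(response.xpath('//table[@width="100%"]'))  # the old expression: the wrapping table, not individual complaints
>>> len(response.xpath('//td/div[@id]'))           # the new expression: expect one node per complaint listed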
As for the rules part, here is the approach I would take: collect the search results in a callback other than parse. (CrawlSpider uses the parse method internally to implement its crawling logic, so overriding it breaks the rules.) The complete code, with a few more improvements:
from urlparse import urljoin

import scrapy
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors import LinkExtractor


class CompItem(scrapy.Item):
    title = scrapy.Field()
    link = scrapy.Field()
    data = scrapy.Field()
    name = scrapy.Field()
    date = scrapy.Field()


class criticspider(CrawlSpider):
    name = "hand"
    allowed_domains = ["consumercomplaints.in"]
    start_urls = ["http://www.consumercomplaints.in/bysubcategory/mobile-handsets"]

    # follow the pagination links and parse every result page
    rules = (
        Rule(LinkExtractor(restrict_xpaths="//div[@class='pagelinks']"),
             follow=True,
             callback="parse_results"),
    )

    def parse_results(self, response):
        # one div[@id] per complaint on the page
        sites = response.xpath("//td/div[@id]")
        for site in sites:
            item = CompItem()
            item['date'] = site.xpath('.//td[@class="small"]/text()').extract()[1]
            item['name'] = site.xpath('.//td[@class="small"]//a/text()').extract()[0]
            item['title'] = site.xpath('.//td[@class="complaint"]/h4/a/text()').extract()[0]
            item['link'] = site.xpath('.//td[@class="complaint"]/h4/a/@href').extract()[0]
            if item['link']:
                if 'http://' not in item['link']:
                    item['link'] = urljoin(response.url, item['link'])
                # fetch the complaint page itself to fill in the data field
                yield scrapy.Request(item['link'],
                                     meta={'item': item},
                                     callback=self.anchor_page)

    def anchor_page(self, response):
        old_item = response.request.meta['item']
        old_item['data'] = response.xpath('.//td[@class="compl-text"]/div/text()').extract()
        yield old_item
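If you save the spider as, say, hand_spider.py (the file name here is just for illustration), you can run it standalone and dump the collected items to JSON:

scrapy runspider hand_spider.py -o items.json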