Scrapy only going through first 5 links with next_page_url

My code seems to go through only the first 5 links it requests and then stops when requesting the 6th. I have tried using start_urls and next_page_url; both only pull data from the first 5 pages.

import scrapy
from scrapy.crawler import CrawlerProcess
import time
class finvizSpider(scrapy.Spider):
    global tickers
    global urlcheck
    urlcheck = 1
    tickers = []
    name = "finviz"

    start_urls =  ["https://finviz.com/screener.ashx?v=111&f=cap_small,geo_usa,sh_avgvol_o300,sh_opt_option,sh_short_low&ft=4&o=change"]

    def parse(self, response):


        tickers.append(response.xpath('//a[@class="screener-link-primary"]/text()').extract())
        print(tickers)
        next_page_url = "https://finviz.com/"
        html = response.xpath(
            '//a[@class="screener_arrow"]/@href').extract()[0]
        print(html)
        next_page_url += html
        print(next_page_url)
        if next_page_url is not None:

            yield scrapy.Request(next_page_url, callback=self.parse)
    def returnTickers(self):
        newTickerList= []
        for lists in tickers:
            if lists:
                for t in lists:
                    newTickerList.append(t)
        return newTickerList

The error is shown below:

Any help is appreciated.

EDIT:

I have updated the code, but it still seems to throw an error.

import scrapy
from scrapy.crawler import CrawlerProcess
import time
from bs4 import BeautifulSoup
class finvizSpider(scrapy.Spider):
    global tickers
    global urlcheck
    urlcheck = 1
    tickers = []
    name = "finviz"

    start_urls = [
        "https://finviz.com/screener.ashx?v=111&f=cap_small,geo_usa,sh_avgvol_o300,sh_opt_option,sh_short_low&ft=4&o=-change"]

    def parse(self, url):
        raw_html = scrapy.Request(url)
        good_html = BeautifulSoup(raw_html, 'html.parser')
        first_part = "https://finviz.com/"
        tickers.append([x.text for x in good_html.findAll('a', {'class': 'screener-link-primary'})])
        second_part = good_html.find('a', {'class': 'screener_arrow'})['href']

        # Check if there is next page
        if second_part:
            next_url = first_part + second_part
            self.parse(next_url)
    def returnTickers(self):
        newTickerList= []
        for lists in tickers:
            if lists:
                for t in lists:
                    newTickerList.append(t)
        return newTickerList


stock_list = finvizSpider()
process = CrawlerProcess()
process.crawl(finvizSpider)
process.start()
list2 = stock_list.returnTickers()

This is the error I get when I run it.

It looks like scrapy only goes through 5 callbacks here, so instead of using callbacks I suggest iterating over a list that contains all the links. You can do that with BeautifulSoup, and it is very simple.

Install:

pip install BeautifulSoup4

Import BS4:

from bs4 import BeautifulSoup

The rest of the code:

def parse(self, url):
    raw_html    = scrapy.Request(url)
    good_html   = BeautifulSoup(raw_html, 'html.parser')
    first_part  = "https://finviz.com/"
    tickers.append([x.text for x in good_html.findAll('a', {'class':'screener-link-primary'})])
    second_part = good_html.find('a', {'class':'screener_arrow'})['href']

    # Check if there is next page
    if second_part:
        next_url    = first_part + second_part
        self.parse(next_url)
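
Note that scrapy.Request(url) only constructs a request object and does not download anything by itself, so the snippet above will not fetch the pages on its own. A minimal self-contained sketch of the same idea (looping over the pagination with BeautifulSoup instead of Scrapy callbacks) could use the requests library to do the downloading — requests, the collect_tickers name, and the User-Agent handling are assumptions here, not part of the original answer:

import requests
from bs4 import BeautifulSoup

def collect_tickers(start_url):
    tickers = []
    url = start_url
    while url:
        # Download the page (finviz may reject requests without a browser-like User-Agent)
        resp = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'})
        soup = BeautifulSoup(resp.text, 'html.parser')
        # Collect the ticker symbols on this page
        tickers.extend(a.text for a in soup.find_all('a', {'class': 'screener-link-primary'}))
        # Follow the "next page" arrow until there is none
        arrow = soup.find('a', {'class': 'screener_arrow'})
        url = 'https://finviz.com/' + arrow['href'] if arrow else None
    return tickers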

if next_page_url is not None: — next_page_url will never be None; what you need to check is whether html is None.

When html is None, the line next_page_url += html will give you an error, so you first need to check it for None.

And if html is None you can't do html[0] either, so replace extract with extract_first (I used get).
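
To illustrate the difference (a small sketch, assuming response is the Scrapy response object inside parse):

# .extract() returns a list, so [0] raises IndexError when nothing matches
html = response.xpath('//a[@class="screener_arrow"]/@href').extract()[0]

# .get() / .extract_first() return None instead, which can be checked safely
html = response.xpath('//a[@class="screener_arrow"]/@href').get()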

The fixed code:

import scrapy
from scrapy.crawler import CrawlerProcess
import time


class FinvizSpider(scrapy.Spider):
    name = "finviz"

    urlcheck = 1
    tickers = []

    start_urls = ["https://finviz.com/screener.ashx?v=111&f=cap_small,geo_usa,sh_avgvol_o300,sh_opt_option,sh_short_low&ft=4&o=change"]


    def parse(self, response):
        self.tickers.append(response.xpath('//a[@class="screener-link-primary"]/text()').extract())
        print(self.tickers)
        next_page_url = "https://finviz.com/"
        html = response.xpath('//a[@class="screener_arrow"]/@href').get()
        print(html)
        if html is not None:
            next_page_url += html
            print(next_page_url)
            yield scrapy.Request(next_page_url, callback=self.parse)

    def returnTickers(self):
        newTickerList= []
        for lists in self.tickers:
            if lists:
                for t in lists:
                    newTickerList.append(t)
        return newTickerList
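
If you also want to read the collected tickers back after the crawl finishes (which is what returnTickers is for), one way is to keep a reference to the crawler and query its spider once process.start() returns. This is a sketch assuming Scrapy's CrawlerProcess.create_crawler API, not part of the answer above:

from scrapy.crawler import CrawlerProcess

process = CrawlerProcess()
crawler = process.create_crawler(FinvizSpider)  # keep a handle on the crawler
process.crawl(crawler)
process.start()  # blocks until the crawl is finished

# the spider instance that actually ran is available on the crawler
all_tickers = crawler.spider.returnTickers()
print(all_tickers)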