Scrapy finishes before all URLs are processed

I am trying to scrape the IMDb website. I need to start from an actors page, open each actor's page, then open the movie page for each of the first 15 movies in their filmography and pull some information from it. My code is:

import scrapy


class Actor(scrapy.Item):
    name = scrapy.Field()
    url = scrapy.Field()
    bio = scrapy.Field()
    born = scrapy.Field()
    movies = scrapy.Field()

class Film(scrapy.Item):
    title = scrapy.Field()
    url = scrapy.Field()
    cast = scrapy.Field()


class ImdbSpider(scrapy.Spider):
    name = "imdb"
    allowed_domains = ["imdb.com"]
    start_urls = ['https://www.imdb.com/search/name/?gender=male%2Cfemale&ref_=nv_cel_m',]

    def parse_cast(self, response):
      actor = response.meta['actor']
      filmsToProcess = response.meta['filmsToProcess']

      film = Film()
      # ... getting some information about the film
      actor['movies'].append(film)

      if not filmsToProcess:
        yield actor
        return
      else:
        film = filmsToProcess.pop()
        url = 'https://www.imdb.com' + film.extract().strip()
        yield scrapy.Request(url + 'fullcredits', callback=self.parse_cast, meta = {
            "actor": actor,
            'url': url,
            "filmsToProcess": filmsToProcess
        })


    def parse_bio(self, response):
      actor = Actor()
      #... getting some information about the actor
      actor["movies"] = list()

      # getting list of films from filmography
      filmsToProcess = response.xpath('//*[@id="filmography"]/div[@class="filmo-category-section"]')[0].xpath('./div[contains(@class, "filmo-row")]/b/a/@href')[:10]
      film = filmsToProcess.pop()
      url = 'https://www.imdb.com' + film.extract().strip() 

      yield scrapy.Request(url + 'fullcredits', callback=self.parse_cast, meta = {
          "actor": actor,
          'url': url,
          "filmsToProcess": filmsToProcess
      })

    def parse(self, response):
      table_rows = response.xpath('.//*[@class="lister-list"]/div')
      for row in table_rows[:25]:
        name = row.xpath("./div[@class='lister-item-content']/h3/a/text()").extract_first().strip()  
        url = 'https://www.imdb.com/' + row.xpath("./div[@class='lister-item-content']/h3/a/@href").extract_first().strip() + '/'  
        yield scrapy.Request(url, callback=self.parse_bio, meta = {
               "url": url,
               "name": name
              })

The problem is that I only get 15 actors in the result (even though I iterate over 25 of them in the parse method), and some of the actors never make it into the output for some reason. I thought it might be due to multithreading, but why does Scrapy finish the process before all the actors have been handled?

I suspect the reason you aren't seeing all of the actors is that you don't yield the actor item until all of their movies have been parsed. Scrapy filters out duplicate requests, so if any of the movies are duplicated (for example, two actors who share a film), I believe it simply stops that chain. One (untested) option is to pass the dont_filter option to your movie Requests so that duplicated requests are still made.
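A minimal (and, as said, untested) sketch of that change, applied to the Request built in the question's parse_cast; dont_filter is a standard scrapy.Request argument:

# Same Request as in parse_cast, but with dont_filter=True so a movie
# URL that was already requested for another actor is not dropped by
# Scrapy's built-in duplicate-request filter.
yield scrapy.Request(url + 'fullcredits', callback=self.parse_cast, dont_filter=True, meta={
    "actor": actor,
    'url': url,
    "filmsToProcess": filmsToProcess
})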

If you can separate the actors from the films, things become much simpler.

I would parse all of the actor information from the actor page rather than passing anything along in meta, since all of that information lives on a single page. Below is an example to get you started.

from scrapy import Spider


class IMDBSpider(Spider):
    name = "imdb"
    allowed_domains = ["imdb.com"]
    start_urls = [
        "https://www.imdb.com/search/name/?gender=male%2Cfemale&ref_=nv_cel_m"
    ]

    def parse(self, response):
        for actor in response.css(".lister-item-header > a")[:25]:
            yield response.follow(actor, callback=self.parse_actor)

    def parse_actor(self, response):
        actor = response.css("h1 > .itemprop::text").get()
        movie_links = response.css(".filmo-category-section > div > b > a")
        movies = movie_links.css("::text").getall()
        # bio = ...
        # born = ...
        yield {"actor": actor, "url": response.url, "movies": movies}

        for link in movie_links[:15]:
            yield response.follow(link, callback=self.parse_movie)
    
    def parse_movie(self, response):
        # parse the movie page from here
        pass
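Assuming this spider sits in an ordinary Scrapy project, you can run it and write the yielded items out with the built-in feed export:

scrapy crawl imdb -o actors.json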