无法实现任何逻辑来使用 puppeteer 从最内层页面抓取内容

Unable to implement any logic to scrape content from innermost pages using puppeteer

我创建了一个脚本,使用 puppeteer 通过点击"下一页"按钮遍历网站的多个列表页,来抓取不同作者的链接。该脚本看起来工作正常。

虽然这个网站的内容是静态的,但我特意在下面的脚本中使用了 puppeteer,只是为了学习如何解析内页的内容。

现在我想再深入一层:从每个作者的详情页(如上面抓到的链接)中抓取 description。我该如何实现?

const puppeteer = require('puppeteer');

/**
 * Scrape quote texts and author-page links from quotes.toscrape.com,
 * paging forward via the "next" button.
 *
 * @param {number} [pagesToScrape=1] - number of listing pages to visit
 * @returns {Promise<Array<{authorUrl: string, title: string}>>} one entry per quote
 */
async function run(pagesToScrape = 1) {
  // headless:false is deliberate here (the author wants to watch the navigation
  // while learning puppeteer).
  const browser = await puppeteer.launch({ headless: false });
  try {
    const [page] = await browser.pages();
    await page.goto('https://quotes.toscrape.com/');

    const urls = [];
    for (let currentPage = 1; currentPage <= pagesToScrape; currentPage++) {
      // Make sure the quotes are rendered before scraping each page.
      await page.waitForSelector('.quote');

      const newUrls = await page.evaluate(() =>
        Array.from(document.querySelectorAll('.quote'), (item) => ({
          authorUrl:
            'https://quotes.toscrape.com' +
            item.querySelector('small.author + a').getAttribute('href'),
          title: item.querySelector('span.text').innerText,
        })),
      );
      urls.push(...newUrls);

      if (currentPage < pagesToScrape) {
        // Attach the navigation wait BEFORE (concurrently with) the click.
        // Awaiting each step sequentially — as in
        // Promise.all([await click, await wait]) — races: the navigation can
        // finish before the wait is ever registered.
        await Promise.all([
          page.waitForNavigation(),
          page.click('li.next > a'),
        ]);
      }
    }
    return urls;
  } finally {
    // Close the browser even when scraping throws, so no Chromium
    // process is leaked; the original only closed on the success path.
    await browser.close();
  }
}
run(3).then(console.log).catch(console.error);

我会走这条路:

const puppeteer = require('puppeteer');

// Browser handle kept at script scope so the .finally() at the bottom can
// close it even when main() throws partway through.
let browser;

(async function main() {
  // defaultViewport: null lets the page use the real window size;
  // headless: false so the run can be observed while learning.
  browser = await puppeteer.launch({ headless: false, defaultViewport: null });

  // Two tabs: the default tab walks the paginated quote listing, a second
  // tab is navigated to each author's "about" page, so the listing tab
  // never loses its place.
  const [pageQuotes] = await browser.pages();
  const pageAbout = await browser.newPage();
  await pageQuotes.bringToFront(); // Otherwise, click on the next page link does not work.

  const pagesToScrape = 3;

  await pageQuotes.goto('https://quotes.toscrape.com/');
  let currentPage = 1;

  // quotes: author name -> array of full quote texts.
  // abouts: author title (from the details page) -> biography text.
  const data = { quotes: {}, abouts: {} };
  // Authors recur across listing pages; only fetch each about-page once.
  const visitedAbouts = new Set();

  while (currentPage <= pagesToScrape) {
    await pageQuotes.waitForSelector('.quote');

    // Pull both the [author, quoteText] pairs and the absolute about-page
    // URLs out of the listing page in a single evaluate round-trip.
    const { quotes, aboutURLs } = await pageQuotes.evaluate(() => ({
      quotes: Array.from(
        document.querySelectorAll('.quote'),
        quote => [quote.querySelector('small.author').innerText, quote.innerText],
      ),
      aboutURLs: Array.from(
        document.querySelectorAll('.quote small.author + a[href]'),
        quote => quote.href, // .href yields the absolute URL, no manual prefixing needed
      ),
    }));

    for (const [author, quote] of quotes) {
      if (data.quotes[author] === undefined) data.quotes[author] = [];
      data.quotes[author].push(quote);
    }

    // Visit each not-yet-seen author page sequentially in the second tab.
    for (const aboutURL of aboutURLs) {
      if (!visitedAbouts.has(aboutURL)) {
        visitedAbouts.add(aboutURL);

        await pageAbout.goto(aboutURL);
        await pageAbout.waitForSelector('div.author-details');

        const { title, about } = await pageAbout.evaluate(() => ({
          title: document.querySelector('div.author-details h3.author-title').innerText,
          about: document.querySelector('div.author-details').innerText,
        }));

        data.abouts[title] = about;
      }
    }

    if (currentPage < pagesToScrape) {
      const nextLink = await pageQuotes.waitForSelector('li.next > a');

      // Start the click and the navigation wait concurrently: awaiting the
      // click first could let the navigation finish before the wait attaches.
      await Promise.all([
        nextLink.click(),
        pageQuotes.waitForNavigation(),
      ]);
    }
    currentPage++;
  }

  console.log(JSON.stringify(data, null, '  '));
})().catch(console.error).finally(async () => { if (browser) await browser.close(); });