Script crawls only the first page instead of multiple pages

I am trying to scrape multiple pages of a website, but the program only scrapes the first page.

import requests
from bs4 import BeautifulSoup
import re
import json
import time

def make_soup(url):
    """Download the page and return the embedded review data as a dict."""
    source = requests.get(url).text
    soup = BeautifulSoup(source, 'lxml')

    # The review data is embedded as JSON in a <script> tag that assigns
    # window.__WEB_CONTEXT__; find that script and pull out the JSON blob.
    pattern = re.compile(r'window.__WEB_CONTEXT__={pageManifest:(\{.*\})};')
    script = soup.find("script", text=pattern)
    jsonData = pattern.search(script.text).group(1)

    # Narrow the blob down to the entry that holds the cached review filters.
    pattern_number = re.compile(r'\"[0-9]{9,12}\":(\{\"data\":\{\"cachedFilters\":(.*?)\}\}),\"[0-9]{9,11}\"')
    jsonData2 = pattern_number.search(jsonData).group(1)

    dictData = json.loads(jsonData2)
    return dictData

def get_reviews(dictData):
    """Return a list of five dicts, one per review on the page."""

    all_dictionaries = []

    for data in dictData['data']['locations']:
        for reviews in data['reviewListPage']['reviews']:

            review_dict = {}

            review_dict["reviewid"] = reviews['id']
            review_dict["reviewurl"] =  reviews['absoluteUrl']
            review_dict["reviewlang"] = reviews['language']
            review_dict["reviewdate"] = reviews['createdDate']

            userProfile = reviews['userProfile']
            review_dict["author"] = userProfile['displayName']

            all_dictionaries.append(review_dict)

    return all_dictionaries

def main():

    url = 'https://www.tripadvisor.ch/Hotel_Review-g188113-d228146-Reviews-Coronado_Hotel-Zurich.html#REVIEWS'

    dictData = make_soup(url)
    review_list = get_reviews(dictData) # list with five dicts
    #print(review_list)

    page_number = 5

    while page_number <= 260: # number in the URL
        next_url = 'https://www.tripadvisor.ch/Hotel_Review-g188113-d228146-Reviews-or' + str(page_number) + '-Coronado_Hotel-Zurich.html#REVIEWS'
        dictData = make_soup(url)
        review_list2 = get_reviews(dictData)
        print(review_list2)

        page_number += 5
        time.sleep(0.5)

if __name__ == "__main__":
    main()

I am also not sure whether multiple pages can be scraped with this URL at all. The site has 54 pages, but in the URL I always have to add the number 5, like this:

Page 1
https://www.tripadvisor.ch/Hotel_Review-g188113-d228146-Reviews-Coronado_Hotel-Zurich.html#REVIEWS

Page 2
https://www.tripadvisor.ch/Hotel_Review-g188113-d228146-Reviews-or5-Coronado_Hotel-Zurich.html#REVIEWS

Page 3
https://www.tripadvisor.ch/Hotel_Review-g188113-d228146-Reviews-or10-Coronado_Hotel-Zurich.html#REVIEWS
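
(The or number appears to be just the review offset: page N starts at review (N - 1) * 5, and page 1 has no or part at all. A small helper along these lines could build the URL for any page; build_page_url is a name made up for this sketch and is not part of the script above.)

def build_page_url(page):
    # Return the review-page URL for the 1-based page number `page`.
    offset = (page - 1) * 5  # TripAdvisor shows 5 reviews per page
    part = '' if offset == 0 else 'or' + str(offset) + '-'
    return ('https://www.tripadvisor.ch/Hotel_Review-g188113-d228146-Reviews-'
            + part + 'Coronado_Hotel-Zurich.html#REVIEWS')

# build_page_url(1) -> ...-Reviews-Coronado_Hotel-Zurich.html#REVIEWS
# build_page_url(2) -> ...-Reviews-or5-Coronado_Hotel-Zurich.html#REVIEWS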

I don't know whether this is a good idea. Do you have any suggestions? Thanks in advance!

You assign the new URL to next_url, but you read the page with url.

next_url = 'https://www.tripadvisor.ch/Hotel_Review-g188113-d228146-Reviews-or' + str(page_number) + '-Coronado_Hotel-Zurich.html#REVIEWS'
dictData = make_soup(url)

You have to rename the variable:

url = 'https://www.tripadvisor.ch/Hotel_Review-g188113-d228146-Reviews-or' + str(page_number) + '-Coronado_Hotel-Zurich.html#REVIEWS'
dictData = make_soup(url)
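
With that rename, a minimal sketch of the corrected loop could look like this (the 260 limit, the step of 5 and the 0.5 s pause are taken from the original script):

page_number = 5

while page_number <= 260:  # review offset used in the URL, 5 reviews per page
    url = ('https://www.tripadvisor.ch/Hotel_Review-g188113-d228146-Reviews-or'
           + str(page_number) + '-Coronado_Hotel-Zurich.html#REVIEWS')
    dictData = make_soup(url)            # now reads the page that was just built
    review_list2 = get_reviews(dictData)
    print(review_list2)

    page_number += 5
    time.sleep(0.5)                      # small pause between requests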