Next Page Selenium with Scrapy not working

I've been struggling with the next-page button; the scraper manages to click through to the next page, but then it keeps returning to the first page and eventually breaks. I just want to scrape all of the next pages (in this case there is only one, but there may be more in the future). Any ideas what might be going wrong here? Here is the code:

import scrapy
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException


class DatatracSpider(scrapy.Spider):
    name = 'data_trac'

    start_urls = [
        # FOR SALE
        'https://www.milieuproperties.com/search-results.aspx?paramb=ADVANCE%20SEARCH:%20Province%20(Western%20Cape),%20%20Area%20(Cape%20Town)']

    def __init__(self):
        #path to driver
        self.driver = webdriver.Chrome('my_path')
    

    def parse(self,response):
        self.driver.get(response.url)
        url = self.driver.current_url
        while True:
            try: 
                elem = WebDriverWait(self.driver, 10).until(EC.element_to_be_clickable((By.XPATH, '//*[@id="ContentPlaceHolder1_lvDataPager1"]/a[text()="Next"]')))
                elem.click()
            except TimeoutException:
                break
            WebDriverWait(self.driver, 10).until(lambda driver: self.driver.current_url != url)
            url = self.driver.current_url
            yield scrapy.Request(url=url, callback=self.parse_page, dont_filter=False)

    def parse_page(self, response):

        offering = response.css('span#ContentPlaceHolder1_lblbreadcum::text').get()
        try:
            offering = 'rent' if 'Rental' in offering else 'buy'
        except TypeError:
            offering = 'buy'

        base_link = response.request.url.split('/')
        try:
            base_link = base_link[0] + '//' + base_link[2] + '/'
        except IndexError:
            pass

        for p in response.xpath('//div[@class="ct-itemProducts ct-u-marginBottom30 ct-hover"]'):
            link = base_link + p.css('a::attr(href)').get()

            yield scrapy.Request(
                link,
                callback=self.parse_property,
                meta={'item': {
                    'url': link,
                    'offering': offering,
                    }},
            )

        # follow to next page

    def parse_property(self, response):
        item = response.meta.get('item')
        # ...
   

Please check this logic, with which I can successfully move to the next page:

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException

driver = webdriver.Chrome('')  # please provide your driver path here
driver.get('https://www.milieuproperties.com/search-results.aspx?paramb=ADVANCE%20SEARCH:%20Province%20(Western%20Cape),%20%20Area%20(Cape%20Town)')

# find_element (singular) returns one clickable element; find_elements returns a list,
# which has no .click()
driver.find_element(By.XPATH, '//*[@id="ContentPlaceHolder1_lvDataPager1"]/a[text()="Next"]').click()

To click the Next button repeatedly until the last page, do the following.

driver.get('https://www.milieuproperties.com/search-results.aspx?paramb=ADVANCE%20SEARCH:%20Province%20(Western%20Cape),%20%20Area%20(Cape%20Town)')
wait = WebDriverWait(driver, 10)
while True:
    try:
        # the Next link gets the aspNetDisabled class on the last page
        wait.until(EC.element_to_be_clickable((By.XPATH, "//a[contains(.,'Next') and not(contains(@class,'aspNetDisabled'))]"))).click()
    except TimeoutException:
        break
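
The loop above only advances the pager; it never captures what is on each page. If you also want the HTML of every page, you can grab driver.page_source once per iteration. A minimal sketch, reusing the driver and wait objects from the snippet above (the page_sources list is a hypothetical collector, not part of the original answer):

page_sources = [driver.page_source]  # page 1
while True:
    try:
        wait.until(EC.element_to_be_clickable((By.XPATH, "//a[contains(.,'Next') and not(contains(@class,'aspNetDisabled'))]"))).click()
    except TimeoutException:
        break
    # NOTE: the ASP.NET postback may still be loading at this point; waiting
    # for the pager's current page number to change (see the answer below)
    # makes the capture reliable
    page_sources.append(driver.page_source)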

For this particular website your code won't work, because the URL does not change after you click the Next button. Try waiting until the current page number changes, instead of waiting for the URL to change:

def parse(self, response):
    self.driver.get(response.url)
    current_page_number = self.driver.find_element(By.CSS_SELECTOR, '#ContentPlaceHolder1_lvDataPager1>span').text
    while True:
        try:
            elem = WebDriverWait(self.driver, 10).until(EC.element_to_be_clickable((By.XPATH, '//*[@id="ContentPlaceHolder1_lvDataPager1"]/a[text()="Next"]')))
            elem.click()
        except TimeoutException:
            break
        # wait until the pager shows a new page number, then record it
        WebDriverWait(self.driver, 10).until(lambda driver: driver.find_element(By.CSS_SELECTOR, '#ContentPlaceHolder1_lvDataPager1>span').text != current_page_number)
        current_page_number = self.driver.find_element(By.CSS_SELECTOR, '#ContentPlaceHolder1_lvDataPager1>span').text
        # the URL never changes, so dont_filter=True keeps Scrapy's duplicate
        # filter from dropping every request after the first
        yield scrapy.Request(url=self.driver.current_url, callback=self.parse_page, dont_filter=True)
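
Note that even with the wait fixed, yielding a scrapy.Request against self.driver.current_url makes Scrapy re-download that URL, and since the URL never changes it will receive page 1 every time. A common workaround, offered here as a sketch rather than as part of the answer above, is to build the response directly from the HTML Selenium has already rendered:

from scrapy.http import HtmlResponse

# inside the while loop, instead of yielding a scrapy.Request:
selenium_response = HtmlResponse(
    url=self.driver.current_url,
    body=self.driver.page_source,
    encoding='utf-8',
)
# feed the rendered page straight to the existing callback
yield from self.parse_page(selenium_response)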