如何使用 python 抓取 Ajax 网页

How to scrape Ajax webpage using python

我正在学习 Python 网页抓取技术,但在抓取像下面这样的 Ajax 页面时遇到了问题。

我想抓取页面中出现的所有药物名称和详细信息。我已经阅读了 Stack Overflow 上的大部分相关答案,但抓取后仍没有得到正确的数据。我还尝试过使用 selenium 抓取,以及伪造 POST 请求发送,但都失败了。

所以请帮助我解决这个 Ajax 抓取主题,特别是这个页面,因为 ajax 在从下拉选项中选择一个选项时被触发。 另外,请为我提供一些 ajax 页面抓取的资源。

# 使用 Selenium

"""First attempt: drive the page with Selenium, then read the AJAX result.

BUG in the original: after Selenium clicked the "Ohio" option (which fires
the AJAX request), the code re-downloaded ``browser.current_url`` with
``requests``. That returns the *initial* server-rendered HTML — the AJAX
content only exists in the live browser DOM. Read ``browser.page_source``
from the driver instead.
"""
from time import sleep

from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
import bs4 as bs  # kept: available for parsing the rendered HTML
import lxml       # kept: bs4 lxml backend

path_to_chrome = '/home/brutal/Desktop/chromedriver'

# Selenium 4: executable_path= was removed; the driver path goes in a Service.
browser = webdriver.Chrome(service=Service(path_to_chrome))

url = 'https://www.gianteagle.com/Pharmacy/Savings/4-10-Dollar-Drug-Program/Generic-Drug-Program/'

browser.get(url)

# Selecting a state in the dropdown is what triggers the AJAX request
# that fills the results table.
browser.find_element(
    By.XPATH,
    '//*[@id="ctl00_RegionPage_RegionPageMainContent_RegionPageContent_userControl_StateList"]'
    '/option[contains(text(), "Ohio")]',
).click()

# Give the asynchronous response time to arrive and render.
sleep(3)

# FIX: the rendered (post-AJAX) HTML lives in the driver, not at the URL.
print(browser.page_source)

browser.quit()

您可以从 ChromeDriver 官方网站(https://chromedriver.chromium.org/downloads)下载 ChromeDriver。

normalize-space 用于去除网页文本中的多余空白字符,例如 \xa0(不间断空格)。

"""Scrape the drug table for every state by iterating the dropdown options.

For each state option the script clicks the option (firing the AJAX
request), waits for the table to render, then parses the rendered DOM
with lxml. Result: ``data`` maps state name -> list of row dicts.

FIXES vs. the original:
* ``driver.find_element_by_xpath`` was removed in Selenium 4 — the original
  inconsistently mixed it with the new ``find_element`` API; now only the
  new API (``By.XPATH`` / ``By.ID``) is used.
* ``webdriver.Chrome(path)`` (positional driver path) is likewise gone in
  Selenium 4; the path is wrapped in a ``Service``.
* The nine copy-pasted ``normalize-space`` lookups are collapsed into a
  column map plus one helper.
"""
from time import sleep

from lxml.html import fromstring
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By

# ASP.NET control IDs, hoisted so each XPath is written once.
STATE_LIST_ID = ('ctl00_RegionPage_RegionPageMainContent_'
                 'RegionPageContent_userControl_StateList')
RESULTS_GRID_ID = ('ctl00_RegionPage_RegionPageMainContent_'
                   'RegionPageContent_userControl_gridSearchResults')

# Output dict key -> CSS class of the <td> holding that value.
COLUMN_CLASSES = {
    'med_type': 'medication-type',
    'generic_name': 'generic-name',
    'brand_name': 'brand-name hidden-xs',
    'strength': 'strength',
    'form': 'form',
    'qty_30_day': '30-qty',
    'price_30_day': '30-price',
    'qty_90_day': '90-qty hidden-xs',
    'price_90_day': '90-price hidden-xs',
}


def _cell_text(row, css_class):
    """Return the whitespace-normalized text of the row's <td class=...>.

    normalize-space strips junk such as \xa0 (non-breaking spaces) from
    the page text.
    """
    return row.xpath(f'normalize-space(.//td[@class="{css_class}"])')


data = {}

driver = webdriver.Chrome(
    service=Service('PATH TO YOUR DRIVER/chromedriver')
)  # i.e '/home/superman/www/myproject/chromedriver'
driver.get('https://www.gianteagle.com/Pharmacy/Savings/4-10-Dollar-Drug-Program/Generic-Drug-Program/')

# Loop states (options 2..6 of the dropdown; option 1 is the placeholder).
for i in range(2, 7):
    # open dropdown
    driver.find_element(By.ID, STATE_LIST_ID).click()

    # click state — this fires the AJAX request that fills the grid
    option_xpath = f'//*[@id="{STATE_LIST_ID}"]/option[{i}]'
    driver.find_element(By.XPATH, option_xpath).click()

    # let the AJAX response arrive and render
    sleep(3)

    # parse the rendered DOM
    tree = fromstring(driver.page_source)

    state = tree.xpath(option_xpath + '/text()')[0]
    data[state] = []

    # Loop product rows inside the state's results grid.
    for line in tree.xpath(f'//*[@id="{RESULTS_GRID_ID}"]/tbody/tr[@style]'):
        data[state].append(
            {field: _cell_text(line, cls)
             for field, cls in COLUMN_CLASSES.items()}
        )

print('data:', data)
driver.quit()