使用 requests.post() 从网页中抓取文本

Using requests.post() to scrape text from web page

我想从房地产列表网页上抓取文本。当我预先知道 URL 时,我就成功了,但我无法搜索邮政编码,然后抓取该搜索结果的页面。

# I know the URL, and I can scrape data from the page successfully
# I know the URL, and I can scrape data from the page successfully
from lxml import html
import requests

# This URL is the page that follows a zip code search on the 'mlslistings.com' homepage.
url = 'https://www.mlslistings.com/Search/Result/6b1a2c4f-3976-43d8-94a7-5742859f26f1/1'
page = requests.get(url)
tree = html.fromstring(page.content)
# Returns addresses found on the listings page.
address_raw = [str(t) for t in tree.xpath('//a[@class="search-nav-link"]//text()')]

# I want to do the zip code search on the homepage, and scrape the page that
# follows, but this time I get an empty list.
url = 'https://www.mlslistings.com/'
data = {
    'transactionType': 'buy',
    'listing_status': 'Active',
    'searchTextType': '',
    'searchText': '94618',
    '__RequestVerificationToken': 'CfDJ8K_Ve2wchEZEvUasrULD6jPUmwSLRaolrWoc10T8tMJD8LVSE2c4zMKhNIRwuuwzLZPPsypcZzWaXTHX7Unk1NtVdtAIqIY8AL0DThPMv3xwVMhrzC8UumhLGSXh00oaDHDreGBlWXB2NmRAJi3MbqE',
}
post = requests.post(url, data=data)
tree = html.fromstring(post.content)
# Returns empty list! why?
address_raw = [str(t) for t in tree.xpath('//a[@class="search-nav-link"]//text()')]

您可能需要使用正确的 __RequestVerificationToken,该令牌可以通过首先请求主页来获得。

下面显示了使用 BeautifulSoup 提取它的方法(请随意使用您自己的方法)。您还需要将 post 请求提交到正确的 URL。

from bs4 import BeautifulSoup
from lxml import html
import requests

# Fetch the homepage first so a fresh anti-forgery token can be pulled from it.
sess = requests.Session()
home_page = sess.get('https://www.mlslistings.com/')
soup = BeautifulSoup(home_page.content, "html.parser")
token_input = soup.find("input", attrs={"name": "__RequestVerificationToken"})
rvt = token_input['value']

# Submit the zip-code search to the endpoint the site actually posts to.
data = {
    'transactionType': 'buy',
    'listing_status': 'Active',
    'searchTextType': '',
    'searchText': '94618',
    '__RequestVerificationToken': rvt,
}
search_results = sess.post("https://www.mlslistings.com/Search/ResultPost", data=data)

# Returns addresses found on the listings page.
tree = html.fromstring(search_results.content)
address_raw = [str(t) for t in tree.xpath('//a[@class="search-nav-link"]//text()')]

print(address_raw)

这将为您提供如下地址:

['5351 Belgrave Pl, Oakland, CA, 94618', '86 Starview Dr, Oakland, CA, 94618', '1864 Grand View Drive, Oakland, CA, 94618', '5316 Miles Ave, Oakland, CA, 94618', '280 Caldecott Ln, Oakland, CA, 94618', '6273 Brookside Ave, Oakland, CA, 94618', '50 Elrod Ave, Oakland, CA, 94618', '5969 Keith Avenue, Oakland, CA, 94618', '6 Starview Dr, Oakland, CA, 94618', '375 62nd St, Oakland, CA, 94618', '5200 Masonic Ave, Oakland, CA, 94618', '49 Starview, Oakland, CA, 94618', '4863 Harbord Dr, Oakland, CA, 94618', '5200 Cochrane Ave, Oakland, CA, 94618', '6167 Acacia Ave, Oakland, CA, 94618', '5543 Claremont Ave, Oakland, CA, 94618', '5283 Broadway Ter, Oakland, CA, 94618', '0 Sheridan Rd, Oakland, CA, 94618']

为了避免在有效负载中对名称和值进行硬编码,并即时获取验证令牌,您可以尝试下面的方法。该脚本基于 lxml 解析器。两种方法任选其一,不要同时使用。

import requests
from lxml.html import fromstring

gurl = 'https://www.mlslistings.com/'  # url for get requests
purl = 'https://www.mlslistings.com/Search/ResultPost'  # url for post requests

with requests.Session() as session:
    # Collect every named <input> from the homepage form; this picks up the
    # verification token (and the other default field values) automatically.
    homepage = fromstring(session.get(gurl).text)
    payload = {}
    for field in homepage.cssselect('input[name]'):
        payload[field.get('name')] = field.get('value')
    payload['searchText'] = '94618'

    # Post the search and scrape the addresses from the result page.
    result_tree = fromstring(session.post(purl, data=payload).text)
    address = [a.text.strip() for a in result_tree.cssselect('.listing-address a.search-nav-link')]
    print(address)