如何获取第二个网页的数据?

How can I get the data of the second web page?

我正在尝试使用 rvest 包从 R 中的 Web 获取数据:https://etfdb.com/stock/AAPL/

但是无论我怎么尝试,我都只能得到第一页的table。有人可以帮我做这个吗?非常感谢。

见下面的代码。tb1 和 tb2 是完全一样的!这很奇怪。

# Both URLs differ only after the "#". A URL fragment is handled client-side
# (by the page's JavaScript) and is never sent to the server, so both requests
# return the same HTML document -- which is why tb1 and tb2 come out identical.
url1 <- "https://etfdb.com/stock/AAPL/#etfs&sort_name=weighting&sort_order=desc&page=1"
url2 <- "https://etfdb.com/stock/AAPL/#etfs&sort_name=weighting&sort_order=desc&page=2"

# Parse each document and collect all <table> nodes.
tbs1 <- rvest::html_nodes(xml2::read_html(url1), "table")
tbs2 <- rvest::html_nodes(xml2::read_html(url2), "table")
# Convert the first table of each document into a data frame.
tb1 <- rvest::html_table(tbs1[1])[[1]]
tb2 <- rvest::html_table(tbs2[1])[[1]]

该网站通过 GET 请求获取 JSON 数据来填充 table。经过一些尝试,我写出了下面处理 JSON 数据的代码(代码不算漂亮,但能用):

library(rjson)
library(rvest)
library(writexl)

# The ETF table on etfdb.com is populated client-side from a JSON endpoint.
# Request each 25-row page of that endpoint directly instead of scraping the
# rendered HTML (which always shows page 1).
base_url <- paste0(
  "https://etfdb.com/data_set/?tm=40274&cond={%22by_stock%22:25}",
  "&no_null_sort=&count_by_id=true&limit=25&sort=weighting&order=desc",
  "&limit=25&offset="
)
lastpage <- 9  # number of result pages to fetch -- TODO confirm against the site

# Collect each page into a list, then bind once at the end.
# (Growing a data frame with rbind() inside the loop is O(n^2).)
pages <- vector("list", lastpage)
for (i in seq_len(lastpage)) {
  x <- fromJSON(file = paste0(base_url, 25 * (i - 1)))
  rows <- x[[2]]  # second element holds the row records
  pages[[i]] <- data.frame(
    matrix(unlist(rows), nrow = length(rows), byrow = TRUE),
    stringsAsFactors = FALSE
  )
}
df <- do.call(rbind, pages)

# Columns X1, X3 and X5 contain HTML fragments; strip the markup to plain text.
strip_html <- function(html_cells) {
  vapply(
    html_cells,
    function(h) html_text(read_html(h), trim = TRUE),
    character(1),
    USE.NAMES = FALSE
  )
}
df$X1 <- strip_html(df$X1)
df$X3 <- strip_html(df$X3)
df$X5 <- strip_html(df$X5)

# Keep the columns of interest and give them readable names.
df <- data.frame(df$X1, df$X3, df$X5, df$X7, df$X9)
colnames(df) <- c("Ticker", "ETF", "ETFdb.com Category", "Expense Ratio", "Weighting")

# Write the result to an Excel workbook.
write_xlsx(
  df,
  path = "stock.xlsx",
  col_names = TRUE,
  format_headers = TRUE,
  use_zip64 = FALSE
)

更新:您可以在 table 元素的 data-url 属性中找到数据源。下面是更简便的更新版代码:

library(rjson)
library(rvest)
library(writexl)

# Generalized version: discover the JSON endpoint from the table's "data-url"
# attribute, so the script works for any stock ticker.
stock_ticket <- "AAPL"
url <- paste0("https://etfdb.com/stock/", stock_ticket)
lastpage <- 9  # number of result pages to fetch -- TODO confirm against the site

# The #etfs table advertises its data source in its data-url attribute.
data_url <- read_html(url) %>%
  html_node(xpath = "//table[@id='etfs']") %>%
  html_attr("data-url")

# Fetch each 25-row page into a list, then bind once at the end.
# (Growing a data frame with rbind() inside the loop is O(n^2).)
pages <- vector("list", lastpage)
for (i in seq_len(lastpage)) {
  x <- fromJSON(
    file = paste0("https://etfdb.com", data_url, "&offset=", 25 * (i - 1))
  )
  rows <- x[[2]]  # second element holds the row records
  pages[[i]] <- data.frame(
    matrix(unlist(rows), nrow = length(rows), byrow = TRUE),
    stringsAsFactors = FALSE
  )
}
df <- do.call(rbind, pages)

# Columns X1, X3 and X5 contain HTML fragments; strip the markup to plain text.
strip_html <- function(html_cells) {
  vapply(
    html_cells,
    function(h) html_text(read_html(h), trim = TRUE),
    character(1),
    USE.NAMES = FALSE
  )
}
df$X1 <- strip_html(df$X1)
df$X3 <- strip_html(df$X3)
df$X5 <- strip_html(df$X5)

# Keep the columns of interest and give them readable names.
df <- data.frame(df$X1, df$X3, df$X5, df$X7, df$X9)
colnames(df) <- c("Ticker", "ETF", "ETFdb.com Category", "Expense Ratio", "Weighting")

# Write the result to an Excel workbook.
write_xlsx(
  df,
  path = "stock.xlsx",
  col_names = TRUE,
  format_headers = TRUE,
  use_zip64 = FALSE
)