Scraping multiple website data from a table
I'm practicing scraping websites and putting the information into a table. I imported the link, but I seem to be getting an error message based on the url.
from bs4 import BeautifulSoup
import requests
import pandas as pd

eastern_basketball_players = {'mens_basketball': 'https://www.espn.com/nba/seasonleaders/_/league/east'}

def scraping_processing(season_leaders, url):
    player = []
    teamcity = []
    games_played = []
    minutes_per_game = []
    points = []
    espn_score = []

    for link in url:
        page = requests.get(link)
        soup = BeautifulSoup(page.content, 'html.parser')

        raw_names = soup.find_all('td', class_='Player')
        teams = soup.find_all('td', class_='Team')
        games = soup.find_all('td', class_='Games Played')
        minutes = soup.find_all('td', class_='Minutes Per Game')
        pts = soup.find_all('td', class_='Points Per Game')
        espnscores = soup.find_all('td', class_='EPSN Rating')

        for raw_name in raw_names:
            player.append(raw_name.get_text().strip())

        for team in teams:
            teamcity.append(team.get_text().strip())

        for game in games:
            games_played.append(raw_name.get_text().strip())

        for minute in minutes:
            minutes_per_game.append(minute.get_text().strip())

        for pt in pts:
            points.append(pt.get_text().strip())

        for espnscore in espnscores:
            espn_score.append(espnscore.get_text().strip())

    filename = season_leaders + '.csv'

    df = pd.DataFrame()
    df['Names'] = player
    df['Teams'] = teamcity
    df['Games Played'] = games_played
    df['Minutes Per Game'] = minutes_per_game
    df['Points'] = points
    df['ESPN Scores'] = espn_score

    df.to_csv(filename, index=False)

east_mens_basketball_df = scraping_processing('mens_basketball', eastern_basketball_players['mens_basketball'])
The error message I'm getting says:
MissingSchema Traceback (most recent call last)
<ipython-input-49-ca254e49e854> in <module>
----> 1 east_mens_basketball_df = scraping_processing('mens_basketball', eastern_basketball_players['mens_basketball'])
<ipython-input-48-2f1a85c4b240> in scraping_processing(season_leaders, url)
10 for link in url:
11
---> 12 page = requests.get(link)
13
14 soup = BeautifulSoup(page.content, 'html.parser')
~\anaconda3\Python\lib\site-packages\requests\api.py in get(url, params, **kwargs)
74
75 kwargs.setdefault('allow_redirects', True)
---> 76 return request('get', url, params=params, **kwargs)
77
78
~\anaconda3\Python\lib\site-packages\requests\api.py in request(method, url, **kwargs)
59 # cases, and look like a memory leak in others.
60 with sessions.Session() as session:
---> 61 return session.request(method=method, url=url, **kwargs)
62
63
~\anaconda3\Python\lib\site-packages\requests\sessions.py in request(self, method, url, params, data, headers, cookies, files, auth, timeout, allow_redirects, proxies, hooks, stream, verify, cert, json)
526 hooks=hooks,
527 )
--> 528 prep = self.prepare_request(req)
529
530 proxies = proxies or {}
~\anaconda3\Python\lib\site-packages\requests\sessions.py in prepare_request(self, request)
454
455 p = PreparedRequest()
--> 456 p.prepare(
457 method=request.method.upper(),
458 url=request.url,
~\anaconda3\Python\lib\site-packages\requests\models.py in prepare(self, method, url, headers, files, data, params, auth, cookies, hooks, json)
314
315 self.prepare_method(method)
--> 316 self.prepare_url(url, params)
317 self.prepare_headers(headers)
318 self.prepare_cookies(cookies)
~\anaconda3\Python\lib\site-packages\requests\models.py in prepare_url(self, url, params)
388 error = error.format(to_native_string(url, 'utf8'))
389
--> 390 raise MissingSchema(error)
391
392 if not host:
MissingSchema: Invalid URL 'h': No schema supplied. Perhaps you meant http://h?
I double-checked the url and ran the code again, but I'm still getting this error message. Can someone help me?
You have https://www.espn.com/nba/seasonleaders/_/league/east as your url, but you are accessing it with a for loop, i.e. for link in url:. Since url is a string, the first iteration gives you link = 'h', which is not a valid url. Don't use a for loop here: either get rid of the loop, or put your url in a list.
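To see what the loop is actually doing, compare iterating over the string with iterating over a one-element list:

url = 'https://www.espn.com/nba/seasonleaders/_/league/east'

# A string is a sequence of characters, so the loop hands requests
# one character at a time -- the first being 'h'.
for link in url:
    print(link)  # prints 'h'
    break

# A list of urls iterates over whole urls instead.
for link in [url]:
    print(link)  # prints the full url once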
But more importantly, you need to review HTML and how to identify tags and attributes. There are no <td> tags with class="Player", class="Team", etc. in the HTML.
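If you want to check that yourself, here is a minimal sketch (assuming the page is reachable from your machine) that prints the class attribute of the first few table rows, so you can see which class names the page actually uses:

import requests
from bs4 import BeautifulSoup

page = requests.get('https://www.espn.com/nba/seasonleaders/_/league/east')
soup = BeautifulSoup(page.content, 'html.parser')

# Print the class attribute of the first few <tr> elements to see
# which classes really appear (e.g. 'colhead', 'stathead').
for tr in soup.find_all('tr')[:5]:
    print(tr.get('class'))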
Look at this code, then look at the HTML, to understand how it iterates.
from bs4 import BeautifulSoup
import requests
import pandas as pd

eastern_basketball_players = {'mens_basketball': ['https://www.espn.com/nba/seasonleaders/_/league/east']}

def scraping_processing(season_leaders, url):
    rows = []

    for link in url:
        page = requests.get(link)
        soup = BeautifulSoup(page.content, 'html.parser')

        # The header row has class "colhead"; its <td> texts are the column names.
        colNames = soup.find('tr', class_='colhead')
        colNames = [x.text for x in colNames.find_all('td')]

        # Every other row is a data row; skip the repeated header/section rows.
        trs = soup.find_all('tr')
        for row in trs:
            if row['class'][0] not in ['colhead', 'stathead']:
                rows.append([x.text for x in row.find_all('td')])

    filename = season_leaders + '.csv'
    df = pd.DataFrame(rows, columns=colNames)
    df.to_csv(filename, index=False)

east_mens_basketball_df = scraping_processing('mens_basketball', eastern_basketball_players['mens_basketball'])
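As a side note, when a page serves a plain HTML <table>, pandas can often parse it in one call. A minimal sketch, assuming lxml (or html5lib) is installed and that the site accepts the request; you may still need to drop repeated header rows afterwards:

import requests
import pandas as pd

url = 'https://www.espn.com/nba/seasonleaders/_/league/east'

# Fetch the HTML first; some sites reject requests that lack a
# browser-like User-Agent header.
resp = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'})

# read_html returns one DataFrame per <table> found in the document.
tables = pd.read_html(resp.text)
df = tables[0]
print(df.head())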