How can I convert a list of crawled data to an Excel column?
import openpyxl
xl_file = openpyxl.Workbook()
xl_sheet = xl_file.active

from urllib.request import urlopen
from bs4 import BeautifulSoup

stockItem = '028300'

url = 'http://finance.naver.com/item/sise_day.nhn?code=' + stockItem
html = urlopen(url)
source = BeautifulSoup(html.read(), "html.parser")

maxPage = source.find_all("table", align="center")
mp = maxPage[0].find_all("td", class_="pgRR")
mpNum = int(mp[0].a.get('href')[-3:])

for page in range(1, 10):
    print(str(page))
    url = 'http://finance.naver.com/item/sise_day.nhn?code=' + stockItem + '&page=' + str(page)
    html = urlopen(url)
    source = BeautifulSoup(html.read(), "html.parser")
    srlists = source.find_all("tr")
    isCheckNone = None

    if (page % 1) == 0:
        time.sleep(0)

    for i in range(1, len(srlists) - 1):
        if srlists[i].span != isCheckNone:
            srlists[i].td.text
            data1 = srlists[i].find_all("td", align="center")
            data2 = srlists[i].find_all("td", class_="num")
            print(srlists[i].find_all("td", align="center")[0].text, srlists[i].find_all("td", class_="num")[0].text)
            for item in data1:
                xl_sheet.append([item.get_text()])
This is what I did to scrape stock data from the site.
The scraping itself works fine.
However, I cannot save the data to an Excel file.
I tried, but only the date data shows up and the price data is missing.
How can I write the result to an Excel file?
You missed two things:
1) an import error (time is used but never imported), and
2) data2, which contains the prices, is never appended to the Excel sheet.
Here is the final code that produces the output you want. Just change the folder path to wherever you want the Excel file saved.
import time
from openpyxl import Workbook
from urllib.request import urlopen
from bs4 import BeautifulSoup

xl_file = Workbook()
xl_sheet = xl_file.active

stockItem = '028300'

url = 'http://finance.naver.com/item/sise_day.nhn?code=' + stockItem
html = urlopen(url)
source = BeautifulSoup(html.read(), "html.parser")

maxPage = source.find_all("table", align="center")
mp = maxPage[0].find_all("td", class_="pgRR")
mpNum = int(mp[0].a.get('href')[-3:])

for page in range(1, 10):
    print(str(page))
    url = 'http://finance.naver.com/item/sise_day.nhn?code=' + stockItem + '&page=' + str(page)
    html = urlopen(url)
    source = BeautifulSoup(html.read(), "html.parser")
    srlists = source.find_all("tr")
    isCheckNone = None

    if (page % 1) == 0:
        time.sleep(0)

    for i in range(1, len(srlists) - 1):
        if srlists[i].span != isCheckNone:
            data1 = srlists[i].find_all("td", align="center")  # date cells
            data2 = srlists[i].find_all("td", class_="num")    # price cells
            # append each date together with its matching price as one row
            for item1, item2 in zip(data1, data2):
                xl_sheet.append([item1.get_text(), item2.get_text()])

print(xl_sheet)
xl_file.save(r'C:\Users\Asus\Desktop\vi.xlsx')
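To sanity-check the output, you can read the saved workbook back with openpyxl (a minimal sketch; the path simply mirrors the save call above):

from openpyxl import load_workbook

# reopen the file that was just saved and print the first few rows
wb = load_workbook(r'C:\Users\Asus\Desktop\vi.xlsx')
ws = wb.active
for row in ws.iter_rows(min_row=1, max_row=5, values_only=True):
    print(row)  # each row is a (date, price) pair of cell values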
Suggestion: you can download stock data much more easily with the yfinance package for Python.
See https://pypi.org/project/yfinance/
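For example (a minimal sketch, assuming the Yahoo Finance symbol for KOSDAQ code 028300 is 028300.KQ; the ticker and output filename are placeholders to adjust), yfinance returns a pandas DataFrame that can be written straight to Excel:

import yfinance as yf

# download daily price history; "028300.KQ" is an assumed Yahoo symbol
data = yf.download("028300.KQ", period="3mo", interval="1d")

# a pandas DataFrame writes directly to an .xlsx file (via openpyxl)
data.to_excel("stock_prices.xlsx")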