股票代码列表的属性错误
Attribute Error with List of Stock Tickers
我对为什么会收到属性错误感到有点困惑。仅当我放置等于 stock_list 的列表时才会发生此错误。如果我打印列表然后复制并粘贴我没有得到错误/
我尝试从代码顶部输入技术行情,但尝试时出现属性错误,当我先打印列表然后复制并粘贴时不会发生这种情况,这应该是同一回事?
file = 'techtickerlist.csv'
with open(file) as f:
reader = csv.reader(f)
technologyTickers = []
for row in reader:
technologyTickers.append(row[0])
def scrape(stock_list, interested, technicals):
SuggestedStocks = []
for each_stock in stock_list:
try:
technicals = scrape_yahoo(each_stock)
condition_1 = float(technicals.get('Return on Equity',0).replace('%','').replace('N/A','-100').replace(',','')) > 25
condition_2 = float(technicals.get('Trailing P/E',0).replace('N/A','0').replace(',','')) < 25
condition_3 = float(technicals.get('Price/Book',0).replace('N/A','100')) <8
condition_4 = float(technicals.get('Beta (3Y Monthly)',0).replace('N/A','100')) <1.1
if condition_1 and condition_2 and condition_3 and condition_4:
print(each_stock)
SuggestedStocks.append(each_stock)
for ind in interested:
print(ind + ": "+ technicals[ind])
print("------")
time.sleep(1)
except ValueError:
print('Value Error')
return
# Use delay to avoid getting flagged as bot
#return technicals
print(SuggestedStocks)
def main():
stock_list = technologyTickers
interested = ['Return on Equity', 'Revenue', 'Quarterly Revenue Growth','Trailing P/E', 'Beta (3Y Monthly)','Price/Book']
technicals = {}
tech = scrape(stock_list, interested, technicals)
print(tech)
AttributeError: 'int' object has no attribute 'replace'
检查您的实施情况
technicals.get('Return on Equity',0)
方法get
(对于类型dict
)将return默认值0
如果键不存在。通过您的实现,所有默认值都有一个类型 int
。因为他们被设置为数字,而不是字符串(用引号引起来)。
如果零是正确的默认值,您可以忽略类型更改的错误并保留您的实现。
technicals.get('Return on Equity', '0')
我想这会做你想要的。
import csv
import requests
from bs4 import BeautifulSoup
url_base = "https://finviz.com/quote.ashx?t="
tckr = ['SBUX','MSFT','AAPL']
url_list = [url_base + s for s in tckr]
with open('C:\Users\Excel\Downloads\SO.csv', 'a', newline='') as f:
writer = csv.writer(f)
for url in url_list:
try:
fpage = requests.get(url)
fsoup = BeautifulSoup(fpage.content, 'html.parser')
# write header row
writer.writerow(map(lambda e : e.text, fsoup.find_all('td', {'class':'snapshot-td2-cp'})))
# write body row
writer.writerow(map(lambda e : e.text, fsoup.find_all('td', {'class':'snapshot-td2'})))
except HTTPError:
print("{} - not found".format(url))
我对为什么会收到属性错误感到有点困惑。仅当我放置等于 stock_list 的列表时才会发生此错误。如果我打印列表然后复制并粘贴我没有得到错误/
我尝试从代码顶部输入技术行情,但尝试时出现属性错误,当我先打印列表然后复制并粘贴时不会发生这种情况,这应该是同一回事?
file = 'techtickerlist.csv'
with open(file) as f:
reader = csv.reader(f)
technologyTickers = []
for row in reader:
technologyTickers.append(row[0])
def scrape(stock_list, interested, technicals):
SuggestedStocks = []
for each_stock in stock_list:
try:
technicals = scrape_yahoo(each_stock)
condition_1 = float(technicals.get('Return on Equity',0).replace('%','').replace('N/A','-100').replace(',','')) > 25
condition_2 = float(technicals.get('Trailing P/E',0).replace('N/A','0').replace(',','')) < 25
condition_3 = float(technicals.get('Price/Book',0).replace('N/A','100')) <8
condition_4 = float(technicals.get('Beta (3Y Monthly)',0).replace('N/A','100')) <1.1
if condition_1 and condition_2 and condition_3 and condition_4:
print(each_stock)
SuggestedStocks.append(each_stock)
for ind in interested:
print(ind + ": "+ technicals[ind])
print("------")
time.sleep(1)
except ValueError:
print('Value Error')
return
# Use delay to avoid getting flagged as bot
#return technicals
print(SuggestedStocks)
def main():
stock_list = technologyTickers
interested = ['Return on Equity', 'Revenue', 'Quarterly Revenue Growth','Trailing P/E', 'Beta (3Y Monthly)','Price/Book']
technicals = {}
tech = scrape(stock_list, interested, technicals)
print(tech)
AttributeError: 'int' object has no attribute 'replace'
检查您的实施情况
technicals.get('Return on Equity',0)
方法get
(对于类型dict
)将return默认值0
如果键不存在。通过您的实现,所有默认值都有一个类型 int
。因为他们被设置为数字,而不是字符串(用引号引起来)。
如果零是正确的默认值,您可以忽略类型更改的错误并保留您的实现。
technicals.get('Return on Equity', '0')
我想这会做你想要的。
import csv
import requests
from bs4 import BeautifulSoup
url_base = "https://finviz.com/quote.ashx?t="
tckr = ['SBUX','MSFT','AAPL']
url_list = [url_base + s for s in tckr]
with open('C:\Users\Excel\Downloads\SO.csv', 'a', newline='') as f:
writer = csv.writer(f)
for url in url_list:
try:
fpage = requests.get(url)
fsoup = BeautifulSoup(fpage.content, 'html.parser')
# write header row
writer.writerow(map(lambda e : e.text, fsoup.find_all('td', {'class':'snapshot-td2-cp'})))
# write body row
writer.writerow(map(lambda e : e.text, fsoup.find_all('td', {'class':'snapshot-td2'})))
except HTTPError:
print("{} - not found".format(url))