RuntimeError: expected scalar type Float but found Long neural network
I know there are questions similar to this one, but following them seems to lead me down rabbit holes, where the problem I just solved causes another one.
Here are the two rabbit-hole fixes I kept, because they seemed to solve those other problems. I doubt they will help, but I am including them just in case.
One:
batch_X = batch_X.to(device=device, dtype=torch.int64)
batch_y = batch_y.to(device=device, dtype=torch.int64)
Two:
x = x.view(x.size(0), -1)
This is the error I am getting.
Traceback (most recent call last):
File "c:/Users/14055/Desktop/Research/new 1.0.py", line 93, in <module>
training()
File "c:/Users/14055/Desktop/Research/new 1.0.py", line 63, in training
output = model(batch_X) # passed input data here!!!!!!!!!!!!!!!!!!!!!!!!!!
File "C:\Users055\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\nn\modules\module.py", line 1051, in _call_impl
return forward_call(*input, **kwargs)
File "c:/Users/14055/Desktop/Research/new 1.0.py", line 31, in forward
x = F.relu(self.fc1(x))
File "C:\Users055\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\nn\modules\module.py", line 1051, in _call_impl
return forward_call(*input, **kwargs)
File "C:\Users055\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\nn\modules\linear.py", line 96, in forward
return F.linear(input, self.weight, self.bias)
File "C:\Users055\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\nn\functional.py", line 1847, in linear
return torch._C._nn.linear(input, weight, bias)
RuntimeError: expected scalar type Float but found Long
My code is below:
import torch.cuda
import torch
import numpy as np
import sys
import torch.nn as nn
import torch.nn.functional as F
from torchsummary import summary
#-------------------------------------------------------------------------
torch.cuda.set_device(0)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
np.set_printoptions(threshold=sys.maxsize)
#-------------------------------------------------------------------------
input_data = torch.Tensor(np.load("inputData.npy", allow_pickle=True))
predict_data = torch.Tensor(np.load("predict.npy", allow_pickle=True))
input_data = input_data.type(torch.FloatTensor)
predict_data = predict_data.type(torch.FloatTensor)
print(type(input_data))

class NeuralNet(nn.Module):
    def __init__(self, gpu=True):
        super(NeuralNet, self).__init__()
        self.fc1 = nn.Linear(248, 750).to(device)
        self.fc2 = nn.Linear(750, 10).to(device)

    def forward(self, x):
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc1(x))
        x = self.fc2(x).to(device)
        return x.to(device)

def training():
    model.to(device)
    training.criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.003, weight_decay=0.00005, momentum=.9, nesterov=True)
    n_epochs = 20000
    a = np.float64([9, 9, 9, 9, 9])  # antioverfit
    testing_loss = 0.0
    BATCH_SIZE = 10
    EPOCHS = 3
    for epoch in range(EPOCHS):
        if testing_loss <= a[4]:  # part of anti overfit
            train_loss = 0.0
            testing_loss = 0.0
            model.train()
            for i in range(0, len(input_data), BATCH_SIZE):
                batch_X = input_data[i:i+BATCH_SIZE]
                batch_y = predict_data[i:i+BATCH_SIZE]
                optimizer.zero_grad()
                batch_X = batch_X.to(device=device, dtype=torch.int64)  # gpu # input data here!!!!!!!!!!!!!!!!!!!!!!!!!!
                batch_y = batch_y.to(device=device, dtype=torch.int64)  # gpu # target data here!!!!!!!!!!!!!!!!!!!!!!!!!!
                output = model(batch_X)  # passed input data here!!!!!!!!!!!!!!!!!!!!!!!!!!
                loss = training.criterion(output, batch_y)
                loss.backward()
                optimizer.step()
                train_loss += loss.item() * batch_X.size(0)
            train_loss = train_loss / len(predict_data.train_loader.dataset)
            print('Epoch: {} \tTraining Loss: {:.6f}'.format(epoch + 1, train_loss))
            model.eval()  # Gets Validation loss
            train_loss = 0.0
            with torch.no_grad():
                for i in range(0, len(input_data), BATCH_SIZE):
                    batch_X = input_data[i:i+BATCH_SIZE]
                    batch_y = predict_data[i:i+BATCH_SIZE]
                    batch_X = batch_X.to(device=device, dtype=torch.int64)
                    batch_y = batch_y.to(device=device, dtype=torch.int64)
                    output = model(batch_X)
                    loss = training.criterion(output, batch_y).to(device=device, dtype=torch.int64)
                    testing_loss += loss.item() * batch_X.size(0)
            testing_loss = testing_loss / len(predict_data.test_loader.dataset)
            print('Validation loss = ', testing_loss)
            a = np.insert(a, 0, testing_loss)  # part of anti overfit
            a = np.delete(a, 5)
    print('Validation loss = ', testing_loss)

model = NeuralNet().to(device=device)
#summary(model, input_size=(1, 248, 248))
training()
Why are you casting X and y to int64? Mainly, this is the problem:
batch_X = batch_X.to(device=device, dtype=torch.int64) #gpu # input data here!!!!!!!!!!!!!!!!!!!!!!!!!!
batch_y = batch_y.to(device=device, dtype=torch.int64) #gpu
You are converting batch_X and batch_y to int64, a.k.a. long, but float is expected here, hence the error. Replace those lines with
batch_X = batch_X.to(device=device)
batch_y = batch_y.to(device=device, dtype=torch.int64)
or
batch_X = batch_X.to(device=device, dtype=torch.float)
batch_y = batch_y.to(device=device, dtype=torch.int64)
This should solve your problem.
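Applied to your training loop, the relevant lines become the sketch below. The same change belongs in your validation loop, where the int64 cast on the loss itself should also be dropped, since casting a float loss to long truncates its value:
batch_X = batch_X.to(device=device, dtype=torch.float)  # model input: float
batch_y = batch_y.to(device=device, dtype=torch.int64)  # CrossEntropyLoss targets: long
output = model(batch_X)
loss = training.criterion(output, batch_y)              # keep the loss a float tensor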
Edit: You only need to keep y as an integer type. Since you are using CrossEntropyLoss, it expects the target labels to be int or long. Overall, keep the dtype of x float, while y should be long or int.
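For reference, here is a minimal, self-contained sketch (made-up shapes, independent of your data) that reproduces the error with a bare nn.Linear and shows the dtype combination nn.CrossEntropyLoss expects:
import torch
import torch.nn as nn

fc = nn.Linear(248, 10)            # Linear weights are float32 by default
x_float = torch.randn(4, 248)      # float input: fine
out = fc(x_float)                  # logits of shape (4, 10)

x_long = x_float.to(torch.int64)
# fc(x_long)                       # raises: expected scalar type Float but found Long

criterion = nn.CrossEntropyLoss()
y = torch.randint(0, 10, (4,))     # class-index targets, int64 (long) by default
loss = criterion(out, y)           # float logits + long targets: the expected combination
print(loss.item())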