Deep learning with PyTorch: Expected 4-dimensional input for 4-dimensional weight [64, 3, 7, 7], but got 2-dimensional input of size [32, 1728] instead
I'm doing some image classification with deep learning in PyTorch. Whenever I try to train my model, the forward pass fails. Can someone explain what is causing this input-size error and how to fix it?
Here is the code for my model, together with my loss criterion and my optimizer:
model.fc = nn.Sequential(
    nn.Conv2d(1, 6, 9, padding=0),    # 6 x 40 x 40
    nn.ReLU(),
    nn.AvgPool2d(2, stride=2),        # 6 x 20 x 20
    nn.Conv2d(6, 16, 11, padding=0),  # 16 x 10 x 10
    nn.ReLU(),                        # 16 x 10 x 10
    nn.AvgPool2d(2, stride=2),        # 16 x 5 x 5 = 400
    nn.Flatten(),
    nn.Linear(400, 200),
    nn.ReLU(),
    nn.Linear(200, 100),
    nn.ReLU(),
    nn.Linear(100, 3),
    nn.LogSoftmax(dim=1))
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.fc.parameters(), lr=0.003)
model.to(device)
Here is the validation function:
def validation(model, val_dataloader, criterion):
    val_loss = 0
    accuracy = 0

    for images, labels in iter(val_dataloader):
        images, labels = images.to('cuda'), labels.to('cuda')

        output = model.forward(images)
        val_loss += criterion(output, labels).item()

        probabilities = torch.exp(output)

        equality = (labels.data == probabilities.max(dim=1)[1])
        accuracy += equality.type(torch.FloatTensor).mean()

    return val_loss, accuracy
Finally, here is my training function:
def train_classifier():
    epochs = 10
    steps = 0
    print_every = 40
    model.to('cuda')

    for e in range(epochs):
        model.train()
        running_loss = 0

        for images, labels in iter(train_dataloader):
            images = images.view(images.shape[0], -1)  # this flattens it?
            steps += 1
            images, labels = images.to('cuda'), labels.to('cuda')

            optimizer.zero_grad()

            # training
            output = model.forward(images)
            loss = criterion(output, labels)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()

            if steps % print_every == 0:
                model.eval()

                # Turn off gradients for validation, saves memory and computations
                with torch.no_grad():
                    validation_loss, accuracy = validation(model, validate_loader, criterion)

                print("Epoch: {}/{}.. ".format(e+1, epochs),
                      "Training Loss: {:.3f}.. ".format(running_loss/print_every),
                      "Validation Loss: {:.3f}.. ".format(validation_loss/len(validate_loader)),
                      "Validation Accuracy: {:.3f}".format(accuracy/len(validate_loader)))

                running_loss = 0
                model.train()

train_classifier()
The error:
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-32-60a435d940e1> in <module>()
49 model.train()
50
---> 51 train_classifier()
5 frames
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/conv.py in _conv_forward(self, input, weight, bias)
438 _pair(0), self.dilation, self.groups)
439 return F.conv2d(input, weight, bias, self.stride,
--> 440 self.padding, self.dilation, self.groups)
441
442 def forward(self, input: Tensor) -> Tensor:
RuntimeError: Expected 4-dimensional input for 4-dimensional weight [64, 3, 7, 7], but got 2-dimensional input of size [32, 1728] instead
Any help is appreciated! Thanks!
Your network has both Conv2d layers and fully-connected Linear layers, and that is the root of the problem: Conv2d expects its input to be 4D (batch x channels x height x width), while nn.Linear works on "flattened" features of shape batch x features. So you do need to "flatten" your data, but not before you feed it to the network; the flattening should happen during the forward pass, which is exactly what your nn.Flatten layer is there for. In other words, drop the images = images.view(images.shape[0], -1) line from the training loop and pass the 4D image batch to the model directly.
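As a minimal sketch of the corrected inner loop (assuming model, criterion, optimizer, and train_dataloader are defined as in your question, and that the loader yields image batches of shape [N, C, H, W]):

for images, labels in train_dataloader:
    # Keep the 4D batch [N, C, H, W] -- do NOT call images.view(images.shape[0], -1) here.
    images, labels = images.to('cuda'), labels.to('cuda')

    optimizer.zero_grad()
    output = model(images)            # nn.Flatten inside the Sequential flattens the
    loss = criterion(output, labels)  # feature maps right before the Linear layers
    loss.backward()
    optimizer.step()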
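For illustration only (the 48x48 input size here is an assumption, chosen so the layer comments in your model work out to 16 x 5 x 5 = 400), a quick shape check shows why Conv2d rejects a pre-flattened batch while the in-network nn.Flatten is fine:

import torch
import torch.nn as nn

x = torch.randn(32, 1, 48, 48)      # hypothetical batch: [N, C, H, W]
conv = nn.Conv2d(1, 6, 9)

print(conv(x).shape)                # torch.Size([32, 6, 40, 40]) -- 4D input is accepted
# conv(x.view(32, -1))              # raises the same kind of "Expected 4-dimensional input ..." error as in your traceback

flat = nn.Flatten()
feats = torch.randn(32, 16, 5, 5)   # feature maps after the second pooling layer
print(flat(feats).shape)            # torch.Size([32, 400]) -- ready for nn.Linear(400, 200)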