Issue training RNN model with PyTorch with trivial goal
I am trying to train a simple RNN model with a trivial goal: regardless of the input, the output should match a fixed vector.
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
class RNN(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
        print "i2h WEIGHT size ", list(self.i2h.weight.size())
        print "i2h bias size ", list(self.i2h.bias.size())
        self.i2o = nn.Linear(hidden_size, output_size)
        print "i2o WEIGHT size ", list(self.i2o.weight.size())
        print "i2o bias size ", list(self.i2o.bias.size())
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, input, hidden):
        combined = torch.cat((input, hidden), 1)
        hidden = self.i2h(combined)
        output = self.i2o(hidden)
        output = self.softmax(output)
        return output, hidden

    def initHidden(self):
        return Variable(torch.zeros(1, self.hidden_size))
n_hidden = 20
rnn = RNN(10, n_hidden, 3)
learning_rate = 1e-3
loss_fn = torch.nn.MSELoss(size_average=False)
out_target = Variable(torch.FloatTensor([[0.0, 1.0, 0.0]]), requires_grad=False)
print "target output::: ", out_target
def train(category_tensor, line_tensor):
    hidden = rnn.initHidden()
    rnn.zero_grad()

    for i in range(line_tensor.size()[0]):
        #print "train iteration ", i, ": input data: ", line_tensor[i]
        output, hidden = rnn(line_tensor[i], hidden)

    loss = loss_fn(output, out_target)
    loss.backward()

    # Add parameters' gradients to their values, multiplied by learning rate
    for p in rnn.parameters():
        #print "parameter: ", p, " gradient: ", p.grad.data
        p.data.add_(-learning_rate, p.grad.data)

    return output, loss.data[0]
current_loss = 0
n_iters = 500
for iter in range(1, n_iters + 1):
    inp = Variable(torch.randn(100, 1, 10) + 5)
    output, loss = train(out_target, inp)
    current_loss += loss

    if iter % 1 == 0:
        print "weights: ", rnn.i2h.weight
        print "LOSS: ", loss
        print output
As you can see, the loss stays above 6 and never decreases. Note also that I bias all the normally distributed random inputs by +5, so they are mostly positive; a set of weights that brings the output close to the target should therefore exist.

What am I doing wrong in this example, such that the output never gets close to the target?
Your fixed target output is:

torch.FloatTensor([[0.0, 1.0, 0.0]])

but you are using the following as the last layer in your RNN:

self.softmax = nn.LogSoftmax(dim=1)

Does LogSoftmax return values in [0, 1]? It does not: it returns log-probabilities, which lie in (-inf, 0], so the output can never match the 1.0 in your target and the MSE loss can never reach zero. Although you may use Softmax, I would recommend you to use the sign function and transform -1 to 0.
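To make the range mismatch concrete, here is a minimal sketch; it is my own reconstruction rather than the asker's exact setup, and it uses the current PyTorch API (plain tensors instead of Variable, reduction='sum' instead of the deprecated size_average=False, and torch.optim.SGD in place of the manual parameter update). The only substantive change is swapping nn.LogSoftmax for nn.Softmax so the output can actually approach [0, 1, 0]:

import torch
import torch.nn as nn

# Log-softmax yields log-probabilities, which are always <= 0,
# so no entry can ever reach the 1.0 in the target vector.
logits = torch.randn(1, 3)
print(nn.LogSoftmax(dim=1)(logits))  # all values <= 0
print(nn.Softmax(dim=1)(logits))     # values in (0, 1), summing to 1

class RNN(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
        self.i2o = nn.Linear(hidden_size, output_size)
        self.softmax = nn.Softmax(dim=1)  # Softmax instead of LogSoftmax

    def forward(self, input, hidden):
        combined = torch.cat((input, hidden), 1)
        hidden = self.i2h(combined)
        return self.softmax(self.i2o(hidden)), hidden

    def initHidden(self):
        return torch.zeros(1, self.hidden_size)

rnn = RNN(10, 20, 3)
loss_fn = nn.MSELoss(reduction='sum')
out_target = torch.tensor([[0.0, 1.0, 0.0]])
optimizer = torch.optim.SGD(rnn.parameters(), lr=1e-3)

for step in range(500):
    inp = torch.randn(100, 1, 10) + 5
    hidden = rnn.initHidden()
    optimizer.zero_grad()
    # Unroll over the sequence; only the final output is trained,
    # as in the original code.
    for i in range(inp.size(0)):
        output, hidden = rnn(inp[i], hidden)
    loss = loss_fn(output, out_target)
    loss.backward()
    optimizer.step()
    if step % 100 == 0:
        print(step, loss.item())

With Softmax the output is a probability vector, so the MSE against [0.0, 1.0, 0.0] can in principle reach zero and the loss should start falling instead of plateauing; the usual RNN caveats (for example, exploding activations over 100 purely linear recurrent steps) still apply.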