Multicell LSTM RNN returns nan training error
I am trying to train this Multi Cell RNN network (for training, you can ignore the m_t+1 -> m_t part) with 4 layers of LSTM cells. The encoder and decoder are just fully connected layers.
G_t and m_t are float arrays of size 6 and 69, respectively; P_t and m_t+1 have the same sizes. This RNN runs for 48 time steps.
But for some reason, my training does not work at all, and I would love to know what is wrong with my code.
The cost function, together with the rest of the setup, looks like this:
import numpy as np
import tensorflow as tf

n_steps = 48
n_neurons = 512
n_layers = 4
NUM_OF_INPUTS = 6 + 69
NUM_OF_OUTPUTS = 6 + 69
EPOCHS = 50
sample_size = 12494
batch_size = 128
total_batch = int(sample_size / batch_size)
global_step = tf.Variable(0, trainable=False)
prop_valid = 0.1
time_stamp = 48
def mini_batch(data, bs, i):
    return data[i*bs : i*bs+bs, :, :]
#Both X_data_np and Y_data_np are three dimensional, which is the required dimension for the inputs of tf.nn.dynamic_rnn
X_data_np = np.load('X_data.npy')
Y_data_np = np.load('Y_data.npy')
data = np.concatenate([X_data_np, Y_data_np], axis=-1)
np.random.shuffle(data)
#standardize data
mean = np.mean(data)
data = data - mean
std = np.std(data)
data = data / std
train_size = int(sample_size * (1 - prop_valid))
valid_size = int((sample_size - train_size))
train_input = data[:train_size, :, :NUM_OF_INPUTS]
train_label = data[:train_size, :, NUM_OF_INPUTS:]
valid_input = data[train_size:train_size + valid_size, :,:NUM_OF_INPUTS]
valid_label = data[train_size:train_size + valid_size, :,NUM_OF_INPUTS:]
X = tf.placeholder(tf.float32, [None, n_steps, NUM_OF_INPUTS])
Y = tf.placeholder(tf.float32, [None, n_steps, NUM_OF_OUTPUTS])  # (?, 48, 75)
encoded_inputs = tf.layers.dense(X, 256)
layers = [tf.contrib.rnn.LSTMCell(num_units=n_neurons, activation=tf.nn.tanh) for _ in range(n_layers)]
multi_layer_cell = tf.contrib.rnn.MultiRNNCell(layers)
outputs, _ = tf.nn.dynamic_rnn(multi_layer_cell, encoded_inputs, dtype=tf.float32)
prediction = tf.layers.dense(outputs, NUM_OF_OUTPUTS)
distance = tf.norm(prediction[:, :, 6:75] - Y[:, :, 6:75], axis=2)  # (?, 48)
distance_square = tf.square(distance)
# Sum the squared distances over all 48 time steps
reduced_distance = tf.math.reduce_sum(distance_square, axis=1)  # (?,)
# Mean over the mini-batch
train_loss = tf.math.reduce_mean(reduced_distance, axis=0)  # ()
learning_rate = 0.001
trainOptimizer = tf.train.AdamOptimizer(learning_rate).minimize(train_loss, global_step=global_step)
sess = tf.Session()
tf.global_variables_initializer().run(session=sess)
for epoch in range(EPOCHS):
    for batch_idx in range(total_batch):
        train_batch_input = mini_batch(train_input, batch_size, batch_idx)
        train_batch_label = mini_batch(train_label, batch_size, batch_idx)
        _, loss = sess.run([trainOptimizer, train_loss], feed_dict={X: train_batch_input, Y: train_batch_label})
    if (epoch+1) % 10 == 0:
        prediction2 = sess.run(prediction, feed_dict={X: valid_input})
        valid_error = np.mean(np.sum(np.square(np.linalg.norm(prediction2[:, :, 6:75] - valid_label[:, :, 6:75], axis=2)), axis=1), axis=0)
        print("Epoch: %05d tL: %.4f vE: %.4f" % (epoch+1, loss, valid_error))
The results are as follows:
Epoch: 00010 tL: nan vE: 4.3044
Epoch: 00020 tL: nan vE: 4.3114
Epoch: 00030 tL: nan vE: 4.2962
Epoch: 00040 tL: nan vE: 4.3009
Epoch: 00050 tL: nan vE: 4.2899
No matter how small the training data is, the training loss is always nan, so I think the root problem is in the code I use to train it. The validation error is not nan, so I assume the data itself does not contain nan values.
Is there a critical problem in my code that I have not addressed? Any help would be appreciated! Thanks in advance.
The reason the validation error shows normal values while the training error does not is that I was building mini-batches that end up producing nan values.
Apparently,
sample_size = 12494
batch_size = 128
total_batch = int(sample_size / batch_size)
train_size = int(sample_size * (1 - prop_valid))
and
for batch_idx in range(total_batch):
    train_batch_input = mini_batch(train_input, batch_size, batch_idx)
    train_batch_label = mini_batch(train_label, batch_size, batch_idx)
do not make sense together. With sample_size = 12494, total_batch is int(12494 / 128) = 97, which covers 12416 samples, but train_input only holds train_size = int(12494 * 0.9) = 11244 samples, so the later batches slice past the end of the training split. total_batch should be int(train_size / batch_size), as in the sketch below.
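For completeness, here is a minimal sketch of the corrected loop, reusing the variables defined above (the assert is an extra sanity check I added, not part of the original code):

total_batch = int(train_size / batch_size)  # 11244 // 128 = 87 full batches

for epoch in range(EPOCHS):
    for batch_idx in range(total_batch):
        train_batch_input = mini_batch(train_input, batch_size, batch_idx)
        train_batch_label = mini_batch(train_label, batch_size, batch_idx)
        # Every batch now comes entirely from the training split.
        assert train_batch_input.shape[0] == batch_size
        _, loss = sess.run([trainOptimizer, train_loss],
                           feed_dict={X: train_batch_input, Y: train_batch_label})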
This cause was hard to find because numpy does not raise any error when an array slice goes out of bounds; the snippet below demonstrates this.
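A self-contained numpy demonstration (the array here is hypothetical, with shapes chosen to match the training split above):

import numpy as np

train_input = np.zeros((11244, 48, 75))  # same shape as the real training split

# Batch index 90 starts at 11520, which is past the end of the array.
# numpy raises no error and silently returns an empty slice.
batch = train_input[90*128 : 90*128 + 128, :, :]
print(batch.shape)     # (0, 48, 75)

# The mean over an empty batch is nan, which then poisons the training loss.
print(np.mean(batch))  # nan (numpy warns about the mean of an empty slice)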
Anyway, I hope this helps anyone who runs into a similar problem in the future!