MNIST Classification: low accuracy (10%) and no change in loss

I am learning TensorFlow and tried to apply it to the MNIST database. My problem (see the attached accuracy/loss plots): the accuracy is stuck around 10% and the loss barely changes, despite the setup below:

Full code: https://github.com/vibhorj/tf > mnist-2.py
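For reference, the X and Y placeholders used throughout the snippets are defined in the full script; a minimal sketch of what they presumably look like (shapes assumed from the layer definitions below):

import tensorflow as tf

X = tf.placeholder(tf.float32, [None, 28*28], name='X')  # batch of flattened 28x28 images
Y = tf.placeholder(tf.float32, [None, 10], name='Y')     # one-hot digit labels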

1) Here is how the layers are defined (a fully connected 784 → 200 → 100 → 60 → 30 → 10 stack):

K,L,M,N=200,100,60,30
""" Layer 1 """
with tf.name_scope('L1'):
    w1 = tf.Variable(initial_value = tf.truncated_normal([28*28,K],mean=0,stddev=0.1), name = 'w1')
    b1 = tf.Variable(initial_value = tf.truncated_normal([K],mean=0,stddev=0.1), name = 'b1')
""" Layer 2 """
with tf.name_scope('L2'):
    w2 = tf.Variable(initial_value =tf.truncated_normal([K,L],mean=0,stddev=0.1), name = 'w2')
    b2 = tf.Variable(initial_value = tf.truncated_normal([L],mean=0,stddev=0.1), name = 'b2')
""" Layer 3 """
with tf.name_scope('L3'):
    w3 = tf.Variable(initial_value = tf.truncated_normal([L,M],mean=0,stddev=0.1), name = 'w3')
    b3 = tf.Variable(initial_value = tf.truncated_normal([M],mean=0,stddev=0.1), name = 'b3')
""" Layer 4 """
with tf.name_scope('L4'):
    w4 = tf.Variable(initial_value = tf.truncated_normal([M,N],mean=0,stddev=0.1), name = 'w4')
    b4 = tf.Variable(initial_value = tf.truncated_normal([N],mean=0,stddev=0.1), name = 'b4')
""" Layer output """
with tf.name_scope('L_out'):
    w_out = tf.Variable(initial_value = tf.truncated_normal([N,10],mean=0,stddev=0.1), name = 'w_out')
    b_out = tf.Variable(initial_value = tf.truncated_normal([10],mean=0,stddev=0.1), name = 'b_out')

2) The loss function:

Y1 = tf.nn.sigmoid(tf.add(tf.matmul(X,w1),b1), name='Y1')
Y2 = tf.nn.sigmoid(tf.add(tf.matmul(Y1,w2),b2), name='Y2')
Y3 = tf.nn.sigmoid(tf.add(tf.matmul(Y2,w3),b3), name='Y3')
Y4 = tf.nn.sigmoid(tf.add(tf.matmul(Y3,w4),b4), name='Y4')

Y_pred_logits = tf.add(tf.matmul(Y4, w_out),b_out,name='logits')
Y_pred_prob = tf.nn.softmax(Y_pred_logits, name='probs')

error = -tf.matmul(Y, tf.reshape(tf.log(Y_pred_prob), [10,-1]), name='err')
loss = tf.reduce_mean(error, name = 'loss')

3) The optimizer:

opt = tf.train.GradientDescentOptimizer(0.1)
grads_and_vars = opt.compute_gradients(loss)
ctr = tf.Variable(0.0, name='ctr')
z = opt.apply_gradients(grads_and_vars, global_step=ctr)  
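(Side note: the two-step compute_gradients/apply_gradients pair above is equivalent to TF's single-call form, which the answer's code below also uses:)

z = opt.minimize(loss, global_step=ctr)  # computes and applies gradients in one call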

4) TensorBoard code:

evt_file = tf.summary.FileWriter('/Users/vibhorj/python/-tf/g_mnist')
evt_file.add_graph(tf.get_default_graph())

s1 = tf.summary.scalar(name='accuracy', tensor=accuracy)
s2 = tf.summary.scalar(name='loss', tensor=loss)
m1 = tf.summary.merge([s1,s2])
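The accuracy tensor fed to the first summary is defined in the full script; a plausible definition for one-hot labels (an assumption, not necessarily the exact code) is:

correct = tf.equal(tf.argmax(Y_pred_logits, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name='accuracy')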

5) Running the session (the test data is mnist.test.images & mnist.test.labels):

with tf.Session() as sess:
    sess.run(tf.variables_initializer(tf.global_variables()))
    for i in range(300):
       """ calc. accuracy on test data - TENSORBOARD before iteration beings """
       summary = sess.run(m1, feed_dict=test_data)
       evt_file.add_summary(summary, sess.run(ctr))
       evt_file.flush()

       """ fetch train data """        
       a_train, b_train = mnist.train.next_batch(batch_size=100)
       train_data = {X: a_train , Y: b_train}

       """ train """
       sess.run(z, feed_dict = train_data)

Thanks for taking the time to share any insight on this. I have no idea how to proceed (I even tried initializing w and b with random_normal, and learning rates of 0.1, 0.01, and 0.001).

Cheers!

Please consider:

  1. Initializing the biases to zero
  2. Using ReLU units instead of sigmoid, to avoid saturation
  3. Using the Adam optimizer, which learns faster (a one-line sketch follows this list)
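A minimal sketch of point 3, assuming the same TF 1.x API as the rest of the code (0.001 is Adam's default learning rate, not a tuned value):

opt = tf.train.AdamOptimizer(learning_rate=0.001)  # drop-in replacement for GradientDescentOptimizer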

Your network also seems quite large; you could use a smaller one.
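As a sanity check: with 10 balanced classes, a model that is effectively guessing scores accuracy ≈ 1/10 = 10% and cross-entropy loss ≈ ln(10) ≈ 2.30, so a flat loss near 2.3 alongside 10% accuracy is the classic signature of a network that is not learning at all. Here is your code with zero biases, ReLU units, and a proper cross-entropy loss applied: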

K,L,M,N=200,100,60,30
""" Layer 1 """
with tf.name_scope('L1'):
    w1 = tf.Variable(initial_value = tf.truncated_normal([28*28,K],mean=0,stddev=0.1), name = 'w1')
    b1 = tf.Variable(tf.zeros([K]), name='b1')  # bias initialized to zero (point 1)
""" Layer 2 """
with tf.name_scope('L2'):
    w2 = tf.Variable(initial_value =tf.truncated_normal([K,L],mean=0,stddev=0.1), name = 'w2')
    b2 = tf.Variable(tf.zeros([L]), name='b2')  # bias initialized to zero
""" Layer 3 """
with tf.name_scope('L3'):
    w3 = tf.Variable(initial_value = tf.truncated_normal([L,M],mean=0,stddev=0.1), name = 'w3')
    b3 = tf.Variable(tf.zeros([M]), name='b3')  # bias initialized to zero
""" Layer 4 """
with tf.name_scope('L4'):
    w4 = tf.Variable(initial_value = tf.truncated_normal([M,N],mean=0,stddev=0.1), name = 'w4')
    b4 = tf.Variable(tf.zeros([N]), name='b4')  # bias initialized to zero
""" Layer output """
with tf.name_scope('L_out'):
    w_out = tf.Variable(initial_value = tf.truncated_normal([N,10],mean=0,stddev=0.1), name = 'w_out')
    b_out = tf.Variable(tf.zeros([10]), name='b_out')  # bias initialized to zero


Y1 = tf.nn.relu(tf.add(tf.matmul(X,w1),b1), name='Y1')
Y2 = tf.nn.relu(tf.add(tf.matmul(Y1,w2),b2), name='Y2')
Y3 = tf.nn.relu(tf.add(tf.matmul(Y2,w3),b3), name='Y3')
Y4 = tf.nn.relu(tf.add(tf.matmul(Y3,w4),b4), name='Y4')

Y_pred_logits = tf.add(tf.matmul(Y4, w_out),b_out,name='logits')

loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=Y_pred_logits, name='xentropy'))
opt = tf.train.GradientDescentOptimizer(0.01)  # per point 3, tf.train.AdamOptimizer would likely converge even faster
ctr = tf.Variable(0.0, name='ctr', trainable=False)  # global step counter
train_op = opt.minimize(loss, global_step=ctr)  # computes and applies gradients in one step

for v in tf.trainable_variables():
    print(v.op.name)  # sanity check: list every trainable variable by name

with tf.Session() as sess:
    sess.run(tf.variables_initializer(tf.global_variables()))
    for i in range(3000):
       """ calc. accuracy on test data - TENSORBOARD before iteration beings """
       #summary = sess.run(m1, feed_dict=test_data)
       #evt_file.add_summary(summary, sess.run(ctr))
       #evt_file.flush()

       """ fetch train data """
       a_train, b_train = mnist.train.next_batch(batch_size=100)
       train_data = {X: a_train , Y: b_train}

       """ train """
       l = sess.run(loss, feed_dict = train_data)
       print(l)
       sess.run(train_op, feed_dict = train_data)
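As a minor efficiency note, the loss and the training op can be fetched in a single sess.run call, which avoids running the forward pass twice per step:

l, _ = sess.run([loss, train_op], feed_dict=train_data)
print(l)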