如何从 variable_scope 初始化变量?

How to initialize variables from variable_scope?

如标题所述,如何在 TensorFlow 中初始化来自 variable_scope 的变量?我原本不知道这是必要的,因为我以为它是一个常量。但是,当我尝试在 Android 上运行 session 来预测输出时,出现了以下错误:

Error during inference: Failed precondition: Attempting to use uninitialized value weights0

[[Node: weights0/read = Identity[T=DT_FLOAT, _class=["loc:@weights0"], _device="/job:localhost/replica:0/task:0/cpu:0"](weights0)]]

我尝试使用 tf.Variable(即 'h1': tf.Variable(vs.get_variable("weights0", [n_input, n_hidden_1], initializer=tf.contrib.layers.xavier_initializer())))设置变量,但在尝试生成 protobuf 文件时出现错误“在检查点文件中找不到张量名称 'Variable'”。

片段

def reg_perceptron(t, weights, biases):
    """Three-layer regression perceptron: ReLU hidden layer, sigmoid hidden
    layer, then a linear output flattened to a 1-D tensor.

    t       -- input batch tensor (presumably shape [batch, n_input] — confirm)
    weights -- dict with keys 'h1', 'h2', 'hOut' mapping to weight variables
    biases  -- dict with keys 'b1', 'b2', 'bOut' mapping to bias variables
    """
    hidden_1 = tf.nn.relu(tf.add(tf.matmul(t, weights['h1']), biases['b1']), name = "layer_1")
    hidden_2 = tf.nn.sigmoid(tf.add(tf.matmul(hidden_1, weights['h2']), biases['b2']), name = "layer_2")
    output = tf.add(tf.matmul(hidden_2, weights['hOut'], name="LOut_MatMul"), biases['bOut'], name="LOut_Add")
    return tf.reshape(output, [-1], name="Y_GroundTruth")

# Training graph. NOTE(review): this snippet is abridged — the bare `...`
# lines stand for code omitted in the original question, and the mixed
# 3-/4-space indentation is broken as pasted; kept byte-identical here.
g = tf.Graph()
with g.as_default():
   ...
   # Weight variables created via variable_scope (`vs` is presumably
   # tensorflow's variable_scope module, imported outside this snippet —
   # confirm). "weights0" is the exact name the Android inference error
   # reports as uninitialized.
   rg_weights = {
    'h1': vs.get_variable("weights0", [n_input, n_hidden_1], initializer=tf.contrib.layers.xavier_initializer()),
    'h2': vs.get_variable("weights1", [n_hidden_1, n_hidden_2], initializer=tf.contrib.layers.xavier_initializer()),
    'hOut': vs.get_variable("weightsOut", [n_hidden_2, 1], initializer=tf.contrib.layers.xavier_initializer())
    }


    # Bias variables, constant-initialized to `bias_start` (defined elsewhere).
    rg_biases = {
    'b1': vs.get_variable("bias0", [n_hidden_1], initializer=init_ops.constant_initializer(bias_start)),
    'b2': vs.get_variable("bias1", [n_hidden_2], initializer=init_ops.constant_initializer(bias_start)),
    'bOut': vs.get_variable("biasOut", [1], initializer=init_ops.constant_initializer(bias_start))
    }

    # Forward pass producing the prediction op.
    pred = reg_perceptron(_x, rg_weights, rg_biases)
    ...
...

# Second, separate graph (abridged — `...` marks omitted code). It recreates
# variables with the SAME names ("weights0", ...) as the first graph, but as
# distinct objects: each tf.Graph has its own variables, and each must be
# initialized or restored from a checkpoint within its own session.
g_2 = tf.Graph()
with g_2.as_default():
    ...
    # Same weight definitions as the training graph; duplicated rather than
    # shared because the two graphs are independent.
    rg_weights_2 = {
    'h1': vs.get_variable("weights0", [n_input, n_hidden_1], initializer=tf.contrib.layers.xavier_initializer()),
    'h2': vs.get_variable("weights1", [n_hidden_1, n_hidden_2], initializer=tf.contrib.layers.xavier_initializer()),
    'hOut': vs.get_variable("weightsOut", [n_hidden_2, 1], initializer=tf.contrib.layers.xavier_initializer())
    }

    # Same bias definitions as the training graph.
    rg_biases_2 = {
    'b1': vs.get_variable("bias0", [n_hidden_1], initializer=init_ops.constant_initializer(bias_start)),
    'b2': vs.get_variable("bias1", [n_hidden_2], initializer=init_ops.constant_initializer(bias_start)),
    'bOut': vs.get_variable("biasOut", [1], initializer=init_ops.constant_initializer(bias_start))
    }

    # Prediction op for this graph, built from this graph's variables.
    pred_2 = reg_perceptron(_x_2, rg_weights_2, rg_biases_2)
    ...

编辑

我是否以错误的方式创建了 protobuf 文件?我用于生成 .PB 文件的代码可以在 here 找到,它返回的结果如下:

(这里蓝线代表目标值,绿线代表预测值。)

而我应该得到

这样的结果(来自 http://pastebin.com/RUFa9NkN),尽管两段代码使用的是相同的输入和模型。

如果您想使用 variable_scope,请尝试以下操作:

# NOTE(review): the `...` in the parameter list is a placeholder from the
# original answer — a real signature must name whatever the shapes below
# need (n_input, n_hidden_1, n_hidden_2, bias_start); as written this line
# is not valid Python.
def initialize_variable(vs_name, ...): # other necessary arguments
    # Create every perceptron variable exactly once inside the named
    # variable_scope, so later scopes opened with reuse=True can look them
    # up by name instead of re-creating them.
    with tf.variable_scope(vs_name, reuse=None) as vs:
        h1 = tf.get_variable("weights0", [n_input, n_hidden_1], initializer=tf.contrib.layers.xavier_initializer())
        h2 = tf.get_variable("weights1", [n_hidden_1, n_hidden_2], initializer=tf.contrib.layers.xavier_initializer())
        hout = tf.get_variable("weightsOut", [n_hidden_2, 1], initializer=tf.contrib.layers.xavier_initializer())
        b1 = tf.get_variable("bias0", [n_hidden_1], initializer=init_ops.constant_initializer(bias_start))
        b2 = tf.get_variable("bias1", [n_hidden_2], initializer=init_ops.constant_initializer(bias_start))
        bout = tf.get_variable("biasOut", [1], initializer=init_ops.constant_initializer(bias_start))
        # Mark the scope reusable; the variables are deliberately not
        # returned — callers retrieve them later via
        # tf.variable_scope(vs_name, reuse=True).
        vs.reuse_variables()

然后在图中,首先使用上面的函数初始化变量,然后提取变量。

# Build the graph: create the shared variables once, then fetch them by name
# from the reusable scope and wire up the perceptron.
g = tf.Graph()
with g.as_default():
  initialize_variable(vs_name, ...) #fill in other necessary arguments
  # reuse=True: tf.get_variable looks up the existing variables created by
  # initialize_variable instead of creating new ones (shape args not needed).
  with tf.variable_scope(vs_name, reuse=True):
      # BUG FIX: key must be 'hOut' (not 'hout') — reg_perceptron reads
      # weights['hOut'], so the lowercase key would raise KeyError.
      rg_weights = {'h1' : tf.get_variable("weights0"),
                    'h2' : tf.get_variable("weights1"),
                    'hOut' : tf.get_variable("weightsOut")}
      rg_biases = {'b1' : tf.get_variable("bias0"),
                   'b2' : tf.get_variable("bias1"),
                   'bOut': tf.get_variable("biasOut")}
  pred = reg_perceptron(_x, rg_weights, rg_biases)

如果您不想使用 variable_scope,请尝试以下做法——不过这种方式需要直接传入初始张量,而不能使用 initializer。

# Alternative without variable_scope: plain tf.Variable, which takes an
# explicit initial-value tensor (here tf.truncated_normal) rather than an
# initializer function. `...` lines mark code omitted in the original answer.
g = tf.Graph()
with g.as_default():
    ...
    # FIXES vs. the original snippet: added the missing comma after the
    # 'h1' entry (it was a syntax error) and renamed `n_hidden1` to
    # `n_hidden_1` to match the identifier used everywhere else.
    rg_weights = {'h1': tf.Variable(tf.truncated_normal([n_input, n_hidden_1], mean, stddev), name='weights0'),
                  'h2': tf.Variable(tf.truncated_normal([n_hidden_1, n_hidden_2], mean, stddev), name="weights1"),
                  'hOut': tf.Variable(tf.truncated_normal([n_hidden_2, 1], mean, stddev), name="weightsOut")}
    ...

这里有一些关于 TensorFlow 中变量共享的 documentation and example(文档与示例)。