TensorFlow:"Attempting to use uninitialized value" 变量初始化
TensorFlow: "Attempting to use uninitialized value" in variable initialization
I am trying to implement multivariate linear regression in Python using TensorFlow, but I have run into some logical and implementation problems. My code throws the following error:
Attempting to use uninitialized value Variable
Caused by op u'Variable/read'
Ideally the weights output should be [2, 3].
def hypothesis_function(input_2d_matrix_trainingexamples,
                        output_matrix_of_trainingexamples,
                        initial_parameters_of_hypothesis_function,
                        learning_rate, num_steps):
    # calculate num attributes and num examples
    number_of_attributes = len(input_2d_matrix_trainingexamples[0])
    number_of_trainingexamples = len(input_2d_matrix_trainingexamples)

    # Graph inputs
    x = []
    for i in range(0, number_of_attributes, 1):
        x.append(tf.placeholder("float"))
    y_input = tf.placeholder("float")

    # Create Model and Set Model weights
    parameters = []
    for i in range(0, number_of_attributes, 1):
        parameters.append(
            tf.Variable(initial_parameters_of_hypothesis_function[i]))

    # Construct linear model
    y = tf.Variable(parameters[0], "float")
    for i in range(1, number_of_attributes, 1):
        y = tf.add(y, tf.multiply(x[i], parameters[i]))

    # Minimize the mean squared errors
    loss = tf.reduce_mean(tf.square(y - y_input))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    train = optimizer.minimize(loss)

    # Initialize the variables
    init = tf.initialize_all_variables()

    # launch the graph
    session = tf.Session()
    session.run(init)
    for step in range(1, num_steps + 1, 1):
        for i in range(0, number_of_trainingexamples, 1):
            feed = {}
            for j in range(0, number_of_attributes, 1):
                array = [input_2d_matrix_trainingexamples[i][j]]
                feed[j] = array
            array1 = [output_matrix_of_trainingexamples[i]]
            feed[number_of_attributes] = array1
            session.run(train, feed_dict=feed)
    for i in range(0, number_of_attributes - 1, 1):
        print(session.run(parameters[i]))

array = [[0.0, 1.0, 2.0], [0.0, 2.0, 3.0], [0.0, 4.0, 5.0]]
hypothesis_function(array, [8.0, 13.0, 23.0], [1.0, 1.0, 1.0], 0.01, 200)
It is not 100% clear from the code example, but if the list initial_parameters_of_hypothesis_function is a list of tf.Variable objects, then the line session.run(init) will fail, because TensorFlow is not (yet) smart enough to work out the dependencies between variable initializers. To work around this, you should change the loop that creates parameters to use initial_parameters_of_hypothesis_function[i].initialized_value(), which adds the necessary dependency:
parameters = []
for i in range(0, number_of_attributes, 1):
    parameters.append(tf.Variable(
        initial_parameters_of_hypothesis_function[i].initialized_value()))
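For illustration, here is a minimal, self-contained sketch of the same issue (the names are made up): initializing one tf.Variable directly from another may fail because the initializers run with no ordering guarantee, while initialized_value() makes the dependency explicit:

import tensorflow as tf

w = tf.Variable(tf.zeros([2]), name='w')
# v = tf.Variable(w)                      # may raise FailedPreconditionError at init time
v = tf.Variable(w.initialized_value())    # w is guaranteed to be initialized first

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(v))                    # [0. 0.]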
Run this:
init = tf.global_variables_initializer()
sess.run(init)
Or (depending on which TF version you have):
init = tf.initialize_all_variables()
sess.run(init)
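Either way, the key point is to build the whole graph first, create the initializer op, and run it before any other op. A minimal sketch of that pattern (variable names are illustrative only):

import tensorflow as tf

w = tf.Variable(tf.zeros([3]), name='weights')
b = tf.Variable(0.0, name='bias')

init = tf.global_variables_initializer()   # or tf.initialize_all_variables() on older TF

with tf.Session() as sess:
    sess.run(init)                          # must run before reading w or b
    print(sess.run([w, b]))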
I would like to share my solution: it worked once I replaced the line [session = tf.Session()] with [sess = tf.InteractiveSession()]. Hope this is useful to someone else.
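For context, InteractiveSession installs itself as the default session, so .run() and .eval() work without passing the session around explicitly; note that it does not remove the need to run the initializer. A small sketch (illustrative names):

import tensorflow as tf

sess = tf.InteractiveSession()
w = tf.Variable([2.0, 3.0], name='w')
tf.global_variables_initializer().run()    # uses the default (interactive) session
print(w.eval())                             # [2. 3.]
sess.close()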
There is another case of this error, related to the order in which the global-variable initializer is called. My code produced the similar error FailedPreconditionError (see above for traceback): Attempting to use uninitialized value W.
def linear(X, n_input, n_output, activation=None):
    W = tf.Variable(tf.random_normal([n_input, n_output], stddev=0.1), name='W')
    b = tf.Variable(tf.constant(0, dtype=tf.float32, shape=[n_output]), name='b')
    if activation is not None:
        h = tf.nn.tanh(tf.add(tf.matmul(X, W), b), name='h')
    else:
        h = tf.add(tf.matmul(X, W), b, name='h')
    return h
from tensorflow.python.framework import ops
ops.reset_default_graph()
g = tf.get_default_graph()
print([op.name for op in g.get_operations()])
with tf.Session() as sess:
    # RUN INIT
    sess.run(tf.global_variables_initializer())
    # But W is not in the graph yet, so the initializer does not know about it
    # EVAL then raises the error
    print(linear(np.array([[1.0, 2.0, 3.0]]).astype(np.float32), 3, 3).eval())
You should change it to the following:
from tensorflow.python.framework import ops
ops.reset_default_graph()
g = tf.get_default_graph()
print([op.name for op in g.get_operations()])
with tf.Session() as sess:
    # NOT RUNNING YET, JUST BUILDING THE GRAPH
    l = linear(np.array([[1.0, 2.0, 3.0]]).astype(np.float32), 3, 3)
    # RUN INIT
    sess.run(tf.global_variables_initializer())
    print([op.name for op in g.get_operations()])
    # ONLY EVAL AFTER INIT
    print(l.eval(session=sess))
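If for some reason variables must be created after the global initializer has already run, one workaround (a sketch reusing the linear() helper above) is to initialize just the late variables:

import numpy as np
import tensorflow as tf

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())     # ran too early: W and b do not exist yet
    h = linear(np.array([[1.0, 2.0, 3.0]]).astype(np.float32), 3, 3)
    # initialize only what is still uninitialized (the W and b created by linear())
    uninitialized = [v for v in tf.global_variables()
                     if not sess.run(tf.is_variable_initialized(v))]
    sess.run(tf.variables_initializer(uninitialized))
    print(h.eval(session=sess))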
There are usually two ways to initialize variables: 1) use sess.run(tf.global_variables_initializer()), as the earlier answers describe; or 2) load the graph from a checkpoint.
You can do it like this:
sess = tf.Session(config=config)
saver = tf.train.Saver(max_to_keep=3)
try:
    saver.restore(sess, tf.train.latest_checkpoint(FLAGS.model_dir))
    # start from the latest checkpoint; the sess will be initialized
    # by the variables in the latest checkpoint
except ValueError:
    # train from scratch
    init = tf.global_variables_initializer()
    sess.run(init)
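For the try/except above to ever find something to restore, a checkpoint must have been written earlier; a minimal sketch of the saving side (the directory and step interval here are made up):

import tensorflow as tf

w = tf.Variable(tf.zeros([3]), name='w')
saver = tf.train.Saver(max_to_keep=3)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(1000):
        # ... sess.run(train_op) ...
        if step % 100 == 0:
            saver.save(sess, '/tmp/model_dir/model.ckpt', global_step=step)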
A third way is to use tf.train.Supervisor. Its session will:
Create a session on 'master', recovering or initializing the model as needed, or wait for a session to be ready.
sv = tf.train.Supervisor([parameters])
sess = sv.prepare_or_wait_for_session()
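A hedged sketch of the Supervisor route (the logdir path is made up): the Supervisor builds its own init op and Saver, and prepare_or_wait_for_session() either restores from logdir or runs the initializer for you:

import tensorflow as tf

w = tf.Variable(tf.zeros([3]), name='w')       # define the model first
sv = tf.train.Supervisor(logdir='/tmp/mydir')  # hypothetical log directory
sess = sv.prepare_or_wait_for_session()
print(sess.run(w))                             # already initialized, no manual init needed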
Run both:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
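Running the local initializer matters because some ops, for example the tf.metrics ops and input queues, create local variables that global_variables_initializer() does not touch. A minimal sketch:

import tensorflow as tf

# tf.metrics.mean creates local variables (total, count) behind the scenes
value, update_op = tf.metrics.mean([1.0, 2.0, 3.0])

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())   # without this, update_op fails
    sess.run(update_op)
    print(sess.run(value))                       # 2.0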