optimizers minimize error: 'float' object has no attribute 'dtype'
optimizers minimize error: 'float' object has no attribute 'dtype'
我是tensorflow的初学者。 tensorflow 2.0的梯度计算存在一些问题。有人可以帮助我吗?
这是我的代码。错误提示为:
if not t.dtype.is_floating:
AttributeError: 'float' object has no attribute 'dtype'
我试过:
w = tf.Variable([1.0,1.0],dtype = tf.float32)
消息变为:
TypeError: 'tensorflow.python.framework.ops.EagerTensor' object is not callable
import tensorflow as tf
import numpy as np

# Synthetic linear-regression data: y = 2*x + noise + 10.
train_X = np.linspace(-1, 1, 100)
train_Y = 2 * train_X + np.random.randn(*train_X.shape) * 0.33 + 10

# w must be a tf.Variable (not a plain Python list) so the optimizer can
# track it and compute gradients; a float list triggers
# "AttributeError: 'float' object has no attribute 'dtype'".
w = tf.Variable([1.0, 1.0], dtype=tf.float32)
opt = tf.keras.optimizers.SGD(0.1)
mse = tf.keras.losses.MeanSquaredError()

for i in range(20):
    print("epoch:", i, "w:", w.numpy())
    # minimize() expects a zero-argument callable that recomputes the loss,
    # not an already-evaluated tensor; passing a tensor triggers
    # "TypeError: 'EagerTensor' object is not callable".
    opt.minimize(lambda: mse(train_Y, w[0] * train_X + w[1]), var_list=[w])
我不知道如何修复它。感谢您提出的任何意见。
您没有正确使用 GradientTape。下面的代码演示了应该如何使用它。
我创建了一个只含单个单元的 Dense（全连接）层模型，用来模拟您的 w 变量。
import tensorflow as tf
import numpy as np

# Synthetic linear-regression data: y = 2*x + noise + 10, shaped (100, 1)
# so it can be fed to a Keras model.
train_X = np.linspace(-1, 1, 100)
train_X = np.expand_dims(train_X, axis=-1)
print(train_X.shape)  # (100, 1)
train_Y = 2 * train_X + np.random.randn(*train_X.shape) * 0.33 + 10
print(train_Y.shape)  # (100, 1)

# A single-unit Dense layer with a bias plays the role of w[0]*x + w[1].
# Renamed from "input" to avoid shadowing the Python builtin input().
inputs = tf.keras.layers.Input(shape=(1,))
outputs = tf.keras.layers.Dense(1)(inputs)  # use_bias is True by default
model = tf.keras.Model(inputs=inputs, outputs=outputs)

opt = tf.keras.optimizers.SGD(0.1)
mse = tf.keras.losses.MeanSquaredError()

for i in range(20):
    print('Epoch: ', i)
    # Record the forward pass inside the tape so gradients of the loss
    # with respect to the model's variables can be computed afterwards.
    with tf.GradientTape() as grad_tape:
        logits = model(train_X, training=True)
        model_loss = mse(train_Y, logits)
    print('Loss =', model_loss.numpy())
    gradients = grad_tape.gradient(model_loss, model.trainable_variables)
    opt.apply_gradients(zip(gradients, model.trainable_variables))
我是tensorflow的初学者。 tensorflow 2.0的梯度计算存在一些问题。有人可以帮助我吗?
这是我的代码。错误提示为:
if not t.dtype.is_floating:
AttributeError: 'float' object has no attribute 'dtype'
我试过:
w = tf.Variable([1.0,1.0],dtype = tf.float32)
消息变为:
TypeError: 'tensorflow.python.framework.ops.EagerTensor' object is not callable
import tensorflow as tf
import numpy as np

# Synthetic linear-regression data: y = 2*x + noise + 10.
train_X = np.linspace(-1, 1, 100)
train_Y = 2 * train_X + np.random.randn(*train_X.shape) * 0.33 + 10

# w must be a tf.Variable (not a plain Python list) so the optimizer can
# track it and compute gradients; a float list triggers
# "AttributeError: 'float' object has no attribute 'dtype'".
w = tf.Variable([1.0, 1.0], dtype=tf.float32)
opt = tf.keras.optimizers.SGD(0.1)
mse = tf.keras.losses.MeanSquaredError()

for i in range(20):
    print("epoch:", i, "w:", w.numpy())
    # minimize() expects a zero-argument callable that recomputes the loss,
    # not an already-evaluated tensor; passing a tensor triggers
    # "TypeError: 'EagerTensor' object is not callable".
    opt.minimize(lambda: mse(train_Y, w[0] * train_X + w[1]), var_list=[w])
我不知道如何修复它。感谢您提出的任何意见。
您没有正确使用 GradientTape。下面的代码演示了应该如何使用它。
我创建了一个只含单个单元的 Dense（全连接）层模型，用来模拟您的 w 变量。
import tensorflow as tf
import numpy as np

# Synthetic linear-regression data: y = 2*x + noise + 10, shaped (100, 1)
# so it can be fed to a Keras model.
train_X = np.linspace(-1, 1, 100)
train_X = np.expand_dims(train_X, axis=-1)
print(train_X.shape)  # (100, 1)
train_Y = 2 * train_X + np.random.randn(*train_X.shape) * 0.33 + 10
print(train_Y.shape)  # (100, 1)

# A single-unit Dense layer with a bias plays the role of w[0]*x + w[1].
# Renamed from "input" to avoid shadowing the Python builtin input().
inputs = tf.keras.layers.Input(shape=(1,))
outputs = tf.keras.layers.Dense(1)(inputs)  # use_bias is True by default
model = tf.keras.Model(inputs=inputs, outputs=outputs)

opt = tf.keras.optimizers.SGD(0.1)
mse = tf.keras.losses.MeanSquaredError()

for i in range(20):
    print('Epoch: ', i)
    # Record the forward pass inside the tape so gradients of the loss
    # with respect to the model's variables can be computed afterwards.
    with tf.GradientTape() as grad_tape:
        logits = model(train_X, training=True)
        model_loss = mse(train_Y, logits)
    print('Loss =', model_loss.numpy())
    gradients = grad_tape.gradient(model_loss, model.trainable_variables)
    opt.apply_gradients(zip(gradients, model.trainable_variables))