使用@tf.function 时,在 Tensorflow 2.0 中训练从相同 class 定义的多个模型失败

Training multiple models defined from the same class in Tensorflow 2.0 fails when using @tf.function

我正在使用 Tensorflow 2.1 创建自定义模型和自定义训练循环。我的目标是比较神经网络不同配置的准确性。具体来说,在这种情况下,我正在比较具有不同潜在维度的自动编码器的重建误差。因此,我正在为一个潜在维度训练我的网络,然后计算测试误差,然后我为另一个潜在维度重做这个过程,依此类推。通过这个过程,我想创建这样的图:

绘图示例:

为了加快训练速度,我想在训练循环的反向传播部分使用 @tf.function 装饰器。但是,当我尝试训练多个不同的网络时,循环遍历潜在维度时出现错误。见下文:

ValueError: in converted code:

    <ipython-input-19-78bafad21717>:41 grad  *
        loss_value = tf.losses.mean_squared_error(inputs, model(inputs))
    /tensorflow-2.1.0/python3.6/tensorflow_core/python/keras/engine/base_layer.py:778 __call__
        outputs = call_fn(cast_inputs, *args, **kwargs)
    <ipython-input-19-78bafad21717>:33 call  *
        x_enc = self.encoder(inp)
    /tensorflow-2.1.0/python3.6/tensorflow_core/python/keras/engine/base_layer.py:778 __call__
        outputs = call_fn(cast_inputs, *args, **kwargs)
    <ipython-input-19-78bafad21717>:9 call  *
        x = self.dense1(inp)
    /tensorflow-2.1.0/python3.6/tensorflow_core/python/keras/engine/base_layer.py:748 __call__
        self._maybe_build(inputs)
    /tensorflow-2.1.0/python3.6/tensorflow_core/python/keras/engine/base_layer.py:2116 _maybe_build
        self.build(input_shapes)
    /tensorflow-2.1.0/python3.6/tensorflow_core/python/keras/layers/core.py:1113 build
        trainable=True)
    /tensorflow-2.1.0/python3.6/tensorflow_core/python/keras/engine/base_layer.py:446 add_weight
        caching_device=caching_device)
    /tensorflow-2.1.0/python3.6/tensorflow_core/python/training/tracking/base.py:744 _add_variable_with_custom_getter
        **kwargs_for_getter)
    /tensorflow-2.1.0/python3.6/tensorflow_core/python/keras/engine/base_layer_utils.py:142 make_variable
        shape=variable_shape if variable_shape else None)
    /tensorflow-2.1.0/python3.6/tensorflow_core/python/ops/variables.py:258 __call__
        return cls._variable_v1_call(*args, **kwargs)
    /tensorflow-2.1.0/python3.6/tensorflow_core/python/ops/variables.py:219 _variable_v1_call
        shape=shape)
    /tensorflow-2.1.0/python3.6/tensorflow_core/python/ops/variables.py:65 getter
        return captured_getter(captured_previous, **kwargs)
    /tensorflow-2.1.0/python3.6/tensorflow_core/python/eager/def_function.py:502 invalid_creator_scope
        "tf.function-decorated function tried to create "

    ValueError: tf.function-decorated function tried to create variables on non-first call.

当我删除 @tf.function 装饰器时,就不会出现这个错误。我猜这与使用装饰器时 Tensorflow 会创建计算图有关:当我创建网络的另一个实例时,旧的计算图仍然存在,与新实例不匹配,因此抛出错误。但我对此完全不确定,因为我相信我遗漏了一些关于 Tensorflow 的基础知识!

下面是我的代码重现错误的一个非常简单的版本。我试图删除代码中所有不必要的部分,以使其更易于阅读和调试。此外,为了这个问题,我正在生成一个非常简单的训练和测试集。 我已经尝试过 tf.keras.backend.clear_session() 函数但没有任何运气。

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# Encoder
class build_encoder(tf.keras.Model):
    """Encoder: compresses a batch of inputs down to ``latent_dim`` features.

    Two fully connected ReLU layers; the second one is the bottleneck.
    """

    def __init__(self, latent_dim):
        super(build_encoder, self).__init__()
        # Hidden projection, then the bottleneck of size `latent_dim`.
        self.dense1 = tf.keras.layers.Dense(32, activation='relu', use_bias=True)
        self.dense2 = tf.keras.layers.Dense(latent_dim, activation='relu', use_bias=True)

    def call(self, inp):
        """Forward pass: inp -> 32-unit hidden layer -> latent vector."""
        hidden = self.dense1(inp)
        return self.dense2(hidden)

# Decoder
class build_decoder(tf.keras.Model):
    """Decoder: expands a latent vector back to the 10-dimensional input space.

    Mirror of the encoder: a 32-unit hidden ReLU layer followed by a
    10-unit ReLU output layer.
    """

    def __init__(self,):
        super(build_decoder, self).__init__()
        # Hidden expansion, then reconstruction of the 10 input features.
        self.dense1 = tf.keras.layers.Dense(32, activation='relu', use_bias=True)
        self.dense2 = tf.keras.layers.Dense(10, activation='relu', use_bias=True)

    def call(self, inp):
        """Forward pass: latent vector -> hidden layer -> reconstruction."""
        hidden = self.dense1(inp)
        return self.dense2(hidden)

# Full Autoencoder
class Autoencoder(tf.keras.Model):
    """Full autoencoder: encoder bottleneck of size ``latent_dim``, then decoder.

    The reconstruction quality as a function of `latent_dim` is the
    quantity being studied in this experiment.
    """

    def __init__(self, latent_dim=5):
        super(Autoencoder, self).__init__()
        # Compose the two sub-models defined above.
        self.encoder = build_encoder(latent_dim)
        self.decoder = build_decoder()

    def call(self, inp):
        """Encode the batch, then decode it back to input space."""
        latent = self.encoder(inp)
        return self.decoder(latent)

#### Here is the backpropagation with @tf.function decorator ####
@tf.function
def grad(model, inputs):
    """Compute the reconstruction MSE loss and its gradients for `model`.

    Returns a tuple (loss_value, gradients w.r.t. model.trainable_variables).

    NOTE(review): this decorated function is the source of the reported
    error. @tf.function traces ONE graph on the first call; when a second,
    freshly constructed model is passed in later, its layers try to create
    their variables inside that already-traced graph, which raises
    "tf.function-decorated function tried to create variables on
    non-first call". Creating a new traced function per model (or building
    each model's variables before the first traced call) avoids this.
    """
    with tf.GradientTape() as tape:
        loss_value = tf.losses.mean_squared_error(inputs, model(inputs))
    return loss_value, tape.gradient(loss_value, model.trainable_variables)

# Training loop function
def train(x_train, model, num_epochs, batch_size, optimizer):
    """Train `model` on `x_train` with plain mini-batch gradient descent.

    Args:
        x_train: float32 array/tensor of shape (num_samples, num_features).
        model: callable Keras model exposing `trainable_variables`.
        num_epochs: number of full passes over the data.
        batch_size: mini-batch size.
        optimizer: tf.keras optimizer used to apply the gradients.

    Returns:
        List with the mean full-dataset reconstruction MSE after each epoch.
    """
    train_loss = []

    for epoch in range(num_epochs):
        # BUG FIX: tf.random.shuffle does NOT shuffle in place -- it returns
        # a new shuffled tensor. The original code discarded that result, so
        # the batches were never actually reshuffled between epochs. Rebind
        # x_train so the shuffle takes effect.
        x_train = tf.random.shuffle(x_train)
        for i in range(0, len(x_train), batch_size):
            x_inp = x_train[i: i + batch_size]
            loss_value, grads = grad(model, x_inp)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

        # Track the full-dataset reconstruction error once per epoch.
        train_loss.append(tf.reduce_mean(tf.losses.mean_squared_error(x_train, model(x_train))).numpy())

        if epoch % 100 == 0:
            print("Epoch: {}, Train loss: {:.9f}".format(epoch, train_loss[epoch]))

    return train_loss

#### Generating simple training and test data
num_train = 10000
num_test = 1000

# Synthetic data: 10 features, the last four zeroed out so that a latent
# dimension of about 6 should suffice for near-perfect reconstruction.
# (Removed the unused `s =` alias from the original chained assignment.)
x_train = np.random.uniform(0, 1, (num_train, 10)).astype(np.float32)
x_train[:, 6:10] = 0

x_test = np.random.uniform(0, 1, (num_test, 10)).astype(np.float32)
x_test[:, 6:10] = 0
###

batch_size = 8
num_epochs = 10000

test_loss = []

# Looping over the latent dimensions
for latent_dim in range(1, 10):

    # BUG FIX: the original passed latent_dim=3 on every iteration, so all
    # nine trained models had the same bottleneck and the plot compared
    # nothing. Use the loop variable instead.
    model = Autoencoder(latent_dim=latent_dim)  # Creating an instance of my Autoencoder
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.00005)  # Defining an optimizer
    train_loss = train(x_train, model=model, num_epochs=num_epochs,
                       batch_size=batch_size, optimizer=optimizer)  # Training the network

    test_loss.append(tf.reduce_mean(tf.losses.mean_squared_error(x_test, model(x_test))).numpy())

plt.figure()
plt.plot(test_loss, linewidth=1.5)
plt.grid(True)
plt.show()

你提供的代码片段中有一处错误:解码器最后一个全连接层的单元数应与输入维度一致。

我已将最后一个全连接层的单元数从 6 改为 10:

# Decoder
class build_decoder(tf.keras.Model):
    """Decoder whose output width (10 units) matches the input dimensionality."""

    def __init__(self,):
        super(build_decoder, self).__init__()
        # 32-unit hidden layer, then a 10-unit reconstruction layer.
        self.dense1 = tf.keras.layers.Dense(32, activation='relu', use_bias=True)
        self.dense2 = tf.keras.layers.Dense(10, activation='relu', use_bias=True)

    def call(self, inp):
        """Map a latent vector back to a 10-dimensional reconstruction."""
        hidden = self.dense1(inp)
        return self.dense2(hidden)

关于你关于训练多模型的问题。

报错信息 "ValueError: tf.function-decorated function tried to create variables on non-first call" 表示:被 @tf.function 修饰的函数在第一次调用之后又尝试创建新的变量。这是不允许的,因为该函数在首次调用时已经被追踪(trace)并转换成了静态计算图。

我修改了你的反向传播方法,我把你原来的代码注释掉了,观察区别。

#### Here is the backpropagation with @tf.function decorator ####
# @tf.function
# def grad(model, inputs):
#     with tf.GradientTape() as tape:
#         loss_value = tf.losses.mean_squared_error(inputs, model(inputs))
#     return loss_value, tape.gradient(loss_value, model.trainable_variables)

@tf.function
def MSE(y_true, y_pred):
    """Graph-compiled wrapper around tf.keras.losses.MSE.

    Only this pure loss computation is traced; the model call stays outside
    the traced function so new models can still create their variables.
    """
    mse_value = tf.keras.losses.MSE(y_true, y_pred)
    return mse_value

def backprop(inputs, model):
    """Run one forward pass and return (loss, gradients).

    Deliberately NOT decorated with @tf.function, so calling it with a
    freshly created model does not trip graph-tracing variable creation.
    """
    with tf.GradientTape() as tape:
        reconstruction = model(inputs)
        loss_value = MSE(inputs, reconstruction)
    grads = tape.gradient(loss_value, model.trainable_variables)
    return loss_value, grads

def gradient_func(model, inputs):
    """Adapter preserving the original grad(model, inputs) calling convention."""
    loss_value, grads = backprop(inputs, model)
    return loss_value, grads

原始代码的罪魁祸首在于把 model(inputs) 作为损失函数的输入放进了被 @tf.function 装饰的函数里:当你用 @tf.function 装饰一个函数时,它内部调用的所有函数都会被一并追踪进同一张计算图,这意味着模型的前向调用也被固化在了图里。

此外,还有一种无需为每个模型重复使用同一个变量名即可训练多个模型的方法:把它们放进一个数组(列表)中。

# Index 0 holds a dummy entry so that model_array[latent_dim] lines up
# directly with the 1-based loop variable.
model_array = [0]
# Looping over the latent dimensions
for latent_dim in range(1, 10):
    # Creating an instance of my Autoencoder for this latent dimension.
    model_array.append(Autoencoder(latent_dim))
    # Defining an optimizer
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.00005)
    # Training the network
    train_loss = train(x_train, model=model_array[latent_dim], num_epochs=num_epochs, batch_size=batch_size, optimizer=optimizer)
    test_loss.append(tf.reduce_mean(tf.losses.mean_squared_error(x_test, model_array[latent_dim](x_test))).numpy())

这会将模型排列成数组,更容易访问和调试。

这里是完整的修改代码。

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# Encoder
class build_encoder(tf.keras.Model):
    """Encoder: compresses a batch of inputs down to ``latent_dim`` features."""

    def __init__(self, latent_dim):
        super(build_encoder, self).__init__()
        # Hidden projection, then the bottleneck of size `latent_dim`.
        self.dense1 = tf.keras.layers.Dense(32, activation='relu', use_bias=True)
        self.dense2 = tf.keras.layers.Dense(latent_dim, activation='relu', use_bias=True)

    def call(self, inp):
        """Forward pass: inp -> 32-unit hidden layer -> latent vector."""
        hidden = self.dense1(inp)
        return self.dense2(hidden)

# Decoder
class build_decoder(tf.keras.Model):
    """Decoder: expands a latent vector back to the 10-dimensional input space."""

    def __init__(self,):
        super(build_decoder, self).__init__()
        # 32-unit hidden layer, then a 10-unit reconstruction layer.
        self.dense1 = tf.keras.layers.Dense(32, activation='relu', use_bias=True)
        self.dense2 = tf.keras.layers.Dense(10, activation='relu', use_bias=True)

    def call(self, inp):
        """Map a latent vector back to a 10-dimensional reconstruction."""
        hidden = self.dense1(inp)
        return self.dense2(hidden)

# Full Autoencoder
class Autoencoder(tf.keras.Model):
    """Full autoencoder: encoder bottleneck of size ``latent_dim``, then decoder."""

    def __init__(self, latent_dim=5):
        super(Autoencoder, self).__init__()
        # Compose the two sub-models defined above.
        self.encoder = build_encoder(latent_dim)
        self.decoder = build_decoder()

    def call(self, inp):
        """Encode the batch, then decode it back to input space."""
        latent = self.encoder(inp)
        return self.decoder(latent)

#### Here is the backpropagation with @tf.function decorator ####
# @tf.function
# def grad(model, inputs):
#     with tf.GradientTape() as tape:
#         loss_value = tf.losses.mean_squared_error(inputs, model(inputs))
#     return loss_value, tape.gradient(loss_value, model.trainable_variables)

@tf.function
def MSE(y_true, y_pred):
    """Graph-compiled wrapper around tf.keras.losses.MSE.

    Only this pure loss computation is traced, so it never creates
    variables and is safe to reuse across many freshly built models.
    """
    mse_value = tf.keras.losses.MSE(y_true, y_pred)
    return mse_value

def backprop(inputs, model):
    """Run one forward pass and return (loss, gradients).

    Deliberately NOT decorated with @tf.function, so calling it with a
    newly created model does not trip graph-tracing variable creation.
    """
    with tf.GradientTape() as tape:
        reconstruction = model(inputs)
        loss_value = MSE(inputs, reconstruction)
    grads = tape.gradient(loss_value, model.trainable_variables)
    return loss_value, grads

def gradient_func(model, inputs):
    """Adapter preserving the original grad(model, inputs) calling convention."""
    loss_value, grads = backprop(inputs, model)
    return loss_value, grads

# Training loop function
def train(x_train, model, num_epochs, batch_size, optimizer):
    """Train `model` on `x_train` with plain mini-batch gradient descent.

    Args:
        x_train: float32 array/tensor of shape (num_samples, num_features).
        model: callable Keras model exposing `trainable_variables`.
        num_epochs: number of full passes over the data.
        batch_size: mini-batch size.
        optimizer: tf.keras optimizer used to apply the gradients.

    Returns:
        List with the mean full-dataset reconstruction MSE after each epoch.
    """
    train_loss = []

    for epoch in range(num_epochs):
        # BUG FIX: tf.random.shuffle does NOT shuffle in place -- it returns
        # a new shuffled tensor. The original code discarded that result, so
        # the batches were never actually reshuffled between epochs. Rebind
        # x_train so the shuffle takes effect.
        x_train = tf.random.shuffle(x_train)
        for i in range(0, len(x_train), batch_size):
            x_inp = x_train[i: i + batch_size]
            loss_value, grads = gradient_func(model, x_inp)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

        # Track the full-dataset reconstruction error once per epoch.
        train_loss.append(tf.reduce_mean(tf.losses.mean_squared_error(x_train, model(x_train))).numpy())

        if epoch % 100 == 0:
            print("Epoch: {}, Train loss: {:.9f}".format(epoch, train_loss[epoch]))

    return train_loss

#### Generating simple training and test data
num_train = 10000
num_test = 1000

# Synthetic data: 10 features, the last four zeroed out so that a latent
# dimension of about 6 should suffice for near-perfect reconstruction.
# (Removed the unused `s =` alias from the original chained assignment.)
x_train = np.random.uniform(0, 1, (num_train, 10)).astype(np.float32)
x_train[:, 6:10] = 0

x_test = np.random.uniform(0, 1, (num_test, 10)).astype(np.float32)
x_test[:, 6:10] = 0
###

batch_size = 8
num_epochs = 10000

test_loss = []

# Index 0 holds a dummy entry so that model_array[latent_dim] lines up
# directly with the 1-based loop variable.
model_array = [0]
# Looping over the latent dimensions
for latent_dim in range(1, 10):
    # Creating an instance of my Autoencoder for this latent dimension.
    model_array.append(Autoencoder(latent_dim))
    # Defining an optimizer
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.00005)
    # Training the network
    train_loss = train(x_train, model=model_array[latent_dim], num_epochs=num_epochs, batch_size=batch_size, optimizer=optimizer)
    test_loss.append(tf.reduce_mean(tf.losses.mean_squared_error(x_test, model_array[latent_dim](x_test))).numpy())

plt.figure()
plt.plot(range(1, 10), test_loss, linewidth=1.5)
plt.grid(True)
plt.show()

TF 官方文档中也有关于 @tf.function 与 AutoGraph 的简短讨论,参见这个链接。

欢迎提问,希望对您有所帮助。