如何将以下 tf 1.x 代码转换为 tf 2.0(对现有代码进行最少的更改)
How to Convert the following tf 1.x code to tf 2.0 (With minimal changes to existing code)
我正在将代码从 tensorflow 1.x
迁移到 tensorflow-2.0
。我用过tensorflow-2.0
里提供的转换脚本,还不错。但是脚本无法转换 tf.contrib
模块的代码。我想让下面的代码 tensorflow-2.0
兼容。
def dropout(input_tensor, dropout_prob):
    """Perform dropout.

    Args:
        input_tensor: float Tensor.
        dropout_prob: Python float. The probability of dropping out a value
            (NOT of *keeping* a dimension as in the TF 1.x `tf.nn.dropout`).

    Returns:
        A version of `input_tensor` with dropout applied.
    """
    if dropout_prob is None or dropout_prob == 0.0:
        return input_tensor
    # TF 2.x `tf.nn.dropout` takes the drop rate directly. The upgrade
    # script's `1 - (1.0 - dropout_prob)` round-trip is redundant and adds
    # floating-point error (e.g. 1 - (1.0 - 0.1) == 0.09999999999999998),
    # so pass the rate straight through instead.
    return tf.nn.dropout(input_tensor, rate=dropout_prob)
def layer_norm(input_tensor, name=None):
    """Run layer normalization on the last dimension of the tensor.

    `tf.contrib` was removed in TF 2.x. `tf.keras.layers.LayerNormalization`
    with `axis=-1` and `epsilon=1e-12` reproduces
    `tf.contrib.layers.layer_norm(begin_norm_axis=-1, begin_params_axis=-1)`,
    whose default epsilon was also 1e-12.

    Args:
        input_tensor: float Tensor to normalize.
        name: Optional name for the normalization layer's variable scope.

    Returns:
        The layer-normalized tensor, same shape as `input_tensor`.
    """
    norm_layer = tf.keras.layers.LayerNormalization(
        axis=-1, epsilon=1e-12, dtype=tf.float32, name=name)
    return norm_layer(input_tensor)
def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):
    """Apply layer normalization, then dropout, to `input_tensor`.

    Args:
        input_tensor: float Tensor.
        dropout_prob: Python float, probability of dropping a value.
        name: Optional name passed to the layer-norm scope.

    Returns:
        The normalized-and-dropped-out tensor.
    """
    normalized = layer_norm(input_tensor, name)
    return dropout(normalized, dropout_prob)
我遇到的错误:
1) 在已弃用的模块 tf.contrib 中使用了成员 tf.contrib.layers.layer_norm。
我在网上搜索找到了这个github issue
但是目前还不清楚如何迁移。
提前致谢。
对于层规范化,迁移到 Keras 层对我有用,并为我提供了类似的微调模型性能。
def dropout(input_tensor, dropout_prob):
    """Perform dropout.

    Args:
        input_tensor: float Tensor.
        dropout_prob: Python float. The probability of dropping out a value
            (NOT of *keeping* a dimension as in the TF 1.x `tf.nn.dropout`).

    Returns:
        A version of `input_tensor` with dropout applied.
    """
    # No-op when dropout is disabled or unset.
    if dropout_prob is None or dropout_prob == 0.0:
        return input_tensor
    # TF 2.x semantics: the second argument is the drop rate, not keep_prob.
    return tf.nn.dropout(input_tensor, dropout_prob)  # tf 2.10
def layer_norm(input_tensor, name=None):
    """Run layer normalization over the last axis of `input_tensor`.

    Uses epsilon=1e-12 to match the old `tf.contrib.layers.layer_norm`
    default, so fine-tuned model behavior is preserved.
    """
    return tf.keras.layers.LayerNormalization(
        axis=-1, name=name, epsilon=1e-12, dtype=tf.float32)(input_tensor)
def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):
    """Run layer normalization followed by dropout on `input_tensor`."""
    normed = layer_norm(input_tensor, name)
    return dropout(normed, dropout_prob)
需要注意的是，TF 2.x 的 tf.nn.dropout
采用的是 dropout 概率，而不是 TF 1.x 版本的 keep 概率；否则，对于 BERT 默认的 10% 丢弃率，您将丢弃 90% 的层输出。更多细节可以参考此处的官方 Transformer 编码器实现。
https://github.com/tensorflow/models/blob/master/official/nlp/modeling/networks/transformer_encoder.py
我正在将代码从 tensorflow 1.x
迁移到 tensorflow-2.0
。我用过tensorflow-2.0
里提供的转换脚本,还不错。但是脚本无法转换 tf.contrib
模块的代码。我想让下面的代码 tensorflow-2.0
兼容。
def dropout(input_tensor, dropout_prob):
    """Perform dropout.

    Args:
        input_tensor: float Tensor.
        dropout_prob: Python float. The probability of dropping out a value
            (NOT of *keeping* a dimension as in the TF 1.x `tf.nn.dropout`).

    Returns:
        A version of `input_tensor` with dropout applied.
    """
    if dropout_prob is None or dropout_prob == 0.0:
        return input_tensor
    # TF 2.x `tf.nn.dropout` takes the drop rate directly. The upgrade
    # script's `1 - (1.0 - dropout_prob)` round-trip is redundant and adds
    # floating-point error (e.g. 1 - (1.0 - 0.1) == 0.09999999999999998),
    # so pass the rate straight through instead.
    return tf.nn.dropout(input_tensor, rate=dropout_prob)
def layer_norm(input_tensor, name=None):
    """Run layer normalization on the last dimension of the tensor.

    `tf.contrib` was removed in TF 2.x. `tf.keras.layers.LayerNormalization`
    with `axis=-1` and `epsilon=1e-12` reproduces
    `tf.contrib.layers.layer_norm(begin_norm_axis=-1, begin_params_axis=-1)`,
    whose default epsilon was also 1e-12.

    Args:
        input_tensor: float Tensor to normalize.
        name: Optional name for the normalization layer's variable scope.

    Returns:
        The layer-normalized tensor, same shape as `input_tensor`.
    """
    norm_layer = tf.keras.layers.LayerNormalization(
        axis=-1, epsilon=1e-12, dtype=tf.float32, name=name)
    return norm_layer(input_tensor)
def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):
    """Apply layer normalization, then dropout, to `input_tensor`.

    Args:
        input_tensor: float Tensor.
        dropout_prob: Python float, probability of dropping a value.
        name: Optional name passed to the layer-norm scope.

    Returns:
        The normalized-and-dropped-out tensor.
    """
    normalized = layer_norm(input_tensor, name)
    return dropout(normalized, dropout_prob)
我遇到的错误:
1) 在已弃用的模块 tf.contrib 中使用了成员 tf.contrib.layers.layer_norm。
我在网上搜索找到了这个 GitHub issue，
但是目前还不清楚如何迁移。
提前致谢。
对于层规范化,迁移到 Keras 层对我有用,并为我提供了类似的微调模型性能。
def dropout(input_tensor, dropout_prob):
    """Perform dropout.

    Args:
        input_tensor: float Tensor.
        dropout_prob: Python float. The probability of dropping out a value
            (NOT of *keeping* a dimension as in the TF 1.x `tf.nn.dropout`).

    Returns:
        A version of `input_tensor` with dropout applied.
    """
    # No-op when dropout is disabled or unset.
    if dropout_prob is None or dropout_prob == 0.0:
        return input_tensor
    # TF 2.x semantics: the second argument is the drop rate, not keep_prob.
    return tf.nn.dropout(input_tensor, dropout_prob)  # tf 2.10
def layer_norm(input_tensor, name=None):
    """Run layer normalization over the last axis of `input_tensor`.

    Uses epsilon=1e-12 to match the old `tf.contrib.layers.layer_norm`
    default, so fine-tuned model behavior is preserved.
    """
    return tf.keras.layers.LayerNormalization(
        axis=-1, name=name, epsilon=1e-12, dtype=tf.float32)(input_tensor)
def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):
    """Run layer normalization followed by dropout on `input_tensor`."""
    normed = layer_norm(input_tensor, name)
    return dropout(normed, dropout_prob)
需要注意的是，TF 2.x 的 tf.nn.dropout
采用的是 dropout 概率，而不是 TF 1.x 版本的 keep 概率；否则，对于 BERT 默认的 10% 丢弃率，您将丢弃 90% 的层输出。更多细节可以参考此处的官方 Transformer 编码器实现。
https://github.com/tensorflow/models/blob/master/official/nlp/modeling/networks/transformer_encoder.py