GradientTape: computing saliency in the loss function
I am trying to build an LSTM network that classifies sentences and uses saliency to provide an explanation for its classifications. The network must learn both from the true class y_true and from the words it should not pay attention to, encoded by Z (a binary mask).
This paper inspired us to come up with the loss function. This is what I want the loss function to look like: a classification cost plus an explanation (saliency) cost, i.e. roughly loss = classification_loss + saliency_loss. The classification cost ("Coût de classification" in the paper's figure) translates to classification_loss in the code below, and the explanation (saliency) cost ("Coût d'explication (saillance)") translates to saliency_loss, which is built from the gradient of the output with respect to the input. I tried to implement it with a custom model in Keras, with TensorFlow as the backend:
import tensorflow as tf
from tensorflow import GradientTape
from tensorflow.keras import Sequential, metrics
from tensorflow.keras import backend as K
from tensorflow.keras.losses import categorical_crossentropy

loss_tracker = metrics.Mean(name="loss")
classification_loss_tracker = metrics.Mean(name="classification_loss")
saliency_loss_tracker = metrics.Mean(name="saliency_loss")
accuracy_tracker = metrics.CategoricalAccuracy(name="accuracy")

class CustomSequentialModel(Sequential):

    def _train_test_step(self, data, training):
        # Unpack the data
        X = data[0]["X"]
        Z = data[0]["Z"]  # binary mask (1 for important words)
        y_true = data[1]

        # gradient tape requires "float32" instead of "int32"
        # X.shape = (None, MAX_SEQUENCE_LENGTH, EMBEDDING_DIM)
        X = tf.cast(X, tf.float32)

        # persistent=True because we call `gradient` more than once
        with GradientTape(persistent=True) as tape:
            # The tape will record everything that happens to X
            # for automatic differentiation later on (used to compute saliency)
            tape.watch(X)
            # Forward pass
            y_pred = self(X, training=training)

            # (1) Compute the classification_loss
            classification_loss = K.mean(
                categorical_crossentropy(y_true, y_pred)
            )

            # (2) Compute the saliency loss
            # (2.1) Take the log of the maximum predicted probability
            log_prediction_proba = K.log(K.max(y_pred))

        # (2.2) Compute the gradient of the output wrt the input
        # saliency.shape is (None, MAX_SEQUENCE_LENGTH, None)
        # why isn't it (None, MAX_SEQUENCE_LENGTH, EMBEDDING_DIM) ?!
        saliency = tape.gradient(log_prediction_proba, X)
        # (2.3) Sum along the embedding dimension
        saliency = K.sum(saliency, axis=2)
        # (2.4) Mask with (1 - Z) and sum
        saliency_loss = K.sum(K.square(saliency) * (1 - Z))
        # => ValueError: No gradients provided for any variable
        loss = classification_loss + saliency_loss

        trainable_vars = self.trainable_variables
        # ValueError caused by the '+ saliency_loss'
        gradients = tape.gradient(loss, trainable_vars)
        del tape  # garbage collection

        if training:
            # Update weights
            self.optimizer.apply_gradients(zip(gradients, trainable_vars))

        # Update metrics
        saliency_loss_tracker.update_state(saliency_loss)
        classification_loss_tracker.update_state(classification_loss)
        loss_tracker.update_state(loss)
        accuracy_tracker.update_state(y_true, y_pred)

        # Return a dict mapping metric names to current value
        return {m.name: m.result() for m in self.metrics}

    def train_step(self, data):
        return self._train_test_step(data, True)

    def test_step(self, data):
        return self._train_test_step(data, False)

    @property
    def metrics(self):
        return [
            loss_tracker,
            classification_loss_tracker,
            saliency_loss_tracker,
            accuracy_tracker,
        ]
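For completeness, this is roughly how I build and fit the model (a sketch; the architecture, names, and hyperparameters here are placeholders, not my real setup). Passing a dict as x is what makes both X and Z visible inside train_step:

    from tensorflow.keras import layers

    model = CustomSequentialModel([
        layers.LSTM(64, input_shape=(MAX_SEQUENCE_LENGTH, EMBEDDING_DIM)),
        layers.Dense(NUM_CLASSES, activation="softmax"),
    ])
    # No loss/metrics passed to compile(): they are handled
    # entirely inside train_step/test_step above.
    model.compile(optimizer="adam")
    model.fit({"X": X_train, "Z": Z_train}, y_train, epochs=3)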
I manage to compute both classification_loss and saliency_loss, and each comes out as a scalar. However, this works: tape.gradient(classification_loss, trainable_vars), while this does not: tape.gradient(classification_loss + saliency_loss, trainable_vars), which throws ValueError: No gradients provided for any variable.
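For reference, the smallest standalone snippet I could reduce the symptom to looks like this (toy variables of my own, not the actual model):

    import tensorflow as tf

    x = tf.Variable(2.0)

    with tf.GradientTape(persistent=True) as tape:
        y = x * x      # computed inside the tape context

    z = y * 3.0        # computed after leaving the context

    print(tape.gradient(y, x))  # tf.Tensor(4.0, shape=(), dtype=float32)
    print(tape.gradient(z, x))  # None
    del tape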
(I have also tried decorating train_step() with @tf.function.)
You are computing things outside the tape context (after the first gradient call) and then trying to take more gradients afterwards. This doesn't work: all operations to be differentiated need to happen inside the context manager. I would suggest restructuring your code with two nested tapes, as follows:
with GradientTape() as loss_tape:
    with GradientTape() as saliency_tape:
        # The inner tape records everything that happens to X
        # for automatic differentiation later on (used to compute saliency)
        saliency_tape.watch(X)
        # Forward pass
        y_pred = self(X, training=training)

        # (2) Compute the saliency loss
        # (2.1) Take the log of the maximum predicted probability
        log_prediction_proba = K.log(K.max(y_pred))

    # (2.2) Compute the gradient of the output wrt the input
    saliency = saliency_tape.gradient(log_prediction_proba, X)
    # (2.3) Sum along the embedding dimension
    saliency = K.sum(saliency, axis=2)
    # (2.4) Mask with (1 - Z) and sum
    saliency_loss = K.sum(K.square(saliency) * (1 - Z))

    # (1) Compute the classification_loss
    classification_loss = K.mean(
        categorical_crossentropy(y_true, y_pred)
    )

    loss = classification_loss + saliency_loss

trainable_vars = self.trainable_variables
gradients = loss_tape.gradient(loss, trainable_vars)
Now we have one tape responsible for computing the input gradients for the saliency. Around it we have another tape that tracks those operations and can then compute gradients of those gradients (i.e. gradients of the saliency). The outer tape also records the classification loss. I moved the classification loss into the context of the outer tape, because the inner tape doesn't need it. Also note how the addition of the two losses happens inside the context of the outer tape: everything has to happen there, or else the computational graph is lost/incomplete and the gradients cannot be computed.
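Slotted back into your _train_test_step, the whole method would look roughly like this (a sketch under the same assumptions as your code; I have also cast Z to float so that the mask multiplication type-checks):

    def _train_test_step(self, data, training):
        X = tf.cast(data[0]["X"], tf.float32)  # (None, MAX_SEQUENCE_LENGTH, EMBEDDING_DIM)
        Z = tf.cast(data[0]["Z"], tf.float32)  # binary mask (1 for important words)
        y_true = data[1]

        with GradientTape() as loss_tape:
            with GradientTape() as saliency_tape:
                saliency_tape.watch(X)
                y_pred = self(X, training=training)
                log_prediction_proba = K.log(K.max(y_pred))

            # First-order gradient wrt the input, taken inside the
            # outer tape's context so these ops are recorded too
            saliency = saliency_tape.gradient(log_prediction_proba, X)
            saliency = K.sum(saliency, axis=2)
            saliency_loss = K.sum(K.square(saliency) * (1 - Z))

            classification_loss = K.mean(
                categorical_crossentropy(y_true, y_pred)
            )
            loss = classification_loss + saliency_loss

        # Differentiating `loss` involves second-order gradients,
        # since saliency_loss itself contains a gradient
        trainable_vars = self.trainable_variables
        gradients = loss_tape.gradient(loss, trainable_vars)
        if training:
            self.optimizer.apply_gradients(zip(gradients, trainable_vars))

        saliency_loss_tracker.update_state(saliency_loss)
        classification_loss_tracker.update_state(classification_loss)
        loss_tracker.update_state(loss)
        accuracy_tracker.update_state(y_true, y_pred)
        return {m.name: m.result() for m in self.metrics}

One caveat: this setup needs second-order gradients through the whole forward pass, and not every fused kernel defines them; if TensorFlow complains about missing second-order gradients for a layer (worth double-checking for the LSTM implementation in use), that is a separate issue from the tape structure.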