Keras 中的自定义损失函数,根据 y_pred 和 y_true 的值进行惩罚
Custom loss functions in Keras with penalty depending on the values of y_pred and y_true
我需要 Keras 中的自定义损失函数来解决回归问题。
我必须预测两个值 (y1, y2),但在以下情况下我想加大对误差的惩罚:
if y1_pred > v1 and y1_true < v1:
or
if y2_pred < v2 and y2_true > v2:
我需要类似于:
if y1_pred > v1 and y1_true < v1:
p = 1 + (k * (y1_pred-y1_true))
K.mean(K.square(y1_pred-y1_true) * p)
else:
K.mean(K.square(y1_pred-y1_true))
if y2_pred < v2 and y2_true > v2:
p = 1 + (k * (y2_true-y2_pred))
K.mean(K.square(y2_pred-y2_true) * p)
else:
K.mean(K.square(y2_pred-y2_true))
v1、v2 和 k 是常量。
尝试使用 tf.where:
import tensorflow as tf
def custom_loss1(v1 = 0.7, v2 = 1, k =0.5):
    """Build an MSE loss that extra-penalizes over-predictions across v1.

    Args:
        v1: Threshold: elements with y_pred > v1 while y_true < v1 get the
            extra penalty.
        v2: Unused here; kept for signature symmetry with ``custom_loss2``.
        k: Scale factor of the extra penalty term.

    Returns:
        A Keras-compatible loss function ``(y_true, y_pred) -> scalar``.
    """
    def combined_loss(y1_true, y1_pred):
        # Elementwise condition: prediction crossed the threshold upward
        # while the target sits below it.
        crossed = tf.logical_and(tf.greater(y1_pred, v1), tf.less(y1_true, v1))
        # BUG FIX: the original wrapped two whole-batch tf.reduce_mean
        # scalars inside tf.where, so every element whose condition held
        # received the *batch-wide* penalized mean instead of its own
        # penalized error. Apply the penalty factor per element first,
        # then reduce once.
        p = tf.where(crossed,
                     1 + (k * (y1_pred - y1_true)),
                     tf.ones_like(y1_pred))
        return tf.reduce_mean(tf.math.square(y1_pred - y1_true) * p)
    return combined_loss
def custom_loss2(v1 = 0.7, v2 = 1, k =0.5):
    """Build an MSE loss that extra-penalizes under-predictions across v2.

    Args:
        v1: Unused here; kept for signature symmetry with ``custom_loss1``.
        v2: Threshold: elements with y_pred < v2 while y_true > v2 get the
            extra penalty.
        k: Scale factor of the extra penalty term.

    Returns:
        A Keras-compatible loss function ``(y_true, y_pred) -> scalar``.
    """
    def combined_loss(y2_true, y2_pred):
        # Elementwise condition: prediction fell below the threshold while
        # the target sits above it.
        crossed = tf.logical_and(tf.less(y2_pred, v2), tf.greater(y2_true, v2))
        # BUG FIX: as in custom_loss1, the penalty must be applied per
        # element before the reduction; the original tf.where selected
        # between two batch-wide means, broadcasting the penalized mean to
        # non-offending elements as well.
        p = tf.where(crossed,
                     1 + (k * (y2_true - y2_pred)),
                     tf.ones_like(y2_pred))
        return tf.reduce_mean(tf.math.square(y2_pred - y2_true) * p)
    return combined_loss
# Toy two-output regression model: each Dense head is named after the loss
# dict key so Keras routes the matching custom loss to it.
feature_input = tf.keras.layers.Input((5,))
head_relu = tf.keras.layers.Dense(1, activation = 'relu', name='loss1')(feature_input)
head_tanh = tf.keras.layers.Dense(1, activation = 'tanh', name='loss2')(feature_input)
model = tf.keras.Model(feature_input, [head_relu, head_tanh])
# One loss per output, matched by layer name.
model.compile(optimizer='adam', loss = {'loss1': custom_loss1(), 'loss2': custom_loss2()})
# Smoke-train on random data to show the losses run end to end.
model.fit(tf.random.normal((10, 5)), [tf.random.normal((10, 1)), tf.random.normal((10, 1))], batch_size=2, epochs=5)
我需要 Keras 中的自定义损失函数来解决回归问题。我必须预测两个值 (y1, y2),但在以下情况下我想加大对误差的惩罚:
if y1_pred > v1 and y1_true < v1:
or
if y2_pred < v2 and y2_true > v2:
我需要类似于:
if y1_pred > v1 and y1_true < v1:
p = 1 + (k * (y1_pred-y1_true))
K.mean(K.square(y1_pred-y1_true) * p)
else:
K.mean(K.square(y1_pred-y1_true))
if y2_pred < v2 and y2_true > v2:
p = 1 + (k * (y2_true-y2_pred))
K.mean(K.square(y2_pred-y2_true) * p)
else:
K.mean(K.square(y2_pred-y2_true))
v1、v2 和 k 是常量。
尝试使用 tf.where:
import tensorflow as tf
def custom_loss1(v1 = 0.7, v2 = 1, k =0.5):
    """Build an MSE loss that extra-penalizes over-predictions across v1.

    Args:
        v1: Threshold: elements with y_pred > v1 while y_true < v1 get the
            extra penalty.
        v2: Unused here; kept for signature symmetry with ``custom_loss2``.
        k: Scale factor of the extra penalty term.

    Returns:
        A Keras-compatible loss function ``(y_true, y_pred) -> scalar``.
    """
    def combined_loss(y1_true, y1_pred):
        # Elementwise condition: prediction crossed the threshold upward
        # while the target sits below it.
        crossed = tf.logical_and(tf.greater(y1_pred, v1), tf.less(y1_true, v1))
        # BUG FIX: the original wrapped two whole-batch tf.reduce_mean
        # scalars inside tf.where, so every element whose condition held
        # received the *batch-wide* penalized mean instead of its own
        # penalized error. Apply the penalty factor per element first,
        # then reduce once.
        p = tf.where(crossed,
                     1 + (k * (y1_pred - y1_true)),
                     tf.ones_like(y1_pred))
        return tf.reduce_mean(tf.math.square(y1_pred - y1_true) * p)
    return combined_loss
def custom_loss2(v1 = 0.7, v2 = 1, k =0.5):
    """Build an MSE loss that extra-penalizes under-predictions across v2.

    Args:
        v1: Unused here; kept for signature symmetry with ``custom_loss1``.
        v2: Threshold: elements with y_pred < v2 while y_true > v2 get the
            extra penalty.
        k: Scale factor of the extra penalty term.

    Returns:
        A Keras-compatible loss function ``(y_true, y_pred) -> scalar``.
    """
    def combined_loss(y2_true, y2_pred):
        # Elementwise condition: prediction fell below the threshold while
        # the target sits above it.
        crossed = tf.logical_and(tf.less(y2_pred, v2), tf.greater(y2_true, v2))
        # BUG FIX: as in custom_loss1, the penalty must be applied per
        # element before the reduction; the original tf.where selected
        # between two batch-wide means, broadcasting the penalized mean to
        # non-offending elements as well.
        p = tf.where(crossed,
                     1 + (k * (y2_true - y2_pred)),
                     tf.ones_like(y2_pred))
        return tf.reduce_mean(tf.math.square(y2_pred - y2_true) * p)
    return combined_loss
# Toy two-output regression model: each Dense head is named after the loss
# dict key so Keras routes the matching custom loss to it.
feature_input = tf.keras.layers.Input((5,))
head_relu = tf.keras.layers.Dense(1, activation = 'relu', name='loss1')(feature_input)
head_tanh = tf.keras.layers.Dense(1, activation = 'tanh', name='loss2')(feature_input)
model = tf.keras.Model(feature_input, [head_relu, head_tanh])
# One loss per output, matched by layer name.
model.compile(optimizer='adam', loss = {'loss1': custom_loss1(), 'loss2': custom_loss2()})
# Smoke-train on random data to show the losses run end to end.
model.fit(tf.random.normal((10, 5)), [tf.random.normal((10, 1)), tf.random.normal((10, 1))], batch_size=2, epochs=5)