TensorFlow Keras multiple input model

I need to adapt this model to take two text columns as input (instead of one):

import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_text  # needed so the TF Hub preprocessing model's ops are registered
from tensorflow.keras.optimizers import Adam

tfhub_handle_encoder = \
    "https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-4_H-512_A-8/1"
tfhub_handle_preprocess = \
    "https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"

def build_classifier_model():

    text_input = tf.keras.layers.Input(
        shape=(), dtype=tf.string, name='text')

    preprocessing_layer = hub.KerasLayer(
        tfhub_handle_preprocess, name='preprocessing')

    encoder_inputs = preprocessing_layer(text_input)
    encoder = hub.KerasLayer(
        tfhub_handle_encoder, trainable=True, name='BERT_encoder')

    outputs = encoder(encoder_inputs)
    net = outputs['pooled_output']
    net = tf.keras.layers.Dropout(0.1)(net)
    net = tf.keras.layers.Dense(
        6, activation='softmax', name='classifier')(net)
    model = tf.keras.Model(text_input, net)

    loss = tf.keras.losses.CategoricalCrossentropy(from_logits=False)  # (from_logits=True)
    metric = tf.metrics.CategoricalAccuracy('accuracy')
    optimizer = Adam(
        learning_rate=5e-05, epsilon=1e-08, decay=0.01, clipnorm=1.0)
    model.compile(
        optimizer=optimizer, loss=loss, metrics=metric)
    model.summary()
    return model

classifier_model = build_classifier_model()

history = classifier_model.fit(
    x=X_train['f'].values,
    y=y_train_c,
    validation_data=(X_valid['f'].values, y_valid_c),
    epochs=15)

This appears to be the model from this tutorial: https://www.tensorflow.org/text/tutorials/classify_text_with_bert

I have already tried adapting the code for two input layers, but I get an error because the tensor dimensions are wrong after the concatenation:

def build_classifier_model():

    input1 = tf.keras.layers.Input(
        shape=(), dtype=tf.string, name='text')

    input2 = tf.keras.layers.Input(
        shape=(), dtype=tf.string, name='text1')
    text_input = tf.keras.layers.concatenate([input1, input2], axis=-1)

    preprocessing_layer = hub.KerasLayer(
        tfhub_handle_preprocess, name='preprocessing')

    encoder_inputs = preprocessing_layer(text_input)
    encoder = hub.KerasLayer(
        tfhub_handle_encoder, trainable=True, name='BERT_encoder')

    outputs = encoder(encoder_inputs)
    net = outputs['pooled_output']
    net = tf.keras.layers.Dropout(0.1)(net)
    net = tf.keras.layers.Dense(
        6, activation='softmax', name='classifier')(net)
    model = tf.keras.Model([input1, input2], net)

    loss = tf.keras.losses.CategoricalCrossentropy(from_logits=False)  # (from_logits=True)
    metric = tf.metrics.CategoricalAccuracy('accuracy')
    optimizer = Adam(
        learning_rate=5e-05, epsilon=1e-08, decay=0.01, clipnorm=1.0)
    model.compile(
        optimizer=optimizer, loss=loss, metrics=metric)
    model.summary()
    return model

Error:

InvalidArgumentError:  logits and labels must be broadcastable: logits_size=[64,6] labels_size=[32,6]
     [[node categorical_crossentropy/softmax_cross_entropy_with_logits (defined at tmp/ipykernel_39/1837193519.py:5) ]] [Op:__inference_train_function_271676]

If I concatenate along a different axis, the model does not even compile.
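I suspect the Concatenate layer is joining the two inputs along the only axis a shape=() string input has, i.e. the batch axis, which would explain why the logits batch (64) is twice the labels batch (32). A quick check outside the model (made-up strings, batch of 2):

import tensorflow as tf

a = tf.constant(["first title", "second title"])  # text column 1, batch of 2
b = tf.constant(["first body", "second body"])    # text column 2, batch of 2

# Rank-1 string tensors can only be concatenated along axis 0 (= axis -1),
# which is the batch axis here, so the batch size doubles: 2 -> 4.
print(tf.concat([a, b], axis=-1).shape)  # (4,)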

Strangely, replacing the Concatenate layer in your model with tf.strings.join seems to work:

def build_classifier_model():

  input1 = tf.keras.layers.Input(
      shape=(), dtype=tf.string, name='text')

  input2 = tf.keras.layers.Input(
      shape=(), dtype=tf.string, name='text1')
  text_input = tf.strings.join([input1, input2])

  preprocessing_layer = hub.KerasLayer(
      tfhub_handle_preprocess, name='preprocessing')

  encoder_inputs = preprocessing_layer(text_input)
  encoder = hub.KerasLayer(
      tfhub_handle_encoder, trainable=True, name='BERT_encoder')

  outputs = encoder(encoder_inputs)
  net = outputs['pooled_output']
  net = tf.keras.layers.Dropout(0.1)(net)
  output = tf.keras.layers.Dense(
      6, activation='softmax', name='classifier')(net)
  model = tf.keras.Model([input1, input2], output)

  loss = tf.keras.losses.CategoricalCrossentropy(from_logits=False) # (from_logits=True)
  metric = tf.metrics.CategoricalAccuracy('accuracy')
  optimizer = Adam(
      learning_rate=5e-05, epsilon=1e-08, decay=0.01, clipnorm=1.0)
  model.compile(
      optimizer=optimizer, loss=loss, metrics=metric)
  model.summary()
  return model
Epoch 1/5
 497/1094 [============>.................] - ETA: 2:14 - loss: 1.8664 - accuracy: 0.1641
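Note that tf.strings.join works element-wise and its separator defaults to '' (empty), so the end of the first text is glued directly onto the start of the second. If you want the two columns kept apart, pass a separator explicitly:

text_input = tf.strings.join([input1, input2], separator=' ')

so that, for example, ['hello'] and ['world'] become [b'hello world'] instead of [b'helloworld'].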

You could also consider simply doing text_input = input1 + input2, since the Concatenate layer seems to mess up the batch dimension. Or you could feed each input to your encoder separately and then concatenate the results:

def build_classifier_model():

  input1 = tf.keras.layers.Input(
      shape=(), dtype=tf.string, name='text')

  input2 = tf.keras.layers.Input(
      shape=(), dtype=tf.string, name='text1')

  preprocessing_layer = hub.KerasLayer(
      tfhub_handle_preprocess, name='preprocessing')

  encoder_input1 = preprocessing_layer(input1)
  encoder_input2 = preprocessing_layer(input2)
  encoder = hub.KerasLayer(
      tfhub_handle_encoder, trainable=True, name='BERT_encoder')

  output1 = encoder(encoder_input1)
  output2 = encoder(encoder_input2)

  net = tf.keras.layers.Concatenate(axis=-1)([output1['pooled_output'], output2['pooled_output']])
  net = tf.keras.layers.Dropout(0.1)(net)
  output = tf.keras.layers.Dense(
      6, activation='softmax', name='classifier')(net)
  model = tf.keras.Model([input1, input2], output)

  loss = tf.keras.losses.CategoricalCrossentropy(from_logits=False) # (from_logits=True)
  metric = tf.metrics.CategoricalAccuracy('accuracy')
  optimizer = Adam(
      learning_rate=5e-05, epsilon=1e-08, decay=0.01, clipnorm=1.0)
  model.compile(
      optimizer=optimizer, loss=loss, metrics=metric)
  model.summary()
  return model
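
Both of these variants take two inputs, so fit then expects a list (or a dict keyed by the input names 'text' and 'text1') instead of a single array. A minimal sketch, assuming the second text column in your dataframe is called 'f1' (a placeholder name, substitute your own):

classifier_model = build_classifier_model()

history = classifier_model.fit(
    x=[X_train['f'].values, X_train['f1'].values],
    y=y_train_c,
    validation_data=(
        [X_valid['f'].values, X_valid['f1'].values], y_valid_c),
    epochs=15)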