RNN and CNN-RNN won't train correctly, always predict one class
I am currently developing a model to detect emotion from text using deep learning algorithms. I have a relatively small labelled dataset (~7500 samples) of 7 different emotion classes. I developed a CNN and achieved ~63% accuracy, but when I tried to apply an RNN using LSTM, and a CNN-RNN also using LSTM, they just don't seem to train properly at all and always end up predicting the same class. I believe my models to be fundamentally sound, with some mistakes in the parameters. I have the dataset split 85% for training, with a further 20% of that used for validation (matching the validation_split=0.2 in the code below) and the remaining 15% for testing. My embedding matrix was developed using the word representations from Google News word2vec, and the word index was developed using the Keras Tokenizer.
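For context, the embedding matrix and word index are built roughly as follows (a sketch assuming gensim's KeyedVectors for loading the word2vec binary; not the exact code, but input_dim, output_dim, max_len and embedding_matrix below are what the models use):

    from gensim.models import KeyedVectors
    from keras.preprocessing.text import Tokenizer
    from keras.preprocessing.sequence import pad_sequences
    import numpy as np

    # Load the pretrained Google News vectors (300-dimensional).
    w2v = KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin', binary=True)

    # Build the word index with the Keras Tokenizer and pad all sequences to max_len.
    tokenizer = Tokenizer()
    tokenizer.fit_on_texts(texts)                    # texts: the raw sentences
    sequences = tokenizer.texts_to_sequences(texts)
    max_len = max(len(s) for s in sequences)         # 179 in this case
    x_data = pad_sequences(sequences, maxlen=max_len)

    input_dim = len(tokenizer.word_index) + 1        # vocabulary size (+1 for padding index 0)
    output_dim = 300                                 # word2vec dimensionality

    # Copy the word2vec vector for every word in the vocabulary; unknown words stay zero.
    embedding_matrix = np.zeros((input_dim, output_dim))
    for word, i in tokenizer.word_index.items():
        if word in w2v:
            embedding_matrix[i] = w2v[word]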
Dataset breakdown:

Emotion   Count
Anger     1086
Disgust   1074
Fear      1086
Guilt     1062
Joy       1089
Sadness   1080
Shame     1058
CNN implementation
def make_model(kernel_sizes, num_filters, dropout, hidden_units):
    submodels = []
    for kernel_size in kernel_sizes:
        submodel = Sequential()
        submodel.add(Embedding(input_dim=input_dim,
                               output_dim=output_dim,
                               weights=[embedding_matrix],
                               input_length=max_len,
                               trainable=True))
        submodel.add(Conv1D(filters=num_filters, kernel_size=kernel_size,
                            padding='same', activation='relu', strides=1))
        submodel.add(GlobalMaxPooling1D())
        submodels.append(submodel)

    submodel_outputs = [model.output for model in submodels]
    submodel_inputs = [model.input for model in submodels]

    merged = Concatenate(axis=1)(submodel_outputs)
    x = Dropout(dropout)(merged)

    if hidden_units > 0:
        x = Dense(hidden_units, activation='relu')(x)
        x = Dropout(dropout)(x)

    x = Dense(7, activation='softmax', kernel_initializer="uniform")(x)
    out = Activation('sigmoid')(x)

    model = Model(submodel_inputs, out)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
    return model
def fit_model(model, kernel_sizes, num_epochs, batch_size, x_train, y_train):
    # The model has one input per kernel size, so the same input is fed to each head.
    x_train = [x_train] * len(kernel_sizes)
    history = model.fit(x_train, y_train, batch_size=batch_size,
                        epochs=num_epochs, validation_split=0.2)
    return history
kernel_sizes = [2,6]
num_filters = 100
dropout = 0.6
num_hidden = 270
callbacks = callbacks_list
num_epochs = 15
batch_size = 64
model = make_model(kernel_sizes, num_filters, dropout, num_hidden)
print(model.summary())
history = fit_model(model, kernel_sizes, num_epochs, batch_size, x_train, y_train)
Model: "model_1"
Layer (type)                    Output Shape        Param #   Connected to
embedding_1_input (InputLayer)  (None, 179)         0
embedding_2_input (InputLayer)  (None, 179)         0
embedding_1 (Embedding)         (None, 179, 300)    2729400   embedding_1_input[0][0]
embedding_2 (Embedding)         (None, 179, 300)    2729400   embedding_2_input[0][0]
conv1d_1 (Conv1D)               (None, 179, 100)    60100     embedding_1[0][0]
conv1d_2 (Conv1D)               (None, 179, 100)    180100    embedding_2[0][0]
global_max_pooling1d_1 (GlobalM (None, 100)         0         conv1d_1[0][0]
global_max_pooling1d_2 (GlobalM (None, 100)         0         conv1d_2[0][0]
concatenate_1 (Concatenate)     (None, 200)         0         global_max_pooling1d_1[0][0]
                                                              global_max_pooling1d_2[0][0]
dropout_1 (Dropout)             (None, 200)         0         concatenate_1[0][0]
dense_1 (Dense)                 (None, 270)         54270     dropout_1[0][0]
dropout_2 (Dropout)             (None, 270)         0         dense_1[0][0]
dense_2 (Dense)                 (None, 7)           1897      dropout_2[0][0]
activation_1 (Activation)       (None, 7)           0         dense_2[0][0]
Total params: 5,755,167
Trainable params: 5,755,167
Non-trainable params: 0
[Image: Training and validation results for CNN]
[Image: CNN confusion matrix]
RNN implementation
def make_model(lstm_units, dropout, hidden_units):
    model = Sequential()
    model.add(Embedding(input_dim=input_dim,
                        output_dim=output_dim,
                        weights=[embedding_matrix],
                        input_length=max_len,
                        trainable=False))
    model.add(LSTM(lstm_units))
    model.add(Dropout(dropout))
    if hidden_units > 0:
        model.add(Dense(hidden_units, activation='elu'))
        model.add(Dropout(dropout))
    model.add(Dense(7, activation='softmax', kernel_initializer="uniform"))
    model.add(Activation('sigmoid'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
    return model
lstm_units = 120
dropout = 0.5
hidden_units = 550
callbacks = [tensorboard, early]
num_epochs = 20
batch_size = 60
model = make_model(lstm_units, dropout, hidden_units)
print(model.summary())
history = fit_model(model, num_epochs, batch_size, x_train, y_train)
Model: "sequential_6"
Layer (type)                 Output Shape       Param #
embedding_6 (Embedding)      (None, 179, 300)   2729400
lstm_8 (LSTM)                (None, 120)        202080
dropout_5 (Dropout)          (None, 120)        0
dense_6 (Dense)              (None, 550)        66550
dropout_6 (Dropout)          (None, 550)        0
dense_7 (Dense)              (None, 7)          3857
activation_3 (Activation)    (None, 7)          0
Total params: 3,001,887
Trainable params: 272,487
Non-trainable params: 2,729,400
[Image: RNN training and validation scores]
[Image: RNN confusion matrix]
CNN-RNN implementation
def make_model(kernel_sizes, num_filters, dropout, hidden_units, lstm_units):
    submodels = []
    for kernel_size in kernel_sizes:
        submodel = Sequential()
        submodel.add(Embedding(input_dim=input_dim,
                               output_dim=output_dim,
                               weights=[embedding_matrix],
                               input_length=max_len,
                               trainable=True))
        submodel.add(Conv1D(filters=num_filters, kernel_size=kernel_size,
                            padding='same', activation='relu', strides=1))
        submodel.add(MaxPooling1D(pool_size=2, strides=2))
        submodel.add(Dropout(dropout))
        submodel.add(LSTM(lstm_units))
        submodels.append(submodel)

    submodel_outputs = [model.output for model in submodels]
    submodel_inputs = [model.input for model in submodels]

    merged = Concatenate(axis=1)(submodel_outputs)
    x = Dropout(dropout)(merged)

    if hidden_units > 0:
        x = Dense(hidden_units, activation='relu')(x)
        x = Dropout(dropout)(x)

    x = Dense(7, activation='softmax', kernel_initializer="uniform")(x)
    out = Activation('sigmoid')(x)

    model = Model(submodel_inputs, out)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
    return model
kernel_sizes = [2,3,6]
num_filters = 100
dropout = 0.6
num_hidden = 270
lstm_units = 80
callbacks = [tensorboard, early]
num_epochs = 20
batch_size = 64
model = make_model(kernel_sizes, num_filters, dropout, num_hidden, lstm_units)
print(model.summary())
history = fit_model(model, kernel_sizes, num_epochs, batch_size, x_train, y_train)
Model: "model_2"
Layer (type)                    Output Shape        Param #   Connected to
embedding_8_input (InputLayer)  (None, 179)         0
embedding_9_input (InputLayer)  (None, 179)         0
embedding_10_input (InputLayer) (None, 179)         0
embedding_8 (Embedding)         (None, 179, 300)    2729400   embedding_8_input[0][0]
embedding_9 (Embedding)         (None, 179, 300)    2729400   embedding_9_input[0][0]
embedding_10 (Embedding)        (None, 179, 300)    2729400   embedding_10_input[0][0]
conv1d_8 (Conv1D)               (None, 179, 100)    60100     embedding_8[0][0]
conv1d_9 (Conv1D)               (None, 179, 100)    90100     embedding_9[0][0]
conv1d_10 (Conv1D)              (None, 179, 100)    180100    embedding_10[0][0]
max_pooling1d_7 (MaxPooling1D)  (None, 89, 100)     0         conv1d_8[0][0]
max_pooling1d_8 (MaxPooling1D)  (None, 89, 100)     0         conv1d_9[0][0]
max_pooling1d_9 (MaxPooling1D)  (None, 89, 100)     0         conv1d_10[0][0]
dropout_9 (Dropout)             (None, 89, 100)     0         max_pooling1d_7[0][0]
dropout_10 (Dropout)            (None, 89, 100)     0         max_pooling1d_8[0][0]
dropout_11 (Dropout)            (None, 89, 100)     0         max_pooling1d_9[0][0]
lstm_2 (LSTM)                   (None, 80)          57920     dropout_9[0][0]
lstm_3 (LSTM)                   (None, 80)          57920     dropout_10[0][0]
lstm_4 (LSTM)                   (None, 80)          57920     dropout_11[0][0]
concatenate_3 (Concatenate)     (None, 240)         0         lstm_2[0][0]
                                                              lstm_3[0][0]
                                                              lstm_4[0][0]
dropout_12 (Dropout)            (None, 240)         0         concatenate_3[0][0]
dense_3 (Dense)                 (None, 270)         65070     dropout_12[0][0]
dropout_13 (Dropout)            (None, 270)         0         dense_3[0][0]
dense_4 (Dense)                 (None, 7)           1897      dropout_13[0][0]
activation_2 (Activation)       (None, 7)           0         dense_4[0][0]
Total params: 8,759,227
Trainable params: 8,759,227
Non-trainable params: 0
[Image: CNN-RNN training and validation scores]
[Image: CNN-RNN confusion matrix]
I understand that there is no magic formula for neural networks and no one-size-fits-all approach; I am just looking for some guidance on the mistakes I may have made when implementing the CNN-RNN and RNN.
Apologies in advance for any formatting mistakes, as this is my first question asked. If there is any other information needed, please let me know.
Thanks very much.
I can't say this will solve all of your problems, but one thing that is definitely wrong is that you apply a sigmoid activation right after the softmax activation, while your classification problem has 7 classes. A sigmoid activation can only separate two classes.
For example:
    model.add(Dense(7, activation='softmax', kernel_initializer="uniform"))
    model.add(Activation('sigmoid'))
You should remove the sigmoid activation in all three models.
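In other words, each model should end directly with the softmax layer, e.g. for the Sequential model:

    model.add(Dense(7, activation='softmax', kernel_initializer="uniform"))
    # no Activation('sigmoid') afterwards -- softmax already outputs class probabilities

For the two functional models, pass the softmax output straight to Model, i.e. model = Model(submodel_inputs, x).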
First of all, your CNN implementation seems over-enthusiastic. Did you arrive at this architecture by experimenting with multiple designs, or did you just pick it?
Usually, when multiple heads are used, each is fed a slight variation of the input, not the exact same copy, so your multi-head design may not be the optimal choice: it introduces too many unnecessary parameters and can lead to overfitting, which is evident from your loss curves.
You used categorical cross-entropy but then applied a sigmoid after the softmax, which is also not how things are done. Just use the softmax activation and get rid of the sigmoid.
Is that confusion matrix from the test set? Then your test split seems to be too easy, because the model has overfitted and should therefore be performing poorly on it. So try to find a better test split by making sure there is not too much similar data between training and test.
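One way to do that (a sketch using scikit-learn, not from the original code; texts and labels here are assumed to be the raw sentences and their integer class ids):

    from sklearn.model_selection import train_test_split

    # Drop exact duplicates so the same sentence cannot land in both train and test,
    # then split with stratify so every emotion keeps its proportion in both sets.
    seen = {}
    for text, label in zip(texts, labels):
        seen.setdefault(text.strip().lower(), label)
    texts_u, labels_u = list(seen.keys()), list(seen.values())

    x_train_txt, x_test_txt, y_train_ids, y_test_ids = train_test_split(
        texts_u, labels_u, test_size=0.15, stratify=labels_u, random_state=42)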
It is better to fine-tune your simple model first before moving to complex ones. Since your LSTM model is not performing well, there is no point in trying a more complex model (CNN-LSTM). Your LSTM model is not converging, and the reasons could be many (the most obvious being the incorrect use of the activation layer).
def make_model(lstm_units, dropout, hidden_units):
    model = Sequential()
    model.add(Embedding(input_dim=input_dim,
                        output_dim=output_dim,
                        weights=[embedding_matrix],
                        input_length=max_len,
                        trainable=False))
    model.add(LSTM(lstm_units, return_sequences=True, recurrent_dropout=0.2))
    model.add(Dropout(dropout))
    model.add(LSTM(lstm_units, recurrent_dropout=0.2))
    model.add(Dropout(dropout))
    model.add(Dense(7, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
    return model
Make it a fully LSTM-based model by getting rid of the FC layer, and also start with a smaller number of LSTM units like 8, 16, 32, ...
For further improvement, you can do the following:
0) Get rid of the GloVe embeddings and use your own learnable embeddings.
1) Do a hyperparameter search over the network to find the best model.
There are lots of libraries, but I find this one very flexible: https://github.com/keras-team/keras-tuner
Just install it with pip.
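For example:

    pip install -U keras-tuner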
Here is some demo code:
from tensorflow import keras
from tensorflow.keras import layers
from kerastuner.tuners import RandomSearch

def build_model(hp):
    model = keras.Sequential()
    model.add(layers.Embedding(input_dim=hp.Int('input_dim',
                                                min_value=5000,
                                                max_value=10000,
                                                step=1000),
                               output_dim=hp.Int('output_dim',
                                                 min_value=200,
                                                 max_value=800,
                                                 step=100),
                               input_length=400))
    model.add(layers.Convolution1D(
        filters=hp.Int('filters',
                       min_value=32,
                       max_value=512,
                       step=32),
        kernel_size=hp.Int('kernel_size',
                           min_value=3,
                           max_value=11,
                           step=2),
        padding='same',
        activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling1D())
    model.add(layers.Flatten())
    model.add(layers.Dropout(0.4))
    model.add(layers.Dense(units=hp.Int('units',
                                        min_value=64,
                                        max_value=256,
                                        step=32),
                           activation='relu'))
    model.add(layers.Dropout(0.4))
    model.add(layers.Dense(7, activation='softmax'))
    model.compile(
        optimizer=keras.optimizers.Adam(
            hp.Choice('learning_rate',
                      values=[1e-2, 1e-3, 1e-4])),
        loss='categorical_crossentropy',
        metrics=['accuracy'])
    return model

tuner = RandomSearch(
    build_model,
    objective='val_accuracy',
    max_trials=5,
    executions_per_trial=3,
    directory='my_dir',
    project_name='helloworld')

tuner.search_space_summary()

## The following lines are based on your model
tuner.search(x, y,
             epochs=5,
             validation_data=(val_x, val_y))

models = tuner.get_best_models(num_models=2)
If you want to extract more meaningful features, one promising approach I have found is to extract pretrained BERT features and then train with a CNN/LSTM.
Here is a great repository to get started: https://github.com/UKPLab/sentence-transformers
Once you get the sentence embeddings from BERT/XLNet, you can use those features to train another CNN, similar to the one you are using, except maybe get rid of the embedding layer as it is expensive.
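A minimal sketch of that pipeline (the pretrained model name and the classifier on top are illustrative assumptions, not prescriptions):

    from sentence_transformers import SentenceTransformer
    from keras.models import Sequential
    from keras.layers import Dense, Dropout
    import numpy as np

    # Encode every sentence into a fixed-size BERT embedding (768-d for this model).
    encoder = SentenceTransformer('bert-base-nli-mean-tokens')
    x_train_emb = np.asarray(encoder.encode(train_sentences))   # shape: (n_train, 768)
    x_test_emb = np.asarray(encoder.encode(test_sentences))

    # A small classifier on top -- no Embedding layer needed any more.
    clf = Sequential()
    clf.add(Dense(256, activation='relu', input_shape=(768,)))
    clf.add(Dropout(0.4))
    clf.add(Dense(7, activation='softmax'))
    clf.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
    clf.fit(x_train_emb, y_train, epochs=10, batch_size=64, validation_split=0.2)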