Input error concatenating two CNN branches

I am trying to implement a 3D facial recognition algorithm using CNNs with multiple classes. I have one image generator for the RGB images and another image generator for the depth images (grayscale). Since I have two different inputs, I built two separate CNN models, one with input shape (height, width, 3) and the other with input shape (height, width, 1). Independently, I can fit each model with its respective image generator, but after concatenating the two branches and merging the two image generators, I get this warning and error:

WARNING:tensorflow:Model was constructed with shape (None, 400, 400, 1) for input KerasTensor(type_spec=TensorSpec(shape=(None, 400, 400, 1), dtype=tf.float32, name='Depth_Input_input'), name='Depth_Input_input', description="created by layer 'Depth_Input_input'"), but it was called on an input with incompatible shape (None, None)

"ValueError: Input 0 of layer Depth_Input is incompatible with the layer: : expected min_ndim=4, found ndim=2. Full shape received: (None, None)"

What can I do to fix this? Thank you.

Here is my code:

import tensorflow as tf
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Dropout, Flatten, Concatenate
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.image import ImageDataGenerator

height=400
width=400
shape=(height,width)

# ########################### RGB ############################
model_rgb = tf.keras.models.Sequential()

model_rgb.add(Conv2D(filters=16, kernel_size=3, activation='relu', name="RGB_Input", input_shape=(height,width, 3)))
model_rgb.add(MaxPooling2D(pool_size=2))
model_rgb.add(Dropout(0.3))

model_rgb.add(Conv2D(filters=32, kernel_size=3, activation='relu'))
model_rgb.add(MaxPooling2D(pool_size=2))

model_rgb.add(Conv2D(filters=32, kernel_size=3, activation='relu'))
model_rgb.add(MaxPooling2D(pool_size=2))

model_rgb.add(Conv2D(filters=64, kernel_size=3, activation='relu'))
model_rgb.add(MaxPooling2D(pool_size=2))

model_rgb.add(Conv2D(filters=64, kernel_size=3, activation='relu'))
model_rgb.add(MaxPooling2D(pool_size=2))
#model_rgb.add(Dropout(0.2))

model_rgb.add(Conv2D(filters=128, kernel_size=3, activation='relu'))
model_rgb.add(MaxPooling2D(pool_size=2))
#model_rgb.add(Dropout(0.2))

model_rgb.add(Flatten())
model_rgb.add(Dense(units=512, activation='relu'))
model_rgb.add(Dropout(0.3))
model_rgb.add(Dense(units=128, activation='relu'))
model_rgb.add(Dropout(0.3))

# ########################### DEPTH ###########################
model_depth = tf.keras.models.Sequential()

model_depth.add(Conv2D(filters=16, kernel_size=3, activation='relu', name="Depth_Input", input_shape=(height, width, 1)))
model_depth.add(MaxPooling2D(pool_size=2))
model_depth.add(Dropout(0.3))
                   
model_depth.add(Conv2D(filters=16, kernel_size=3, activation='relu'))                      
model_depth.add(MaxPooling2D(pool_size=2))

model_depth.add(Conv2D(filters=32, kernel_size=3, activation='relu'))                      
model_depth.add(MaxPooling2D(pool_size=2))

model_depth.add(Conv2D(filters=32, kernel_size=3, activation='relu'))                      
model_depth.add(MaxPooling2D(pool_size=2))

model_depth.add(Conv2D(filters=64, kernel_size=3, activation='relu'))                
model_depth.add(MaxPooling2D(pool_size=2))

model_depth.add(Conv2D(filters=64, kernel_size=3, activation='relu'))                      
model_depth.add(MaxPooling2D(pool_size=2))

model_depth.add(Flatten())
model_depth.add(Dense(units=512, activation='relu'))
model_depth.add(Dropout(0.3))
model_depth.add(Dense(units=128, activation='relu'))
model_depth.add(Dropout(0.3))

#### Concatenating branches ####

merge = Concatenate()([model_rgb.output, model_depth.output])
merged_out = Dense(units=16, activation='relu')(merge)
merged_out = Dense(units=2, activation='softmax')(merged_out)

merged_model = Model([model_rgb.input, model_depth.input], merged_out) 

merged_model.compile(optimizer=Adam(learning_rate=0.0001), loss='categorical_crossentropy', metrics=['accuracy'])


history_merged = merged_model.fit(gen_flow,
                                  epochs=70,
                                  shuffle=True,
                                  )

And here is the code for the generators:

train_datagen = ImageDataGenerator(rescale=1./255,
                                    rotation_range=20, 
                                    width_shift_range=0.4, 
                                    height_shift_range=0.4, 
                                    shear_range=0.4, 
                                    zoom_range=0.4, 
                                    horizontal_flip=True,
                                    fill_mode='nearest')

val_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)

# ########################### RGB ###########################
print("RGB Generators: \n")
train_generator_rgb = train_datagen.flow_from_directory(directory=train_data_rgb, target_size=shape,
                                                        class_mode='categorical', 
                                                        batch_size=16)

val_generator_rgb = val_datagen.flow_from_directory(directory=val_data_rgb, 
                                                    target_size=shape,
                                                    class_mode='categorical', 
                                                    batch_size=12)
# ########################### --- ###########################

# ########################### DEPTH ###########################
print("\n\nDepth Generators: \n")
train_generator_depth = train_datagen.flow_from_directory(directory=train_data_depth, 
                                                          target_size=shape, 
                                                          color_mode="grayscale",
                                                          class_mode='categorical', 
                                                          batch_size=16)

val_generator_depth = val_datagen.flow_from_directory(directory=val_data_depth, 
                                                      target_size=shape, 
                                                      color_mode="grayscale",
                                                      class_mode='categorical', 
                                                      batch_size=12)
# ########################### ----- ###########################

def gen_flow_for_two_inputs(X1, X2):
    
    while True:
            X1i = train_generator_rgb.next()
            X2i = train_generator_depth.next()
            yield [X1i[0], X2i[1]], X1i[1]

# Create generator
gen_flow = gen_flow_for_two_inputs(train_data_rgb, train_data_depth)

Plot of the model with the merged branches:
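The plot image itself is not reproduced here. A diagram like it can be regenerated with Keras' plot_model utility; the snippet below is only a sketch and assumes pydot and graphviz are installed:

from tensorflow.keras.utils import plot_model

# Draws both input branches and the concatenation into a single image file
plot_model(merged_model, to_file='merged_model.png', show_shapes=True)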

From the comments

The problem was in how the two generators were combined in the function gen_flow_for_two_inputs(X1, X2): the yield returned X2i[1], which is the depth generator's label batch (a 2D array of shape (batch_size, num_classes)), instead of X2i[0], the batch of depth images. That is why the Depth_Input layer received an input of shape (None, None) with ndim=2. The correct form is yield [X1i[0], X2i[0]], X1i[1] instead of yield [X1i[0], X2i[1]], X1i[1] (paraphrased from sergio_baixo).
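A quick way to see the mismatch is to inspect one batch from each generator: every flow_from_directory batch is a tuple (images, labels), so index [0] is the image batch and index [1] is the one-hot label batch. The shapes in the comments below assume the target_size and batch_size settings used above:

X1i = train_generator_rgb.next()
X2i = train_generator_depth.next()

print(X1i[0].shape)   # (16, 400, 400, 3)  -> RGB image batch
print(X2i[0].shape)   # (16, 400, 400, 1)  -> depth image batch
print(X2i[1].shape)   # (16, num_classes)  -> labels; this 2D array is what was wrongly fed to Depth_Input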

Working code for the generators

train_datagen = ImageDataGenerator(rescale=1./255,
                                    rotation_range=20, 
                                    width_shift_range=0.4, 
                                    height_shift_range=0.4, 
                                    shear_range=0.4, 
                                    zoom_range=0.4, 
                                    horizontal_flip=True,
                                    fill_mode='nearest')

val_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)

# ########################### RGB ###########################
print("RGB Generators: \n")
train_generator_rgb = train_datagen.flow_from_directory(directory=train_data_rgb, target_size=shape,
                                                        class_mode='categorical', 
                                                        batch_size=16)

val_generator_rgb = val_datagen.flow_from_directory(directory=val_data_rgb, 
                                                    target_size=shape,
                                                    class_mode='categorical', 
                                                    batch_size=12)
# ########################### --- ###########################

# ########################### DEPTH ###########################
print("\n\nDepth Generators: \n")
train_generator_depth = train_datagen.flow_from_directory(directory=train_data_depth, 
                                                          target_size=shape, 
                                                          color_mode="grayscale",
                                                          class_mode='categorical', 
                                                          batch_size=16)

val_generator_depth = val_datagen.flow_from_directory(directory=val_data_depth, 
                                                      target_size=shape, 
                                                      color_mode="grayscale",
                                                      class_mode='categorical', 
                                                      batch_size=12)
# ########################### ----- ###########################

def gen_flow_for_two_inputs(X1, X2):
    
    while True:
            X1i = train_generator_rgb.next()
            X2i = train_generator_depth.next()
            yield [X1i[0], X2i[0]], X1i[1]

# Create generator
gen_flow = gen_flow_for_two_inputs(train_data_rgb, train_data_depth)
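If validation during training is also wanted, the same pairing pattern can be applied to the validation generators. This is only a sketch, not part of the original post; val_gen_flow and the use of len(...) for the step counts are assumptions:

def val_gen_flow_for_two_inputs():
    # Same pairing as above, but for the validation generators
    while True:
        X1i = val_generator_rgb.next()
        X2i = val_generator_depth.next()
        yield [X1i[0], X2i[0]], X1i[1]

val_gen_flow = val_gen_flow_for_two_inputs()

# The Python generators are infinite, so Keras must be told how many
# batches make up one training epoch and one validation pass.
history_merged = merged_model.fit(gen_flow,
                                  steps_per_epoch=len(train_generator_rgb),
                                  validation_data=val_gen_flow,
                                  validation_steps=len(val_generator_rgb),
                                  epochs=70)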