Test accuracy not increasing beyond 45%

I'm trying to build a Conv2D network for image classification. I generated my own dataset by scraping images of 9 classes: airplanes, animals, birds, cars, flowers, people, ships, traffic_signs, and trains. The images have a resolution of 612 x 400-plus (the 400 varies between 430 and 480). To go easy on system resources, I have resized the images to 100x100. I designed the network, and training accuracy goes above 90%, but test accuracy does not increase beyond 45%.

Any idea what I'm doing wrong?

Here's the code:

import matplotlib.pyplot as plt
import numpy as np
import cv2
import os
import PIL
import tensorflow as tf
import pathlib
import requests
import urllib
import time

from bs4 import BeautifulSoup
from tensorflow import keras

from tensorflow.keras.models import *
from tensorflow.keras.layers import *
# use tensorflow.keras consistently (mixing plain keras imports can cause errors)
from tensorflow.keras.optimizers import *
from tensorflow.keras.losses import sparse_categorical_crossentropy

data_dir = pathlib.Path('D:/mixed images')
data_dir

len(list(data_dir.glob('*/*.jpg')))

# planes = list(data_dir.glob('airplanes/*.jpg'))

# PIL.Image.open(planes[10])

img_list = [list(data_dir.glob('airplanes/*')),
            list(data_dir.glob('animals/*')),
            list(data_dir.glob('birds/*')),
            list(data_dir.glob('cars/*')),
            list(data_dir.glob('flowers/*')),
            list(data_dir.glob('people/*')),
            list(data_dir.glob('ships/*')),
            list(data_dir.glob('traffic_signs/*')),
            list(data_dir.glob('trains/*'))]

obj_list = os.listdir(data_dir)  # note: assumes the listing order matches img_list above

obj_img_dict = dict(zip(obj_list,img_list))

# map each class name to an integer label 0-8
obj_label_dict = dict(zip(obj_list,[0,1,2,3,4,5,6,7,8]))
obj_label_dict

X = []
y = []

# read each image, resize to 100x100, and collect pixel arrays and integer labels
for image_name,images in obj_img_dict.items():
    for image in images:
        img = cv2.imread(str(image))  # note: OpenCV reads images in BGR channel order
        resized_img = cv2.resize(img,(100,100))
        X.append(resized_img)
        y.append(obj_label_dict[image_name])

X = np.array(X)
y = np.array(y)

from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.3,random_state=42)
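# (suggestion, not in the original code: with 9 classes, an unstratified split can
# leave classes unevenly represented in the test set; train_test_split's stratify
# argument keeps class proportions equal across both splits)
# X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.3,random_state=42,stratify=y)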

# scale pixel values from [0, 255] to [0, 1]
X_train_scaled = X_train/255
X_test_scaled = X_test/255

obj_classes = 9

model = keras.Sequential()
        
# building the convolution layers
model.add(keras.layers.Conv2D(32,(3,3),input_shape= (100,100,3),padding='same',activation='relu'))
model.add(keras.layers.MaxPooling2D(pool_size=(2,2)))
model.add(keras.layers.Conv2D(64,(3,3), padding='same',activation='relu'))
model.add(keras.layers.MaxPooling2D(pool_size=(2,2)))
model.add(keras.layers.Conv2D(128,(3,3), padding='same',activation='relu'))
model.add(keras.layers.MaxPooling2D(pool_size=(2,2)))
model.add(keras.layers.Conv2D(256,(3,3), padding='same',activation='relu'))
model.add(keras.layers.MaxPooling2D(pool_size=(2,2)))
model.add(keras.layers.Conv2D(512,(3,3), padding='same',activation='relu'))
model.add(keras.layers.MaxPooling2D(pool_size=(2,2)))
model.add(keras.layers.Flatten())
    
# building the dense layers
model.add(keras.layers.Dense(1024, activation='relu'))
model.add(Dropout(0.5))
model.add(keras.layers.Dense(512, activation='relu'))
model.add(Dropout(0.5))
model.add(keras.layers.Dense(256, activation='relu'))
# model.add(Dropout(0.6))
model.add(keras.layers.Dense(128, activation='relu'))
# model.add(Dropout(0.6))
model.add(keras.layers.Dense(64, activation='relu'))
model.add(keras.layers.Dense(obj_classes,activation='softmax'))

model.compile(loss='sparse_categorical_crossentropy',
              optimizer='adam', metrics=['accuracy'])


model.fit(X_train_scaled, y_train, batch_size=64,epochs=50, verbose=2)

model.evaluate(X_test_scaled,y_test)
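
The train/test gap described above is easier to see per epoch if validation data is passed to fit. A minimal sketch, reusing the held-out test split purely for monitoring (a dedicated validation split would be cleaner):

history = model.fit(X_train_scaled, y_train,
                    batch_size=64, epochs=50, verbose=2,
                    validation_data=(X_test_scaled, y_test))

# with metrics=['accuracy'], Keras records both curves in history.history
plt.plot(history.history['accuracy'], label='train')
plt.plot(history.history['val_accuracy'], label='validation')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()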

Try the following modifications to the model:

  • Reduce the number of parameters so that it does not over-fit
  • Distribute the parameter count as evenly as possible from one layer to the next
  • Use batch normalization between the convolutions
  • Use only 3 dense layers at the end
  • Keep the ratio of parameters between the convolutional layers and the final dense layers close to 50/50 (a quick way to check this is sketched after the model below)

A good candidate model is:

model = keras.Sequential()
# building the convolution layers
model.add(keras.layers.Conv2D(32,(3,3),input_shape= (100,100,3),padding='same',activation='relu'))
model.add(keras.layers.MaxPooling2D(pool_size=(2,2)))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Conv2D(32,(3,3), padding='same',activation='relu'))
model.add(keras.layers.MaxPooling2D(pool_size=(2,2)))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Conv2D(64,(3,3), padding='same',activation='relu'))
model.add(keras.layers.MaxPooling2D(pool_size=(2,2)))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Conv2D(64,(3,3), padding='same',activation='relu'))
model.add(keras.layers.MaxPooling2D(pool_size=(2,2)))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Conv2D(128,(3,3), padding='same',activation='relu'))
model.add(keras.layers.MaxPooling2D(pool_size=(2,2)))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Flatten())
    
# building the dense layers
model.add(keras.layers.Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(keras.layers.Dense(128, activation='relu'))
# model.add(Dropout(0.5)) # optional
model.add(keras.layers.Dense(64, activation='relu'))
model.add(keras.layers.Dense(obj_classes,activation='softmax'))

model.compile(loss='sparse_categorical_crossentropy',
              optimizer='adam', metrics=['accuracy'])
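
To check the 50/50 parameter split suggested above, compare parameter counts by layer type; a quick sketch (count_params works here because specifying input_shape builds the model at construction):

model.summary()  # per-layer parameter counts

# rough conv-vs-dense totals (BatchNormalization parameters are ignored here)
conv_params = sum(l.count_params() for l in model.layers
                  if isinstance(l, keras.layers.Conv2D))
dense_params = sum(l.count_params() for l in model.layers
                   if isinstance(l, keras.layers.Dense))
print('conv parameters:', conv_params, 'dense parameters:', dense_params)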