训练 CNN 后精度低
Low accuracy after training a CNN
我尝试使用 Keras 训练一个对手写数字进行分类的 CNN 模型,但我在训练中的准确率很低(低于 10%)并且出现了很大的错误。
我还尝试过一个简单的(非卷积)神经网络,但结果不确定,效果同样不佳。
这是我的代码。
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
# Load MNIST: 60000 training and 10000 test 28x28 grayscale digit images, labels 0-9.
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Explore the data
print(y_train[12])
print(np.shape(x_train))
print(np.shape(x_test))
# we have 60000 images for training and 10000 for testing
# Scaling data
x_train = x_train/255
# BUG (the subject of this question): this divides the class LABELS (0-9) by
# 255, turning them into tiny fractional values instead of integer class ids,
# so sparse_categorical_crossentropy can no longer learn the classes — which
# matches the <10% accuracy described above. The intended line is presumably
# `x_test = x_test/255`.
y_train = y_train/255
# reshape the data to (N, 28, 28, 1) so Conv2D gets an explicit channel axis
x_train = x_train.reshape(60000,28,28,1)
x_test = x_test.reshape(10000,28,28,1)
y_train = y_train.reshape(60000,1)
y_test = y_test.reshape(10000,1)
# Create a model: two Conv2D + MaxPooling stages, then a small dense classifier
model = keras.Sequential([
keras.layers.Conv2D(64,(3,3),(1,1),padding = "same",input_shape=(28,28,1)),
keras.layers.MaxPooling2D(pool_size = (2,2),padding = "valid"),
keras.layers.Conv2D(32,(3,3),(1,1),padding = "same"),
keras.layers.MaxPooling2D(pool_size = (2,2),padding = "valid"),
keras.layers.Flatten(),
keras.layers.Dense(128,activation = "relu"),
keras.layers.Dense(10,activation = "softmax")])
# sparse_categorical_crossentropy expects integer class labels (not one-hot)
model.compile(optimizer = "adam",
loss = "sparse_categorical_crossentropy",
metrics = ['accuracy'])
model.fit(x_train,y_train,epochs=10)
test_loss,test_acc = model.evaluate(x_test,y_test)
print("\ntest accuracy:",test_acc)
有人可以建议我如何改进我的模型吗?
您的问题在这里:
x_train = x_train/255
y_train = y_train/255 # makes no sense
您应该重新缩放的是 x_test,而不是 y_train。
x_train = x_train/255
x_test = x_test/255
这可能只是您的错字。更改这些行,您将获得 95% 以上的准确率。
您的模型存在数据缩放问题;在 TF 2.0 中可以这样写:
x_train /= 255
x_test /= 255
你不需要像下面这样去缩放标签数据(y_train):
x_train = x_train/255
y_train = y_train/255
之后,我们可以将标签转换为单热编码
from tensorflow.keras.utils import to_categorical
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)
这有助于:
loss='categorical_crossentropy',
Sequential API 允许我们将图层堆叠在彼此之上。唯一的缺点是我们在使用这些模型时不能有多个输出或输入。尽管如此,我们可以创建一个 Sequential 对象并使用 add() 函数向我们的模型添加层。
在 TF 2.0 中可以继续用 add() 函数逐层构建模型;例如连续添加 4 层 Conv2D,让模型更强、更准确:
seq_model.add(Conv2D(filters=32, kernel_size=(5,5), activation='relu',
input_shape=x_train.shape[1:]))
seq_model.add(Conv2D(filters=32, kernel_size=(5,5), activation='relu'))
seq_model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
seq_model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
在代码中你可以使用 dropout :
seq_model.add(Dropout(rate=0.25))
完整模型:
%tensorflow_version 2.x
from tensorflow.keras.datasets import mnist
# Load MNIST: 60000 training and 10000 test 28x28 grayscale digit images.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Scale the PIXELS of both the training and the test images to [0, 1];
# the labels are left untouched (this is the fix for the question's bug).
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# Add the channel axis expected by Conv2D: (N, 28, 28) -> (N, 28, 28, 1)
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
# One-hot encode the labels, as required by categorical_crossentropy
from tensorflow.keras.utils import to_categorical
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout
# Two convolutional stages (32 then 64 filters), each followed by max-pooling
# and dropout for regularisation, then a dense classifier head.
seq_model = Sequential()
seq_model.add(Conv2D(filters=32, kernel_size=(5,5), activation='relu',
input_shape=x_train.shape[1:]))
seq_model.add(Conv2D(filters=32, kernel_size=(5,5), activation='relu'))
seq_model.add(MaxPool2D(pool_size=(2, 2)))
seq_model.add(Dropout(rate=0.25))
seq_model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
seq_model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
seq_model.add(MaxPool2D(pool_size=(2, 2)))
seq_model.add(Dropout(rate=0.25))
seq_model.add(Flatten())
seq_model.add(Dense(256, activation='relu'))
seq_model.add(Dropout(rate=0.5))
seq_model.add(Dense(10, activation='softmax'))
# categorical_crossentropy matches the one-hot labels created above
seq_model.compile(
loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy']
)
epochsz = 3 # number of epochs
batch_sizez = 32 # the batch size; could also be 64, 128, etc.
seq_model.fit(x_train,y_train, batch_size=batch_sizez, epochs=epochsz)
结果:
Train on 60000 samples
Epoch 1/3
60000/60000 [==============================] - 186s 3ms/sample - loss: 0.1379 - accuracy: 0.9588
Epoch 2/3
60000/60000 [==============================] - 187s 3ms/sample - loss: 0.0677 - accuracy: 0.9804
Epoch 3/3
60000/60000 [==============================] - 187s 3ms/sample - loss: 0.0540 - accuracy: 0.9840
我尝试使用 Keras 训练一个对手写数字进行分类的 CNN 模型,但我在训练中的准确率很低(低于 10%)并且出现了很大的错误。我还尝试过一个简单的(非卷积)神经网络,但结果不确定,效果同样不佳。
这是我的代码。
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
# Load MNIST: 60000 training and 10000 test 28x28 grayscale digit images, labels 0-9.
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Explore the data
print(y_train[12])
print(np.shape(x_train))
print(np.shape(x_test))
# we have 60000 images for training and 10000 for testing
# Scaling data
x_train = x_train/255
# BUG (the subject of this question): this divides the class LABELS (0-9) by
# 255, turning them into tiny fractional values instead of integer class ids,
# so sparse_categorical_crossentropy can no longer learn the classes — which
# matches the <10% accuracy described above. The intended line is presumably
# `x_test = x_test/255`.
y_train = y_train/255
# reshape the data to (N, 28, 28, 1) so Conv2D gets an explicit channel axis
x_train = x_train.reshape(60000,28,28,1)
x_test = x_test.reshape(10000,28,28,1)
y_train = y_train.reshape(60000,1)
y_test = y_test.reshape(10000,1)
# Create a model: two Conv2D + MaxPooling stages, then a small dense classifier
model = keras.Sequential([
keras.layers.Conv2D(64,(3,3),(1,1),padding = "same",input_shape=(28,28,1)),
keras.layers.MaxPooling2D(pool_size = (2,2),padding = "valid"),
keras.layers.Conv2D(32,(3,3),(1,1),padding = "same"),
keras.layers.MaxPooling2D(pool_size = (2,2),padding = "valid"),
keras.layers.Flatten(),
keras.layers.Dense(128,activation = "relu"),
keras.layers.Dense(10,activation = "softmax")])
# sparse_categorical_crossentropy expects integer class labels (not one-hot)
model.compile(optimizer = "adam",
loss = "sparse_categorical_crossentropy",
metrics = ['accuracy'])
model.fit(x_train,y_train,epochs=10)
test_loss,test_acc = model.evaluate(x_test,y_test)
print("\ntest accuracy:",test_acc)
有人可以建议我如何改进我的模型吗?
您的问题在这里:
x_train = x_train/255
y_train = y_train/255 # makes no sense
您应该重新缩放的是 x_test,而不是 y_train。
x_train = x_train/255
x_test = x_test/255
这可能只是您的错字。更改这些行,您将获得 95% 以上的准确率。
您的模型存在数据缩放问题;在 TF 2.0 中可以这样写:
x_train /= 255
x_test /= 255
你不需要像下面这样去缩放标签数据(y_train):
x_train = x_train/255
y_train = y_train/255
之后,我们可以将标签转换为单热编码
from tensorflow.keras.utils import to_categorical
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)
这有助于:
loss='categorical_crossentropy',
Sequential API 允许我们将网络层逐层堆叠。唯一的缺点是使用这类模型时不能有多个输出或输入。尽管如此,我们可以创建一个 Sequential 对象,并使用 add() 函数向模型添加层;例如连续添加 4 层 Conv2D,让模型更强、更准确:
seq_model.add(Conv2D(filters=32, kernel_size=(5,5), activation='relu',
input_shape=x_train.shape[1:]))
seq_model.add(Conv2D(filters=32, kernel_size=(5,5), activation='relu'))
seq_model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
seq_model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
在代码中你可以使用 dropout :
seq_model.add(Dropout(rate=0.25))
完整模型:
%tensorflow_version 2.x
from tensorflow.keras.datasets import mnist
# Load MNIST: 60000 training and 10000 test 28x28 grayscale digit images.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Scale the PIXELS of both the training and the test images to [0, 1];
# the labels are left untouched (this is the fix for the question's bug).
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# Add the channel axis expected by Conv2D: (N, 28, 28) -> (N, 28, 28, 1)
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
# One-hot encode the labels, as required by categorical_crossentropy
from tensorflow.keras.utils import to_categorical
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout
# Two convolutional stages (32 then 64 filters), each followed by max-pooling
# and dropout for regularisation, then a dense classifier head.
seq_model = Sequential()
seq_model.add(Conv2D(filters=32, kernel_size=(5,5), activation='relu',
input_shape=x_train.shape[1:]))
seq_model.add(Conv2D(filters=32, kernel_size=(5,5), activation='relu'))
seq_model.add(MaxPool2D(pool_size=(2, 2)))
seq_model.add(Dropout(rate=0.25))
seq_model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
seq_model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
seq_model.add(MaxPool2D(pool_size=(2, 2)))
seq_model.add(Dropout(rate=0.25))
seq_model.add(Flatten())
seq_model.add(Dense(256, activation='relu'))
seq_model.add(Dropout(rate=0.5))
seq_model.add(Dense(10, activation='softmax'))
# categorical_crossentropy matches the one-hot labels created above
seq_model.compile(
loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy']
)
epochsz = 3 # number of epochs
batch_sizez = 32 # the batch size; could also be 64, 128, etc.
seq_model.fit(x_train,y_train, batch_size=batch_sizez, epochs=epochsz)
结果:
Train on 60000 samples
Epoch 1/3
60000/60000 [==============================] - 186s 3ms/sample - loss: 0.1379 - accuracy: 0.9588
Epoch 2/3
60000/60000 [==============================] - 187s 3ms/sample - loss: 0.0677 - accuracy: 0.9804
Epoch 3/3
60000/60000 [==============================] - 187s 3ms/sample - loss: 0.0540 - accuracy: 0.9840