My google colab session is crashing due to excessive RAM usage
I am training a CNN on 2403 images of 1280x720 pixels. Here is the code I am running:
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Activation, Dense, Flatten, Dropout

model = keras.Sequential()
model.add(Conv2D(32, (3, 3), input_shape=(1280, 720, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(3))
model.add(Activation('softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)

# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(
    '/gdrive/MyDrive/shot/training',
    target_size=(1280, 720),
    batch_size=640,
    class_mode='categorical')

history = model.fit(
    train_generator,
    steps_per_epoch=2403 // 640,
    epochs=15,
)
The session crashes before the first epoch. What can I do to reduce the RAM usage? What other options do I have?
It looks like your batch size is very large and is using up all of the available RAM, so I would suggest trying a smaller batch size first, for example 32 or 64. Your image size is also very large; you can scale it down for your first experiments.
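As a rough sanity check (a back-of-the-envelope sketch, assuming the images end up as float32), you can estimate how much memory a single batch of your current configuration needs before any layer activations are counted:

# Rough size of one raw input batch: batch * height * width * channels * 4 bytes (float32)
batch_bytes = 640 * 1280 * 720 * 3 * 4
print(batch_bytes / 1024 ** 3)  # ~6.6 GiB for the inputs alone, before activations and gradients

That is already more than half of the ~12 GB of RAM a free Colab runtime typically provides, so a generator call along these lines should be much safer: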
train_generator = train_datagen.flow_from_directory(
    '/gdrive/MyDrive/shot/training',
    target_size=(256, 256),  # -> change the image size
    batch_size=32,           # -> reduce the batch size
    class_mode='categorical'
)
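If you resize the images in the generator, remember to keep the rest of the script consistent with it. A minimal sketch of the two lines from your original code that would need to change, assuming the 256x256 / batch-size-32 values above:

# The first Conv2D layer must expect the resized images rather than 1280x720
model.add(Conv2D(32, (3, 3), input_shape=(256, 256, 3)))

# steps_per_epoch should be recomputed from the new batch size
history = model.fit(
    train_generator,
    steps_per_epoch=2403 // 32,  # ~75 steps per epoch instead of 3
    epochs=15,
)

Shrinking the input also greatly reduces the size of the Flatten layer's output, which is where most of this model's parameters come from, so the model itself becomes much lighter as well.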