从 R 中 Google CloudML 的训练中获取模型
Get model from training on Google CloudML in R
求助!我使用 cloudml_train("model.R", master_type = "complex_model_m_p100") 在 CloudML 上训练了一个模型,现在需要取回训练好的模型。我没有在脚本中指定任何保存模型的操作——原本以为 job_collect() 会返回最后一个 epoch 之后的权重。
但执行 job_collect() 只返回了训练输入的 jobDir: gs://project/r-cloudml/staging。
有什么方法可以取回模型的权重吗?或者如何在脚本中设置能与 Google CloudML 配合使用的回调?下面是脚本:
# Load the Keras interface and the pre-saved training (sspr.*) and
# validation (yspr.*) tensors into the global environment.
library(keras)

tensor_files <- c(
  "sspr.ndvi.tensor.RData",
  "sspr.highdem.tensor.RData",
  "sspr.lowdem.tensor.RData",
  "yspr.ndvi.tensor.RData",
  "yspr.highdem.tensor.RData",
  "yspr.lowdem.tensor.RData"
)
for (tensor_file in tensor_files) {
  load(tensor_file)
}
# Model: dual-input CNN ("incept_dual").
# Two 51x51x1 inputs: a high-resolution crop and a low-resolution DEM.
highres.crop.input <- layer_input(shape = c(51, 51, 1), name = "highres.crop_input")
lowdem.input <- layer_input(shape = c(51, 51, 1), name = "lowdem.input")

# Low-resolution DEM branch: conv + pool down, then upsample back to 51x51
# so it can be merged with the high-resolution branch.
# NOTE: the original used `filter =`, which only works via partial argument
# matching; the parameter is spelled `filters`.
lowdem_output <- lowdem.input %>%
  layer_gaussian_dropout(rate = 0.35) %>%
  layer_conv_2d(kernel_size = c(3, 3), strides = 1, filters = 14,
                activation = "relu", padding = "same",
                data_format = "channels_last") %>%
  layer_max_pooling_2d(pool_size = c(3, 3)) %>%
  layer_conv_2d(kernel_size = c(3, 3), strides = 1, filters = 16,
                activation = "relu", padding = "same",
                data_format = "channels_last") %>%
  layer_batch_normalization() %>%
  layer_average_pooling_2d(pool_size = c(17, 17)) %>%
  layer_upsampling_2d(size = c(51, 51), name = "lowdem_output")

# Inception-style block 0 on the high-resolution input.
inception_input0 <- highres.crop.input %>%
  layer_gaussian_dropout(rate = 0.35) %>%
  layer_conv_2d(kernel_size = c(3, 3), strides = 1, filters = 16,
                activation = "relu", padding = "same",
                data_format = "channels_last") %>%
  layer_conv_2d(kernel_size = c(2, 2), filters = 16,
                activation = "relu", padding = "same") %>%
  layer_batch_normalization(name = "inception_input0")

# Bottleneck -> pooled 1x7 / 7x1 convolutions -> upsample back to 51x51.
inception_output0 <- inception_input0 %>%
  layer_conv_2d(kernel_size = c(1, 1), filters = 1,
                activation = "relu", padding = "same") %>%
  layer_max_pooling_2d(pool_size = c(3, 3)) %>%
  layer_conv_2d(kernel_size = c(1, 7), filters = 16,
                activation = "relu", padding = "same") %>%
  layer_conv_2d(kernel_size = c(7, 1), filters = 16,
                activation = "relu", padding = "same") %>%
  layer_upsampling_2d(size = c(3, 3), interpolation = "nearest",
                      name = "inception_output0")

# Merge block input, block output and the DEM branch, then refine.
cnn_inter_output0 <- layer_add(c(inception_input0, inception_output0, lowdem_output)) %>%
  layer_conv_2d(kernel_size = c(1, 5), filters = 6,
                activation = "relu", padding = "same") %>%
  layer_conv_2d(kernel_size = c(5, 1), filters = 6,
                activation = "relu", padding = "same",
                name = "cnn_inter_output0")

# Residual connection back to the raw high-resolution input.
added_inception_highres0 <- layer_add(c(highres.crop.input, cnn_inter_output0)) %>%
  layer_conv_2d(kernel_size = c(1, 1), filters = 4,
                activation = "relu", padding = "same",
                name = "added_inception_highres0")
#### Inception block 1 ####
# Same structure as block 0 (without the DEM merge); `filter =` fixed to the
# real parameter name `filters` (the original relied on partial matching).
inception_input1 <- added_inception_highres0 %>%
  layer_gaussian_dropout(rate = 0.35) %>%
  layer_conv_2d(kernel_size = c(3, 3), strides = 1, filters = 16,
                activation = "relu", padding = "same",
                data_format = "channels_last") %>%
  layer_conv_2d(kernel_size = c(3, 3), filters = 8,
                activation = "relu", padding = "same") %>%
  layer_batch_normalization(name = "inception_input1")

# Bottleneck -> pooled 1x7 / 7x1 convolutions -> upsample back to 51x51.
inception_output1 <- inception_input1 %>%
  layer_conv_2d(kernel_size = c(1, 1), filters = 1,
                activation = "relu", padding = "same") %>%
  layer_max_pooling_2d(pool_size = c(3, 3)) %>%
  layer_conv_2d(kernel_size = c(1, 7), filters = 8,
                activation = "relu", padding = "same") %>%
  layer_conv_2d(kernel_size = c(7, 1), filters = 8,
                activation = "relu", padding = "same") %>%
  layer_upsampling_2d(size = c(3, 3), interpolation = "nearest",
                      name = "inception_output1")

# Merge block input and block output, then refine.
cnn_inter_output1 <- layer_add(c(inception_input1, inception_output1)) %>%
  layer_conv_2d(kernel_size = c(1, 5), filters = 6,
                activation = "relu", padding = "same") %>%
  layer_conv_2d(kernel_size = c(5, 1), filters = 6,
                activation = "relu", padding = "same",
                name = "cnn_inter_output1")

added_inception_highres1 <- cnn_inter_output1 %>%
  layer_conv_2d(kernel_size = c(1, 1), filters = 2,
                activation = "relu", padding = "same",
                name = "added_inception_highres1")
#### Inception block 2 ####
# Final block; reduces to a single channel so the output matches the
# 51x51x1 NDVI target. `filter =` fixed to `filters` (partial matching).
inception_input2 <- added_inception_highres1 %>%
  layer_conv_2d(kernel_size = c(3, 3), strides = 1, filters = 16,
                activation = "relu", padding = "same",
                data_format = "channels_last") %>%
  layer_conv_2d(kernel_size = c(3, 3), filters = 8,
                activation = "relu", padding = "same") %>%
  layer_batch_normalization(name = "inception_input2")

# Bottleneck -> pooled 1x7 / 7x1 convolutions -> upsample back to 51x51.
inception_output2 <- inception_input2 %>%
  layer_conv_2d(kernel_size = c(1, 1), filters = 1,
                activation = "relu", padding = "same") %>%
  layer_max_pooling_2d(pool_size = c(3, 3)) %>%
  layer_conv_2d(kernel_size = c(1, 7), filters = 8,
                activation = "relu", padding = "same") %>%
  layer_conv_2d(kernel_size = c(7, 1), filters = 8,
                activation = "relu", padding = "same") %>%
  layer_upsampling_2d(size = c(3, 3), interpolation = "nearest",
                      name = "inception_output2")

# Merge block input and block output, then refine.
cnn_inter_output2 <- layer_add(c(inception_input2, inception_output2)) %>%
  layer_conv_2d(kernel_size = c(1, 5), filters = 6,
                activation = "relu", padding = "same") %>%
  layer_conv_2d(kernel_size = c(5, 1), filters = 6,
                activation = "relu", padding = "same",
                name = "cnn_inter_output2")

# Single-channel output layer (the model's prediction).
added_inception_highres2 <- cnn_inter_output2 %>%
  layer_conv_2d(kernel_size = c(1, 1), filters = 1,
                activation = "relu", padding = "same",
                name = "added_inception_highres2")
# Assemble the two-input, single-output model.
incept_dual <- keras_model(
  inputs = c(highres.crop.input, lowdem.input),
  outputs = added_inception_highres2
)
summary(incept_dual)

# compile() takes `metrics` (plural). The original passed `metric = "mse"`,
# which does not match any compile() argument and was silently absorbed by
# `...`, so no metric was tracked.
incept_dual %>% compile(
  loss = "mse",
  optimizer = "Nadam",
  metrics = "mse"
)

# Inputs/targets are matched to layers by name.
incept_dual %>% fit(
  x = list(highres.crop_input = sspr.highdem.tensor,
           lowdem.input = sspr.lowdem.tensor),
  y = list(added_inception_highres2 = sspr.ndvi.tensor),
  epochs = 1000,
  batch_size = 32,
  validation_data = list(
    list(yspr.highdem.tensor, yspr.lowdem.tensor),
    yspr.ndvi.tensor
  ),
  shuffle = TRUE
)
看来您想使用 R 代码从 gs://project/r-cloudml/staging 加载模型来分析权重。
cloudml R 包提供了 gs_copy 函数(见 https://cran.r-project.org/web/packages/cloudml/cloudml.pdf 第 6 页),应该会有所帮助。
您可能需要先用 gcloud auth 授权访问 GCS。然后就可以用 gs_copy("gs://project/r-cloudml/staging", "/local/directory") 把保存的模型复制到 R 环境(如 Jupyter 或 RStudio)所在的本地目录。
之后就可以用常规的 Keras R 包命令加载并分析模型权重了:https://keras.rstudio.com/articles/tutorial_save_and_restore.html
答案是:在脚本中只用不带父路径的文件名来定义保存位置。
# Periodic checkpoint: save weights every 5 epochs. File paths have no parent
# directory, so CloudML copies them into the job's Cloud Storage bucket.
# The original also passed `save_freq = 5`, which in TF2 counts *batches*,
# not epochs, and contradicts `period = 5`; only the epoch-based setting is
# kept to match the "five_epoch_checkpoint" intent.
checkpoint_path <- "five_epoch_checkpoint.ckpt"
lastditch_callback <- callback_model_checkpoint(
  filepath = checkpoint_path,
  save_weights_only = TRUE,
  save_best_only = FALSE,
  period = 5,
  verbose = 0
)

# Best-model checkpoint: keep only the weights with the best monitored
# validation metric.
best_path <- "best.ckpt"
bestmod_callback <- callback_model_checkpoint(
  filepath = best_path,
  save_weights_only = TRUE,
  save_best_only = TRUE,
  mode = "auto",
  verbose = 0
)

incept_dual %>% fit(
  x = list(highres.crop_input = sspr.highdem.tensor,
           lowdem.input = sspr.lowdem.tensor),
  # The model's output layer is "added_inception_highres2"; the original
  # `prediction = ...` does not name any output of incept_dual.
  y = list(added_inception_highres2 = sspr.ndvi.tensor),
  epochs = 50,
  batch_size = 32,
  validation_data = list(
    list(yspr.highdem.tensor, yspr.lowdem.tensor),
    yspr.ndvi.tensor
  ),
  callbacks = list(lastditch_callback, bestmod_callback),
  shuffle = TRUE
)

# Save the full trained model; it also ends up in the job's storage bucket.
save_model_hdf5(incept_dual, "incept_dual.h5")
five_epoch_checkpoint.ckpt、best.ckpt 和 incept_dual.h5 都会出现在模型结果自动保存到的 Google 存储桶中。不幸的是,那次训练的模型已经无法取回,但现在在以后的运行中可以保存检查点和最终模型了。
求助!我使用 cloudml_train("model.R", master_type = "complex_model_m_p100") 在 CloudML 上训练了一个模型,现在需要取回训练好的模型。我没有在脚本中指定任何保存模型的操作——原本以为 job_collect() 会返回最后一个 epoch 之后的权重。
但执行 job_collect() 只返回了训练输入的 jobDir: gs://project/r-cloudml/staging。
有什么方法可以取回模型的权重吗?或者如何在脚本中设置能与 Google CloudML 配合使用的回调?下面是脚本:
# Load the Keras interface and the pre-saved training (sspr.*) and
# validation (yspr.*) tensors into the global environment.
library(keras)

tensor_files <- c(
  "sspr.ndvi.tensor.RData",
  "sspr.highdem.tensor.RData",
  "sspr.lowdem.tensor.RData",
  "yspr.ndvi.tensor.RData",
  "yspr.highdem.tensor.RData",
  "yspr.lowdem.tensor.RData"
)
for (tensor_file in tensor_files) {
  load(tensor_file)
}
# Model: dual-input CNN ("incept_dual").
# Two 51x51x1 inputs: a high-resolution crop and a low-resolution DEM.
highres.crop.input <- layer_input(shape = c(51, 51, 1), name = "highres.crop_input")
lowdem.input <- layer_input(shape = c(51, 51, 1), name = "lowdem.input")

# Low-resolution DEM branch: conv + pool down, then upsample back to 51x51
# so it can be merged with the high-resolution branch.
# NOTE: the original used `filter =`, which only works via partial argument
# matching; the parameter is spelled `filters`.
lowdem_output <- lowdem.input %>%
  layer_gaussian_dropout(rate = 0.35) %>%
  layer_conv_2d(kernel_size = c(3, 3), strides = 1, filters = 14,
                activation = "relu", padding = "same",
                data_format = "channels_last") %>%
  layer_max_pooling_2d(pool_size = c(3, 3)) %>%
  layer_conv_2d(kernel_size = c(3, 3), strides = 1, filters = 16,
                activation = "relu", padding = "same",
                data_format = "channels_last") %>%
  layer_batch_normalization() %>%
  layer_average_pooling_2d(pool_size = c(17, 17)) %>%
  layer_upsampling_2d(size = c(51, 51), name = "lowdem_output")

# Inception-style block 0 on the high-resolution input.
inception_input0 <- highres.crop.input %>%
  layer_gaussian_dropout(rate = 0.35) %>%
  layer_conv_2d(kernel_size = c(3, 3), strides = 1, filters = 16,
                activation = "relu", padding = "same",
                data_format = "channels_last") %>%
  layer_conv_2d(kernel_size = c(2, 2), filters = 16,
                activation = "relu", padding = "same") %>%
  layer_batch_normalization(name = "inception_input0")

# Bottleneck -> pooled 1x7 / 7x1 convolutions -> upsample back to 51x51.
inception_output0 <- inception_input0 %>%
  layer_conv_2d(kernel_size = c(1, 1), filters = 1,
                activation = "relu", padding = "same") %>%
  layer_max_pooling_2d(pool_size = c(3, 3)) %>%
  layer_conv_2d(kernel_size = c(1, 7), filters = 16,
                activation = "relu", padding = "same") %>%
  layer_conv_2d(kernel_size = c(7, 1), filters = 16,
                activation = "relu", padding = "same") %>%
  layer_upsampling_2d(size = c(3, 3), interpolation = "nearest",
                      name = "inception_output0")

# Merge block input, block output and the DEM branch, then refine.
cnn_inter_output0 <- layer_add(c(inception_input0, inception_output0, lowdem_output)) %>%
  layer_conv_2d(kernel_size = c(1, 5), filters = 6,
                activation = "relu", padding = "same") %>%
  layer_conv_2d(kernel_size = c(5, 1), filters = 6,
                activation = "relu", padding = "same",
                name = "cnn_inter_output0")

# Residual connection back to the raw high-resolution input.
added_inception_highres0 <- layer_add(c(highres.crop.input, cnn_inter_output0)) %>%
  layer_conv_2d(kernel_size = c(1, 1), filters = 4,
                activation = "relu", padding = "same",
                name = "added_inception_highres0")
#### Inception block 1 ####
# Same structure as block 0 (without the DEM merge); `filter =` fixed to the
# real parameter name `filters` (the original relied on partial matching).
inception_input1 <- added_inception_highres0 %>%
  layer_gaussian_dropout(rate = 0.35) %>%
  layer_conv_2d(kernel_size = c(3, 3), strides = 1, filters = 16,
                activation = "relu", padding = "same",
                data_format = "channels_last") %>%
  layer_conv_2d(kernel_size = c(3, 3), filters = 8,
                activation = "relu", padding = "same") %>%
  layer_batch_normalization(name = "inception_input1")

# Bottleneck -> pooled 1x7 / 7x1 convolutions -> upsample back to 51x51.
inception_output1 <- inception_input1 %>%
  layer_conv_2d(kernel_size = c(1, 1), filters = 1,
                activation = "relu", padding = "same") %>%
  layer_max_pooling_2d(pool_size = c(3, 3)) %>%
  layer_conv_2d(kernel_size = c(1, 7), filters = 8,
                activation = "relu", padding = "same") %>%
  layer_conv_2d(kernel_size = c(7, 1), filters = 8,
                activation = "relu", padding = "same") %>%
  layer_upsampling_2d(size = c(3, 3), interpolation = "nearest",
                      name = "inception_output1")

# Merge block input and block output, then refine.
cnn_inter_output1 <- layer_add(c(inception_input1, inception_output1)) %>%
  layer_conv_2d(kernel_size = c(1, 5), filters = 6,
                activation = "relu", padding = "same") %>%
  layer_conv_2d(kernel_size = c(5, 1), filters = 6,
                activation = "relu", padding = "same",
                name = "cnn_inter_output1")

added_inception_highres1 <- cnn_inter_output1 %>%
  layer_conv_2d(kernel_size = c(1, 1), filters = 2,
                activation = "relu", padding = "same",
                name = "added_inception_highres1")
#### Inception block 2 ####
# Final block; reduces to a single channel so the output matches the
# 51x51x1 NDVI target. `filter =` fixed to `filters` (partial matching).
inception_input2 <- added_inception_highres1 %>%
  layer_conv_2d(kernel_size = c(3, 3), strides = 1, filters = 16,
                activation = "relu", padding = "same",
                data_format = "channels_last") %>%
  layer_conv_2d(kernel_size = c(3, 3), filters = 8,
                activation = "relu", padding = "same") %>%
  layer_batch_normalization(name = "inception_input2")

# Bottleneck -> pooled 1x7 / 7x1 convolutions -> upsample back to 51x51.
inception_output2 <- inception_input2 %>%
  layer_conv_2d(kernel_size = c(1, 1), filters = 1,
                activation = "relu", padding = "same") %>%
  layer_max_pooling_2d(pool_size = c(3, 3)) %>%
  layer_conv_2d(kernel_size = c(1, 7), filters = 8,
                activation = "relu", padding = "same") %>%
  layer_conv_2d(kernel_size = c(7, 1), filters = 8,
                activation = "relu", padding = "same") %>%
  layer_upsampling_2d(size = c(3, 3), interpolation = "nearest",
                      name = "inception_output2")

# Merge block input and block output, then refine.
cnn_inter_output2 <- layer_add(c(inception_input2, inception_output2)) %>%
  layer_conv_2d(kernel_size = c(1, 5), filters = 6,
                activation = "relu", padding = "same") %>%
  layer_conv_2d(kernel_size = c(5, 1), filters = 6,
                activation = "relu", padding = "same",
                name = "cnn_inter_output2")

# Single-channel output layer (the model's prediction).
added_inception_highres2 <- cnn_inter_output2 %>%
  layer_conv_2d(kernel_size = c(1, 1), filters = 1,
                activation = "relu", padding = "same",
                name = "added_inception_highres2")
# Assemble the two-input, single-output model.
incept_dual <- keras_model(
  inputs = c(highres.crop.input, lowdem.input),
  outputs = added_inception_highres2
)
summary(incept_dual)

# compile() takes `metrics` (plural). The original passed `metric = "mse"`,
# which does not match any compile() argument and was silently absorbed by
# `...`, so no metric was tracked.
incept_dual %>% compile(
  loss = "mse",
  optimizer = "Nadam",
  metrics = "mse"
)

# Inputs/targets are matched to layers by name.
incept_dual %>% fit(
  x = list(highres.crop_input = sspr.highdem.tensor,
           lowdem.input = sspr.lowdem.tensor),
  y = list(added_inception_highres2 = sspr.ndvi.tensor),
  epochs = 1000,
  batch_size = 32,
  validation_data = list(
    list(yspr.highdem.tensor, yspr.lowdem.tensor),
    yspr.ndvi.tensor
  ),
  shuffle = TRUE
)
看来您想使用 R 代码从 gs://project/r-cloudml/staging 加载模型来分析权重。
cloudml R 包提供了 gs_copy 函数(见 https://cran.r-project.org/web/packages/cloudml/cloudml.pdf 第 6 页),应该会有所帮助。
您可能需要先用 gcloud auth 授权访问 GCS。然后就可以用 gs_copy("gs://project/r-cloudml/staging", "/local/directory") 把保存的模型复制到 R 环境(如 Jupyter 或 RStudio)所在的本地目录。
之后就可以用常规的 Keras R 包命令加载并分析模型权重了:https://keras.rstudio.com/articles/tutorial_save_and_restore.html
答案是:在脚本中只用不带父路径的文件名来定义保存位置。
# Periodic checkpoint: save weights every 5 epochs. File paths have no parent
# directory, so CloudML copies them into the job's Cloud Storage bucket.
# The original also passed `save_freq = 5`, which in TF2 counts *batches*,
# not epochs, and contradicts `period = 5`; only the epoch-based setting is
# kept to match the "five_epoch_checkpoint" intent.
checkpoint_path <- "five_epoch_checkpoint.ckpt"
lastditch_callback <- callback_model_checkpoint(
  filepath = checkpoint_path,
  save_weights_only = TRUE,
  save_best_only = FALSE,
  period = 5,
  verbose = 0
)

# Best-model checkpoint: keep only the weights with the best monitored
# validation metric.
best_path <- "best.ckpt"
bestmod_callback <- callback_model_checkpoint(
  filepath = best_path,
  save_weights_only = TRUE,
  save_best_only = TRUE,
  mode = "auto",
  verbose = 0
)

incept_dual %>% fit(
  x = list(highres.crop_input = sspr.highdem.tensor,
           lowdem.input = sspr.lowdem.tensor),
  # The model's output layer is "added_inception_highres2"; the original
  # `prediction = ...` does not name any output of incept_dual.
  y = list(added_inception_highres2 = sspr.ndvi.tensor),
  epochs = 50,
  batch_size = 32,
  validation_data = list(
    list(yspr.highdem.tensor, yspr.lowdem.tensor),
    yspr.ndvi.tensor
  ),
  callbacks = list(lastditch_callback, bestmod_callback),
  shuffle = TRUE
)

# Save the full trained model; it also ends up in the job's storage bucket.
save_model_hdf5(incept_dual, "incept_dual.h5")
five_epoch_checkpoint.ckpt、best.ckpt 和 incept_dual.h5 都会出现在模型结果自动保存到的 Google 存储桶中。不幸的是,那次训练的模型已经无法取回,但现在在以后的运行中可以保存检查点和最终模型了。