Overwrite plot every epoch

I wrote a small script that approximates a polynomial with a neural network and plots the result every epoch. The problem is that I want each new plot to overwrite the previous one at every iteration, so I can see how it changes as training progresses.

I searched online and found that I should use ion(), isinteractive(), or clear(), but I tried them all and it still doesn't work.

Edit: to clarify, I'm using a Jupyter notebook, so I'd like it to work on that platform.

Here is my code:

import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import Dense
from numpy import asarray
from matplotlib import pyplot
from tensorflow.keras.layers import Conv1D
import tensorflow

class myCallback(tensorflow.keras.callbacks.Callback):
    def on_train_begin(self, logs={}):    
        pyplot.ion()

        
        
    def on_epoch_end(self, epoch, logs=None):
        yhat = model.predict(x)
        # inverse transforms
        x_plot = scale_x.inverse_transform(x)
        y_plot = scale_y.inverse_transform(y)
        yhat_plot = scale_y.inverse_transform(yhat)
        # report model error
        print('MSE: %.3f' % mean_squared_error(y_plot, yhat_plot))
        # plot x vs y
        pyplot.scatter(x_plot, y_plot, label='Actual')
        # plot x vs yhat
        pyplot.scatter(x_plot,yhat_plot, label='Predicted')
        pyplot.title('Input (x) versus Output (y)')
        pyplot.xlabel('Input Variable (x)')
        pyplot.ylabel('Output Variable (y)')
        pyplot.legend()
        pyplot.show()
        
 


# define the dataset
x = asarray([i for i in range(-50,51)])
y = asarray([i**3 for i in x])
print(x.min(), x.max(), y.min(), y.max())
# reshape arrays into rows and cols
x = x.reshape((len(x), 1))
y = y.reshape((len(y), 1))
# separately scale the input and output variables
scale_x = MinMaxScaler()
x = scale_x.fit_transform(x)
scale_y = MinMaxScaler()
y = scale_y.fit_transform(y)
print(x.min(), x.max(), y.min(), y.max())
# design the neural network model
model = Sequential()
model.add(Dense(10, input_dim=1, activation='relu', kernel_initializer='he_uniform'))
#Conv1D(32, 5, activation='relu')
model.add(Dense(10, activation='relu', kernel_initializer='he_uniform'))
model.add(Dense(1))
opt = tensorflow.keras.optimizers.Adam(learning_rate=0.01)
# define the loss function and optimization algorithm
model.compile(loss='mse', optimizer=opt)
# fit the model on the training dataset
model.fit(x, y, epochs=10, batch_size=10, verbose=0, callbacks=[myCallback()])
# make predictions for the input data

Thanks a lot for your help!

You do get a new plot after every epoch, but the change is hardly visible because your model is too weak. The key addition below is IPython's clear_output(wait=True) at the start of on_epoch_end, which clears the cell's previous output so each new plot replaces the last one. Here is an example with a noticeable difference:

import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import Dense
from numpy import asarray
from matplotlib import pyplot
import tensorflow

from IPython.display import clear_output

class myCallback(tensorflow.keras.callbacks.Callback):

    def on_epoch_end(self, epoch, logs=None):
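        # clear the previous epoch's output so the new plot overwrites it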
        clear_output(wait=True)
        yhat = model.predict(x)
        # inverse transforms
        x_plot = scale_x.inverse_transform(x)
        y_plot = scale_y.inverse_transform(y)
        yhat_plot = scale_y.inverse_transform(yhat)
        # report model error
        print('MSE: %.3f' % mean_squared_error(y_plot, yhat_plot))
        # plot x vs y
        pyplot.scatter(x_plot, y_plot, label='Actual')
        # plot x vs yhat
        pyplot.scatter(x_plot,yhat_plot, label='Predicted')
        pyplot.title('Input (x) versus Output (y)')
        pyplot.xlabel('Input Variable (x)')
        pyplot.ylabel('Output Variable (y)')
        pyplot.legend()
        pyplot.show()
        
# define the dataset
x = asarray([i for i in range(-50,51)])
y = asarray([i**3 for i in x])
print(x.shape)
print(x.min(), x.max(), y.min(), y.max())
# reshape arrays into rows and cols
x = x.reshape((len(x), 1))
y = y.reshape((len(y), 1))
# separately scale the input and output variables
scale_x = MinMaxScaler()
x = scale_x.fit_transform(x)
scale_y = MinMaxScaler()
y = scale_y.fit_transform(y)
print(x.min(), x.max(), y.min(), y.max())
# design the neural network model
model = Sequential()
model.add(Dense(64, input_dim=1, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(16, activation='relu'))
model.add(Dense(1))
opt = tensorflow.keras.optimizers.Adam(learning_rate=0.01)
# define the loss function and optimization algorithm
model.compile(loss='mse', optimizer=opt)
# fit the model on the training dataset
model.fit(x, y, epochs=50, batch_size=10, verbose=0, callbacks=[myCallback()])
# make predictions for the input data

Here is the plot from the last epoch:
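
As a side note, the overwrite behaviour comes entirely from clear_output(wait=True) and has nothing to do with Keras. Below is a minimal standalone sketch of the same redraw-in-place pattern (the polynomial loop is just illustrative filler, not part of the model above); run it in a notebook cell to see one figure updating in place:

import time
import numpy as np
from matplotlib import pyplot
from IPython.display import clear_output

xs = np.linspace(-1, 1, 100)
for step in range(1, 6):
    # wait=True keeps the old output until the new one is ready, which avoids flicker
    clear_output(wait=True)
    pyplot.plot(xs, xs ** step, label='x^%d' % step)
    pyplot.title('Step %d' % step)
    pyplot.legend()
    pyplot.show()
    time.sleep(0.5)  # pause so each frame stays visible for a moment

The callback above follows the same structure: clear first, then draw, then show.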