Tensorflow - Retrieving weights/biases of a feedforward neural network after training
I am currently trying to create a simple web application for interactive neural network training with Flask. The problem I am struggling with is retrieving the weights of the hidden layers after a feedforward neural network has been trained; my goal is to create a real backend for Tensorflow's Playground.
Consider the following weight initializations:
# Weight initializations
tW1 = init_weights(shape=(n_features, hidden_nodes))
tW2 = init_weights(shape=(hidden_nodes, output_nodes))
How do I retrieve the computed values of tW1 and tW2 once training has finished in Tensorflow?
Here is the full code example:
import numpy as np
import pandas as pd
import tensorflow as tf


def retrieve_data():
    """Retrieves the data - to be expanded for custom database access + S3 retrieval + URL"""
    result = pd.read_csv('snp_data.csv', parse_dates=['Date'], index_col=['Date'])
    return result


def get_columns(data, columns):
    """Selects the given columns from the DataFrame"""
    features = data.loc[:, columns]  # .loc replaces the long-deprecated .ix
    return features


def preprocess(data):
    """Data preprocessing: z-score normalization"""
    result = (data - data.mean()) / data.std(ddof=0)
    result = result.fillna(0)
    return result


def init_weights(shape):
    """Weights initialization"""
    weights = tf.random_normal(shape=shape, stddev=0.1)
    return tf.Variable(weights)


def forwardprop(X, w_1, w_2):
    """Forward propagation"""
    h = tf.nn.relu(tf.matmul(X, w_1))
    y_hat = tf.matmul(h, w_2)
    return y_hat


# @app.route('/train')
def train():
    data = retrieve_data()

    # Truncate the data so its length is a multiple of the batch size (32)
    train_x = get_columns(data, columns=['Open', 'Close'])
    train_x = preprocess(data=train_x).values.astype(np.float32)  # .values replaces the deprecated .as_matrix()
    train_x = train_x[:(len(train_x) - (len(train_x) % 32))]

    train_y = get_columns(data, columns=['Adj Close']).values.astype(np.float32)
    train_y = train_y[:(len(train_y) - (len(train_y) % 32))]

    # Number of input nodes
    n_features = train_x.shape[1]
    # Number of output nodes
    output_nodes = train_y.shape[1]
    # Number of hidden nodes
    hidden_nodes = 20

    # TF placeholders for the inputs and outputs
    tx = tf.placeholder(tf.float32, shape=(None, n_features))
    ty = tf.placeholder(tf.float32, shape=(None, output_nodes))

    # Weight initializations
    tW1 = init_weights(shape=(n_features, hidden_nodes))
    tW2 = init_weights(shape=(hidden_nodes, output_nodes))

    # Forward propagation
    y_hat = forwardprop(tx, tW1, tW2)

    # Backward propagation
    tMSE = tf.reduce_mean(tf.square(y_hat - ty))
    learning_rate = 0.001
    tOptimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
    tOptimize = tOptimizer.minimize(tMSE)

    batch_size = 32
    n_epochs = 8

    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        for i_e in range(n_epochs):
            for i in range(0, train_x.shape[0], batch_size):
                batch_X = train_x[i:i + batch_size, ...]
                batch_y = train_y[i:i + batch_size]
                _, loss = sess.run([tOptimize, tMSE], feed_dict={tx: batch_X, ty: batch_y})
                print(i, loss)

    return 'Flask Dockerized'
Once the for loop has completed, this should be as simple as final_tW1, final_tW2 = sess.run([tW1, tW2]), run while the session is still open. You don't need to feed anything, because the variables hold their own values and do not depend on the placeholders.
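For example, here is a minimal sketch of the end of train() under that approach; the JSON serialization and the weights_payload name are illustrative additions for the Flask backend, not part of the original code:

import json  # assumed import for the illustrative serialization below

# ... still inside train(), within the `with tf.Session() as sess:` block,
# immediately after the epoch loop has finished:
final_tW1, final_tW2 = sess.run([tW1, tW2])  # no feed_dict needed

# sess.run returns plain numpy arrays here, so the weights can be
# serialized for a frontend such as the Playground:
weights_payload = json.dumps({
    'W1': final_tW1.tolist(),  # shape: (n_features, hidden_nodes)
    'W2': final_tW2.tolist(),  # shape: (hidden_nodes, output_nodes)
})
return weights_payload

Note that the sess.run([tW1, tW2]) call has to happen before the with block exits: the variable values live in the session and are freed when it closes.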