TensorBoard-01-Network Structure
The official example was, for me, a beginner, still a bit complex, so I had to break it apart and learn it piece by piece. Experts can just skip this post.
This post does no training; it only builds the network structure and displays it in TensorBoard.
The plan is a small two-layer network: one hidden layer plus an output layer.
import tensorflow as tf
# Note: this is TensorFlow 1.x graph-style code; on TF 2.x, replace the import
# with 'import tensorflow.compat.v1 as tf' and call 'tf.disable_v2_behavior()'.

# Helper: build one fully connected layer, wrapping each op in a name scope
# so that TensorBoard collapses the whole layer into a single graph node.
def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):
    with tf.name_scope(layer_name):
        with tf.name_scope('weight'):
            w = tf.Variable(tf.truncated_normal([input_dim, output_dim], stddev=0.1))
        with tf.name_scope('bias'):
            b = tf.Variable(tf.truncated_normal([output_dim], stddev=0.1))
        with tf.name_scope('wx_plus_b'):
            out = tf.add(tf.matmul(input_tensor, w), b)
        with tf.name_scope('activation'):
            out = act(out)
        return out
with tf.name_scope('input'):
    x = tf.placeholder(tf.float32, [None, 784], name='x_input')
    y = tf.placeholder(tf.float32, [None, 10], name='y_input')
# Build the network
# First layer: 784 -> 128, ReLU activation
hidden_1 = nn_layer(input_tensor=x, input_dim=784, output_dim=128, layer_name='layer_01')
# Dropout layer (keep_prob=1.0 means nothing is actually dropped;
# lower it, e.g. to 0.5, to enable dropout during real training)
with tf.name_scope('dropout'):
    dropped = tf.nn.dropout(hidden_1, keep_prob=1.0)
# Second layer: 128 -> 10, serves as the output
pre = nn_layer(input_tensor=dropped, input_dim=128, output_dim=10, layer_name='layer_02')
with tf.name_scope('loss'):
    loss = tf.reduce_mean(tf.square(pre - y), name='my_loss')  # mean squared error
with tf.name_scope('train'):
    optm = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(loss)

sess = tf.Session()
# Passing the session graph to FileWriter is what writes the structure
# into 'logs/' for TensorBoard to display.
summary_writer = tf.summary.FileWriter(logdir='logs/', graph=sess.graph)
sess.run(tf.global_variables_initializer())
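Nothing more is needed for the graph to show up, but as a quick sanity check (not part of the original example) you can push some random data through the untrained network. The fake_x and fake_y arrays below are made-up inputs whose shapes just have to match the placeholders:

import numpy as np

fake_x = np.random.rand(4, 784).astype(np.float32)   # a batch of 4 fake "images"
fake_y = np.eye(10, dtype=np.float32)[[0, 1, 2, 3]]  # 4 fake one-hot labels
print(sess.run(loss, feed_dict={x: fake_x, y: fake_y}))  # prints one scalar loss value

If this prints a finite number, the shapes of all the layers line up.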
To see the result, start TensorBoard with "tensorboard --logdir logs/", open http://localhost:6006 in a browser, and switch to the Graphs tab. The output is shown in the figure below:
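In the rendered graph, each tf.name_scope above appears as its own collapsible node (input, layer_01, dropout, layer_02, loss, train), and expanding layer_01 or layer_02 reveals the weight, bias, wx_plus_b, and activation sub-scopes.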