Write the following code in PyCharm as a practice exercise:
import tensorflow as tf

tf.compat.v1.disable_eager_execution()  # needed under TF 2.x so the TF1-style placeholder/Session code below can run

def add_layer(inputs, in_size, out_size, activation_function=None):  # add a neural-network layer
    with tf.name_scope('layer'):
        with tf.name_scope('weights'):
            # weight matrix (capital W for a matrix); in_size and out_size are its row and column dimensions
            Weights = tf.Variable(tf.random.normal([in_size, out_size]), name='W')
        with tf.name_scope('biases'):
            # biases should not start at 0, so add 0.1
            biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, name='b')
        with tf.name_scope('Wx_plus_b'):
            Wx_plus_b = tf.matmul(inputs, Weights) + biases
        if activation_function is None:
            outputs = Wx_plus_b
        else:
            outputs = activation_function(Wx_plus_b)
        return outputs

# define the two inputs
with tf.name_scope('inputs'):
    xs = tf.compat.v1.placeholder(tf.float32, [None, 1], name='x_input')
    ys = tf.compat.v1.placeholder(tf.float32, [None, 1], name='y_input')

# define the two layers
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)  # hidden layer with 10 neurons
prediction = add_layer(l1, 10, 1, activation_function=None)  # output layer

# error between the predicted values and the true values
with tf.name_scope('loss'):
    loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), axis=[1]))  # axis replaces the deprecated reduction_indices

with tf.name_scope('train'):
    # correct the error; 0.1 is the learning rate, and minimize(loss) reduces the training error
    train_step = tf.compat.v1.train.GradientDescentOptimizer(0.1).minimize(loss)

# init = tf.compat.v1.global_variables_initializer()  # initialize variables
sess = tf.compat.v1.Session()
writer = tf.compat.v1.summary.FileWriter("logs/", sess.graph)  # sess.graph is the whole computation graph
# the most important step
sess.run(tf.compat.v1.global_variables_initializer())
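This script only builds the graph and writes it out; no training happens yet. One detail worth adding at the end, as a minimal sketch reusing the writer defined above: the FileWriter buffers events, so closing it guarantees the event file in logs/ is fully written before TensorBoard reads it.

writer.close()  # flushes buffered events to the logs/ event file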
After running the script, a logs folder is generated in the project tree on the left side of PyCharm.

Next, how to use TensorBoard:

Open a cmd window. If the tensorboard command is not on your PATH, first locate the folder where TensorBoard is installed and copy that path. Then find the directory that holds the log files you want to visualize, copy its path, and run in cmd: tensorboard --logdir=<your path>.

Note: if the address TensorBoard prints (such as http://yxz-PC:6006/) does not work and your operating system is Windows, try localhost:6006 instead.
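For example, assuming the script was run from the project root so the events landed in logs/, the launch could look like this (the project directory is hypothetical):

cd C:\projects\tf_practice
tensorboard --logdir=logs

Then open the URL that TensorBoard prints in a browser.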
import tensorflow as tf
import numpy as np

tf.compat.v1.disable_eager_execution()  # needed under TF 2.x so the TF1-style placeholder/Session code below can run

def add_layer(inputs, in_size, out_size, n_layer, activation_function=None):  # add a neural-network layer
    layer_name = 'layer%s' % n_layer
    with tf.name_scope(layer_name):  # per-layer name so each layer gets its own scope
        with tf.name_scope('weights'):
            # weight matrix (capital W for a matrix); in_size and out_size are its row and column dimensions
            Weights = tf.Variable(tf.random.normal([in_size, out_size]), name='W')
            tf.compat.v1.summary.histogram(layer_name + '/weights', Weights)
        with tf.name_scope('biases'):
            # biases should not start at 0, so add 0.1
            biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, name='b')
            tf.compat.v1.summary.histogram(layer_name + '/biases', biases)
        with tf.name_scope('Wx_plus_b'):
            Wx_plus_b = tf.matmul(inputs, Weights) + biases
        if activation_function is None:
            outputs = Wx_plus_b
        else:
            outputs = activation_function(Wx_plus_b)
        tf.compat.v1.summary.histogram(layer_name + '/outputs', outputs)
        return outputs

# generate some realistic data
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise

# define the two inputs
with tf.name_scope('inputs'):
    xs = tf.compat.v1.placeholder(tf.float32, [None, 1], name='x_input')
    ys = tf.compat.v1.placeholder(tf.float32, [None, 1], name='y_input')

# define the two layers
l1 = add_layer(xs, 1, 10, n_layer=1, activation_function=tf.nn.relu)  # hidden layer with 10 neurons
prediction = add_layer(l1, 10, 1, n_layer=2, activation_function=None)  # output layer

# error between the predicted values and the true values
with tf.name_scope('loss'):
    loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), axis=[1]))  # axis replaces the deprecated reduction_indices
    tf.compat.v1.summary.scalar('loss', loss)

with tf.name_scope('train'):
    # correct the error; 0.1 is the learning rate, and minimize(loss) reduces the training error
    train_step = tf.compat.v1.train.GradientDescentOptimizer(0.1).minimize(loss)

# init = tf.compat.v1.global_variables_initializer()  # initialize variables
sess = tf.compat.v1.Session()
merged = tf.compat.v1.summary.merge_all()  # merge all histogram/scalar summaries into one op
writer = tf.compat.v1.summary.FileWriter("logs/", sess.graph)  # sess.graph is the whole computation graph
# the most important step
sess.run(tf.compat.v1.global_variables_initializer())

for i in range(100):
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    if i % 5 == 0:  # record the merged summaries every 5 steps
        result = sess.run(merged, feed_dict={xs: x_data, ys: y_data})
        writer.add_summary(result, i)
This version records more charts than the first script: besides the graph, TensorBoard's SCALARS tab now shows the shape of the loss curve, and the HISTOGRAMS tab shows how each layer's weights, biases, and outputs evolve over training.
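As a quick sanity check after the training loop, here is a minimal sketch (reusing sess, loss, xs, ys, x_data, y_data, and writer from the script above) that prints the final loss and flushes the summaries to disk:

final_loss = sess.run(loss, feed_dict={xs: x_data, ys: y_data})
print('loss after 100 steps:', final_loss)  # should be well below the loss at step 0
writer.close()  # flush the recorded scalars and histograms so TensorBoard can read them
sess.close()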