"""Neural-network demo: fit a quadratic y = x^2 - 0.5 with one hidden layer.

Builds a 1-20-1 fully connected network with the TensorFlow 1.x
graph/session API and trains it by plain gradient descent.
"""
import tensorflow as tf
import numpy as np
import os  # NOTE(review): unused in this script — consider removing.

# 300 sample points in [-1, 1], reshaped to a (300, 1) column vector.
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
# Gaussian noise (mean 0, std 0.05) with the same shape as x_data.
noise = np.random.normal(0, 0.05, x_data.shape)
# Targets: y = x^2 - 0.5 plus noise.
y_data = np.square(x_data) - 0.5 + noise

# Placeholders for network input/target (any batch size, one feature).
xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])


def add_layer(inputs, in_size, out_size, activation_function=None):
    """Append one fully connected layer to the graph.

    Args:
        inputs: input tensor of shape (batch, in_size).
        in_size: number of input units.
        out_size: number of output units.
        activation_function: optional activation applied to the affine
            output; None yields a purely linear layer.

    Returns:
        Output tensor of shape (batch, out_size).
    """
    # Weight matrix (in_size x out_size), randomly initialized.
    weights = tf.Variable(tf.random_normal([in_size, out_size]))
    # Bias row vector (1 x out_size); start slightly positive so ReLU
    # units are active at initialization.
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    wx_plus_b = tf.matmul(inputs, weights) + biases
    if activation_function is None:
        return wx_plus_b
    return activation_function(wx_plus_b)


# Hidden layer: 20 ReLU units.
h1 = add_layer(xs, 1, 20, activation_function=tf.nn.relu)
# Output layer: one linear unit.
prediction = add_layer(h1, 20, 1, activation_function=None)

# Loss: per-sample sum of squared errors, averaged over the batch
# (mean squared error). `axis` replaces the deprecated
# `reduction_indices` keyword; behavior is identical.
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), axis=[1]))
# Minimize the loss with gradient descent, learning rate 0.1.
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

init = tf.global_variables_initializer()
# Context manager guarantees the session is closed (the original leaked it).
with tf.Session() as sess:
    sess.run(init)
    # Train for 100 steps, printing the loss every 10 steps.
    for i in range(100):
        sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
        if i % 10 == 0:
            print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))