TensorFlow MNIST: a simple neural-network implementation
# -*- coding: utf-8 -*-
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load the dataset
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
# Size of each batch
batch_size = 10
# Number of batches per epoch
n_batch = mnist.train.num_examples // batch_size

# Define two placeholders: flattened 28x28 images and one-hot labels
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])

# Build a simple network: one fully connected layer with softmax output
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
prediction = tf.nn.softmax(tf.matmul(x, W) + b)

# Quadratic (mean squared error) cost
loss = tf.reduce_mean(tf.square(y - prediction))
# Gradient descent optimizer
train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

# Variable initializer
init = tf.global_variables_initializer()

# Per-example results as a boolean vector;
# tf.argmax returns the index of the largest value along the given axis
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
# Accuracy: fraction of correct predictions
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(21):
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})
        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print("Iter " + str(epoch) + ",Testing Accuracy " + str(acc))
The model is a single fully connected softmax layer (the 784-dimensional input feeds directly into the 10-way output, with no hidden layer), the cost function is the quadratic (squared) error, and the optimizer is plain gradient descent.
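For reference, `tf.reduce_mean(tf.square(y - prediction))` averages the squared error over both the batch of N examples and the 10 classes:

$$L_{\text{MSE}} = \frac{1}{10N}\sum_{i=1}^{N}\sum_{j=1}^{10}\bigl(y_{ij}-\hat{y}_{ij}\bigr)^{2},\qquad \hat{y}_i=\operatorname{softmax}(x_i W + b).$$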
Results:
Iter 0,Testing Accuracy 0.9047
Iter 1,Testing Accuracy 0.9124
Iter 2,Testing Accuracy 0.9171
Iter 3,Testing Accuracy 0.9199
Iter 4,Testing Accuracy 0.922
Iter 5,Testing Accuracy 0.921
Iter 6,Testing Accuracy 0.9229
Iter 7,Testing Accuracy 0.9233
Iter 8,Testing Accuracy 0.9233
Iter 9,Testing Accuracy 0.9246
Iter 10,Testing Accuracy 0.9247
Iter 11,Testing Accuracy 0.9257
Iter 12,Testing Accuracy 0.927
Iter 13,Testing Accuracy 0.9267
Iter 14,Testing Accuracy 0.9281
Iter 15,Testing Accuracy 0.9278
Iter 16,Testing Accuracy 0.9285
Iter 17,Testing Accuracy 0.9289
Iter 18,Testing Accuracy 0.9283
Iter 19,Testing Accuracy 0.9284
Iter 20,Testing Accuracy 0.9294
Next, switch the cost function to cross-entropy, which for softmax outputs generally converges faster than the quadratic cost. One caveat: `tf.nn.softmax_cross_entropy_with_logits` applies softmax internally and expects the raw, pre-softmax logits, but `prediction` below has already been through `tf.nn.softmax`, so softmax ends up applied twice. This flattens the gradients, which is why the accuracies reported below are actually lower than with the quadratic cost; a corrected sketch follows the results.
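For reference, with one-hot labels y and logits z, the cross-entropy cost averaged over a batch of N examples is:

$$L_{\text{CE}} = -\frac{1}{N}\sum_{i=1}^{N}\sum_{j=1}^{10} y_{ij}\,\log\operatorname{softmax}(z_i)_j.$$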
# Quadratic cost
# loss = tf.reduce_mean(tf.square(y-prediction))
# Cross-entropy cost (note: `prediction` is already softmaxed, see caveat above)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=prediction))
# Gradient descent optimizer
train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)
The results are as follows:
Iter 0,Testing Accuracy 0.8235
Iter 1,Testing Accuracy 0.8911
Iter 2,Testing Accuracy 0.9021
Iter 3,Testing Accuracy 0.9057
Iter 4,Testing Accuracy 0.9084
Iter 5,Testing Accuracy 0.911
Iter 6,Testing Accuracy 0.9121
Iter 7,Testing Accuracy 0.9139
Iter 8,Testing Accuracy 0.9153
Iter 9,Testing Accuracy 0.9149
Iter 10,Testing Accuracy 0.9179
Iter 11,Testing Accuracy 0.9177
Iter 12,Testing Accuracy 0.918
Iter 13,Testing Accuracy 0.919
Iter 14,Testing Accuracy 0.9206
Iter 15,Testing Accuracy 0.92
Iter 16,Testing Accuracy 0.9202
Iter 17,Testing Accuracy 0.9202
Iter 18,Testing Accuracy 0.9202
Iter 19,Testing Accuracy 0.9209
Iter 20,Testing Accuracy 0.9209
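A minimal corrected sketch of the cross-entropy setup, keeping everything else from the script above unchanged: the pre-softmax values are kept in a separate `logits` tensor (a name introduced here for illustration) and passed to `softmax_cross_entropy_with_logits`, while `prediction` is still used for the accuracy calculation.

# Keep the raw logits separate from the softmax output
logits = tf.matmul(x, W) + b
prediction = tf.nn.softmax(logits)

# Cross-entropy on the raw logits; softmax is applied internally,
# so it is no longer applied twice
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)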
Finally, switch the optimizer from plain gradient descent to Adam. Note that the learning rate also drops from 0.2 to 1e-2: Adam adapts per-parameter step sizes and typically works best with a much smaller base rate than SGD.
# Quadratic cost
# loss = tf.reduce_mean(tf.square(y-prediction))
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=prediction))
# Adam optimizer instead of gradient descent
# train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)
train_step = tf.train.AdamOptimizer(1e-2).minimize(loss)
Results:
Iter 0,Testing Accuracy 0.9222
Iter 1,Testing Accuracy 0.9256
Iter 2,Testing Accuracy 0.93
Iter 3,Testing Accuracy 0.931
Iter 4,Testing Accuracy 0.9274
Iter 5,Testing Accuracy 0.9315
Iter 6,Testing Accuracy 0.9299
Iter 7,Testing Accuracy 0.9309
Iter 8,Testing Accuracy 0.9281
Iter 9,Testing Accuracy 0.9296
Iter 10,Testing Accuracy 0.932
Iter 11,Testing Accuracy 0.9289
Iter 12,Testing Accuracy 0.9325
Iter 13,Testing Accuracy 0.9312
Iter 14,Testing Accuracy 0.9319
Iter 15,Testing Accuracy 0.9298
Iter 16,Testing Accuracy 0.9311
Iter 17,Testing Accuracy 0.9312
Iter 18,Testing Accuracy 0.9312
Iter 19,Testing Accuracy 0.931
Iter 20,Testing Accuracy 0.9292