import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Download (if needed) and load MNIST with one-hot labels.
mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)

def weight_variable(shape):
    # Small truncated-normal initialization to break symmetry.
    initial_value = tf.truncated_normal(shape=shape, stddev=0.1)
    return tf.Variable(initial_value)

def bias_variable(shape):
    # Small positive bias so ReLU units start active.
    initial_value = tf.constant(0.2, shape=shape)
    return tf.Variable(initial_value)

def con2d(x, w):
    # 2-D convolution, stride 1, SAME padding (spatial size is preserved).
    return tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding="SAME")

def pool(x):
    # 2x2 max pooling with stride 2 halves each spatial dimension.
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
# Input: flattened 28x28 grayscale images, reshaped to NHWC for conv2d.
x = tf.placeholder(tf.float32, [None, 784])
reshape_x = tf.reshape(x, [-1, 28, 28, 1])

# First conv layer: 5x5 kernels, 1 input channel -> 32 feature maps.
w1 = weight_variable([5, 5, 1, 32])
b1 = bias_variable([32])
out_conv1 = tf.nn.relu(con2d(reshape_x, w1) + b1)
pool_conv1 = pool(out_conv1)  # 28x28 -> 14x14

# Second conv layer: 5x5 kernels, 32 -> 64 feature maps.
w2 = weight_variable([5, 5, 32, 64])
b2 = bias_variable([64])
out_conv2 = tf.nn.relu(con2d(pool_conv1, w2) + b2)
pool_conv2 = pool(out_conv2)  # 14x14 -> 7x7

# Flatten and apply a 1024-unit fully connected layer.
out_reshape = tf.reshape(pool_conv2, [-1, 7 * 7 * 64])
fc1 = weight_variable([7 * 7 * 64, 1024])
bfc1 = bias_variable([1024])
out_fc1 = tf.nn.relu(tf.matmul(out_reshape, fc1) + bfc1)
# Dropout on the fully connected layer; the keep probability is fed at run time.
keep_prob_ratio = tf.placeholder(tf.float32)
out_dropout = tf.nn.dropout(out_fc1, keep_prob_ratio)

# Output layer: 1024 -> 10 class logits, then softmax.
fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
pure_input = tf.matmul(out_dropout, fc2) + b_fc2
y = tf.nn.softmax(pure_input)
# Ground-truth labels and a cross-entropy loss on the softmax output.
y_ = tf.placeholder(tf.float32, [None, 10])
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
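# Note: taking log(softmax) directly can underflow to log(0) = -inf once the
# network becomes confident. A numerically safer equivalent (a sketch, not in
# the original post) lets TensorFlow fuse softmax and log on the raw logits:
# cross_entropy = tf.reduce_mean(
#     tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=pure_input))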
# Plain gradient descent on the loss; accuracy compares argmax predictions.
train_step = tf.train.GradientDescentOptimizer(0.001).minimize(cross_entropy)
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)), tf.float32))

# initialize_all_variables() is deprecated; use global_variables_initializer().
initialize = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(initialize)
    for i in range(20000):
        # Train on mini-batches of 10, keeping 50% of FC activations.
        batch_x, batch_y_ = mnist.train.next_batch(10)
        sess.run(train_step, feed_dict={x: batch_x, y_: batch_y_, keep_prob_ratio: 0.5})
        if i % 100 == 0:
            # Periodically evaluate on a 100-image test batch (dropout disabled).
            test_x, test_y = mnist.test.next_batch(100)
            result = sess.run(accuracy, feed_dict={x: test_x, y_: test_y, keep_prob_ratio: 1.0})
            print("accuracy: %.4f" % result)
    print("final accuracy: %.4f" % sess.run(accuracy, feed_dict={x: test_x, y_: test_y, keep_prob_ratio: 1.0}))
This post describes a handwritten digit recognition system implemented with TensorFlow. It builds a convolutional neural network (CNN), trains it on the MNIST dataset, and minimizes a cross-entropy loss with a gradient descent optimizer. The code shows how to define weight and bias variables, perform convolution and pooling, and classify through fully connected layers.
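As a quick sanity check of the flatten size used in the model (a standalone sketch, not part of the original post): two stride-2, SAME-padded max-pools halve 28x28 twice, and the second conv layer emits 64 channels, so the flattened feature vector has 7 * 7 * 64 = 3136 entries.

# Standalone check of the 7*7*64 flatten dimension used in the model above.
h = w = 28
for _ in range(2):           # two 2x2 max-pools with stride 2
    h, w = h // 2, w // 2    # 28 -> 14 -> 7 (SAME padding, exact halving)
print(h * w * 64)            # 3136 == 7 * 7 * 64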