TensorFlow Tutorial 5: MNIST Handwritten Digit Recognition (CNN)

MNIST Handwritten Digit Recognition
This post walks through classifying the MNIST handwritten digit dataset with a convolutional neural network, covering dataset download, preprocessing, network construction, and training, and reaches a final test accuracy of 96.4%.

1. Downloading the MNIST dataset
Link: https://pan.baidu.com/s/1cqLjY790dzJXr1My6Yt4VQ
Extraction code: nobn
Place the dataset in the project folder so it does not need to be downloaded on every run, which saves time.
# input_data reads the compressed archives directly, so the four MNIST .gz files do not need to be unpacked
Load the MNIST dataset with the following statements:

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('mnist', one_hot=True)
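
A quick sanity check of the loaded data (an optional aside; these shapes come from the standard input_data split of 55,000 train / 5,000 validation / 10,000 test images):

print(mnist.train.images.shape)   # (55000, 784): flattened 28x28 images
print(mnist.train.labels.shape)   # (55000, 10): one-hot labels
print(mnist.test.images.shape)    # (10000, 784)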

2. Classifying MNIST with a convolutional neural network
The architecture is convolution >> pooling >> convolution >> pooling >> two fully connected layers >> softmax multi-class classifier, as traced below.
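
For reference, the shape of a single image as it flows through the network (batch dimension omitted; SAME padding preserves spatial size through each convolution, and each 2x2 max pool halves it):

input            28 x 28 x 1
conv1 (5x5, 32)  28 x 28 x 32
pool1 (2x2)      14 x 14 x 32
conv2 (5x5, 64)  14 x 14 x 64
pool2 (2x2)       7 x  7 x 64
flatten          7*7*64 = 3136
fc1              1024
fc2 + softmax    10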

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('mnist', one_hot=True)

# Compute classification accuracy on (v_xs, v_ys); keep_prob=1 disables dropout at evaluation time
def compute_accuracy(v_xs, v_ys):
    global prediction
    y_pre = sess.run(prediction, feed_dict={xs: v_xs, keep_prob: 1})
    correct_prediction = tf.equal(tf.argmax(y_pre,1), tf.argmax(v_ys,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys, keep_prob: 1})
    return result
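    # Aside (not in the original code): the tf.equal/tf.reduce_mean calls above add
    # new ops to the graph on every call; for long runs it is cleaner to build the
    # accuracy op once, after `prediction` is defined, and only sess.run() it here.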
    
# Weight initialization: truncated normal with stddev 0.1
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)
    
# Bias initialization: small positive constant (0.1)
def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
    
#Convolution layer
def conv2d(x, W):
    # strides format: [batch, height, width, channels]; strides[0] and strides[3]
    # must be 1 (no striding over the batch or channel dims), and a spatial stride
    # of 1 moves the filter one pixel at a time: [1, x_movement, y_movement, 1]
    # SAME padding keeps the output the same spatial size as the input
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
    
#Max-pooling layer: a 2x2 window with stride 2 halves the height and width
def max_pool_2x2(x):
    # stride [1, x_movement, y_movement, 1]
    return tf.nn.max_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')

# Define the inputs with placeholders
xs = tf.placeholder(tf.float32, [None, 784]) # 28x28
ys = tf.placeholder(tf.float32, [None, 10])
keep_prob = tf.placeholder(tf.float32)
x_image=tf.reshape(xs,[-1,28,28,1])# -1 lets TF infer the batch size; 28x28 is height x width, 1 is the grayscale channel
#print(x_image.shape)#[n_samples,28,28,1]

## conv1 layer ##
W_conv1=weight_variable([5,5,1,32])# 5x5 patch, 1 input channel, 32 filters
b_conv1=bias_variable([32])
h_conv1=tf.nn.relu(conv2d(x_image,W_conv1)+b_conv1)#output size(28*28*32)
h_pool1=max_pool_2x2(h_conv1)#output size(14*14*32)

## conv2 layer ##
W_conv2=weight_variable([5,5,32,64])
b_conv2=bias_variable([64])
h_conv2=tf.nn.relu(conv2d(h_pool1,W_conv2)+b_conv2)
h_pool2=max_pool_2x2(h_conv2)

## func1 layer (fully connected) ##
W_fc1=weight_variable([7*7*64,1024])    # 1024 neurons
b_fc1=bias_variable([1024])
#[n_samples,7,7,64]->>[n_samples,7*7*64]
h_pool2_flat=tf.reshape(h_pool2,[-1,7*7*64])
h_fc1=tf.nn.relu(tf.matmul(h_pool2_flat,W_fc1)+b_fc1)
h_fc1_drop=tf.nn.dropout(h_fc1,keep_prob)   # dropout to reduce overfitting; keep_prob is fed as 0.5 during training

## func2 layer ##
W_fc2=weight_variable([1024,10])
b_fc2=bias_variable([10])
#Output prediction via the softmax multi-class classifier
prediction=tf.nn.softmax(tf.matmul(h_fc1_drop,W_fc2)+b_fc2)

# the error between prediction and real data
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction),
                                              reduction_indices=[1]))       # cross-entropy loss
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)   # Adam optimizer with learning rate 1e-4 (0.0001)
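
# Aside (not in the original code): applying softmax and then -sum(y*log(p)) by
# hand can produce NaN once a predicted probability underflows to 0. If the
# pre-softmax logits were kept (e.g. logits = tf.matmul(h_fc1_drop, W_fc2) + b_fc2),
# TF1's fused op is numerically safer:
#   cross_entropy = tf.reduce_mean(
#       tf.nn.softmax_cross_entropy_with_logits(labels=ys, logits=logits))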

init = tf.global_variables_initializer()
with tf.Session() as sess:    
    sess.run(init)
    for i in range(1000):
        batch_xs, batch_ys = mnist.train.next_batch(100)
        sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys, keep_prob: 0.5})
        if i % 50 == 0:
            print(compute_accuracy(mnist.test.images[:1000], mnist.test.labels[:1000]))
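
The loop above scores only the first 1,000 test images to keep each feed small. Below is a minimal sketch of evaluating the full 10,000-image test set, placed inside the same with tf.Session() block after training (the 1,000-image chunk size is an arbitrary choice to limit memory use):

    # evaluate the whole test set in 1000-image chunks
    accs = []
    for start in range(0, len(mnist.test.images), 1000):
        accs.append(compute_accuracy(mnist.test.images[start:start+1000],
                                     mnist.test.labels[start:start+1000]))
    print('full test accuracy:', sum(accs) / len(accs))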

The printed accuracies from the run; the final accuracy settles around 0.964:
0.105
0.75
0.846
0.889
0.898
0.91
0.922
0.926
0.937
0.944
0.947
0.949
0.946
0.957
0.955
0.958
0.963
0.961
0.964
0.964
