A First Look at Convolutional Neural Networks

Preface

Deep learning has made major breakthroughs in computer vision, and there are many classic datasets, for example ImageNet (collected by a team led by Fei-Fei Li), CIFAR, COCO (sponsored by Microsoft), and Open Images (open-sourced by Google). The concept of convolution did not appear with deep learning, though; it is a long-standing idea in computer graphics and image processing, and I see it as a way of extracting useful information from an image. There is actually quite a lot to this topic, so this post is only a small first step.
Having used MNIST as the introductory case before, we will continue to use it as the hands-on example for this deep learning model.

Convolution Computation

(Figure: 3*3 vertical and horizontal edge-detection filters)
For example, we can use 3*3 vertical and horizontal filters like these to detect edges in an image; each output value is obtained as the weighted sum of the pixels under the filter window.
Pooling layers are computed in much the same way. The difference is the operation: a pooling layer that takes the maximum of each window is called a max pooling layer, and one that takes the average is called an average pooling layer. In current practice, max pooling is what is generally used.
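To make the arithmetic concrete, here is a minimal NumPy sketch (not from the original post; the toy image and filter values are chosen purely for illustration) of a valid convolution followed by 2*2 max pooling. Like deep-learning frameworks, it slides the filter without flipping it:

import numpy as np

def conv2d(image, kernel):
    """Valid (no padding) convolution; as in DL frameworks, the filter is not flipped."""
    kh, kw = kernel.shape
    oh, ow = image.shape[0] - kh + 1, image.shape[1] - kw + 1
    out = np.zeros((oh, ow))
    for i in range(oh):
        for j in range(ow):
            # Each output value is the weighted sum of one filter-sized window.
            out[i, j] = np.sum(image[i:i + kh, j:j + kw] * kernel)
    return out

def max_pool(feature, size=2, stride=2):
    """Max pooling: keep only the largest value in each window."""
    oh, ow = feature.shape[0] // stride, feature.shape[1] // stride
    out = np.zeros((oh, ow))
    for i in range(oh):
        for j in range(ow):
            out[i, j] = feature[i * stride:i * stride + size, j * stride:j * stride + size].max()
    return out

# Toy 4*4 image with a vertical edge down the middle.
image = np.array([[0, 0, 10, 10]] * 4, dtype=float)
vertical_filter = np.array([[-1, 0, 1]] * 3, dtype=float)  # responds to left-dark/right-bright edges
edges = conv2d(image, vertical_filter)   # 2*2 feature map, strong response at the edge
print(edges)                             # [[30. 30.] [30. 30.]]
print(max_pool(edges))                   # [[30.]]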

The LeNet-5 Model

This model was proposed by Professor Yann LeCun in his 1998 paper "Gradient-Based Learning Applied to Document Recognition"; readers who are interested can look it up.
(Figure: LeNet-5 architecture overview from the paper)
This is an overview of the model figure from the paper. The input is the image's intensity matrix, 32*32 pixels. It first passes through a convolutional layer whose filters are 5*5 with depth 6 and which uses no all-zero padding, so the result is 28*28, still with depth 6. Next comes a pooling filter of width and height 2 with stride 2, so width and height are halved; a pooling layer does not change the depth. After that there are one more convolutional layer and one more pooling layer, followed by two fully connected layers with 120 and 84 nodes, and finally the probabilities of the 10 classes are output.
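Every size in this walk-through can be checked with the standard formula out = floor((in + 2*padding - filter) / stride) + 1. A minimal sketch (layer sizes taken from the description above) traces the 32*32 input through the stack:

def out_size(n, f, p=0, s=1):
    # floor((n + 2*padding - filter) / stride) + 1
    return (n + 2 * p - f) // s + 1

n = 32                       # LeNet-5 input is 32*32
n = out_size(n, f=5)         # C1: 5*5 conv, no padding -> 28
n = out_size(n, f=2, s=2)    # S2: 2*2 pooling, stride 2 -> 14
n = out_size(n, f=5)         # C3: 5*5 conv -> 10
n = out_size(n, f=2, s=2)    # S4: 2*2 pooling -> 5
print(n)                     # 5; flattened 5*5*16 = 400 values feed the 120-node FC layer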

Code Implementation

The mnist_inference.py code:

import tensorflow as tf

INPUT_NODE = 784     # 28*28*1 pixels, since the image has only one channel
OUTPUT_NODE = 10     # number of output classes
IMAGE_SIZE = 28      # input image side length in pixels
NUM_CHANNELS = 1     # number of channels
NUM_LABELS = 10      # 10 class labels

CONV1_DEEP = 32      # depth of the first convolutional layer
CONV1_SIZE = 5       # filter side length of the first convolutional layer

CONV2_DEEP = 64      # depth of the second convolutional layer
CONV2_SIZE = 5       # filter side length of the second convolutional layer

FC_SIZE = 512        # number of nodes in the fully connected layer

def get_weight_variable(shape, regularizer):
    # Helper for creating (optionally regularized) weights; not used by the LeNet-5 inference below.
    weights = tf.get_variable("weights", shape, initializer=tf.truncated_normal_initializer(stddev=0.1))
    if regularizer is not None:
        tf.add_to_collection('losses', regularizer(weights))
    return weights

def inference(input_tensor, train, regularizer):  # define the LeNet-5 model
    with tf.variable_scope('layer1-conv1'):  # first convolutional layer in its own variable scope
        # The convolution weights form a 4-D tensor: filter height, filter width,
        # input channels (handwritten images are grayscale, so always 1 here),
        # and the depth of this convolutional layer.
        conv1_weights = tf.get_variable(
            "weight", [CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_DEEP],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv1_biases = tf.get_variable("bias", [CONV1_DEEP], initializer=tf.constant_initializer(0.0))  # bias b
        # padding='SAME' zero-pads the input so the output keeps the same width and height.
        conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))  # ReLU activation

    with tf.variable_scope('layer2-pool1'):  # first pooling layer
        pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')  # max pooling

    with tf.variable_scope('layer3-conv2'):  # second convolutional layer
        conv2_weights = tf.get_variable(
            "weight", [CONV2_SIZE, CONV2_SIZE, CONV1_DEEP, CONV2_DEEP],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2_biases = tf.get_variable("bias", [CONV2_DEEP], initializer=tf.constant_initializer(0.0))
        conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))

    with tf.name_scope('layer4-pool2'):  # second pooling layer
        pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    # Flatten the final feature map into a vector for the fully connected layers.
    pool_shape = pool2.get_shape().as_list()
    nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]
    reshaped = tf.reshape(pool2, [pool_shape[0], nodes])

    with tf.variable_scope('layer5-fc1'):
        fc1_weights = tf.get_variable("weight", [nodes, FC_SIZE], initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc1_weights))
        fc1_biases = tf.get_variable("bias", [FC_SIZE], initializer=tf.constant_initializer(0.1))
        fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases)
        if train:
            fc1 = tf.nn.dropout(fc1, 0.5)  # dropout randomly zeroes nodes to reduce overfitting

    with tf.variable_scope('layer6-fc2'):
        fc2_weights = tf.get_variable("weight", [FC_SIZE, NUM_LABELS], initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc2_weights))
        fc2_biases = tf.get_variable("bias", [NUM_LABELS], initializer=tf.constant_initializer(0.1))
        logit = tf.matmul(fc1, fc2_weights) + fc2_biases
    return logit
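As a quick sanity check (not part of the original post; it assumes the mnist_inference.py above is importable), we can build the graph on a dummy batch and confirm the logits come out with shape [batch, 10]. Because the code uses SAME padding, only the two 2*2 poolings shrink the input, so a 28*28 MNIST image becomes a 7*7*64 feature map, i.e. 7*7*64 = 3136 nodes before the first fully connected layer:

import numpy as np
import tensorflow as tf
import mnist_inference

with tf.Graph().as_default():
    x = tf.placeholder(tf.float32, [8, 28, 28, 1], name='x-input')  # dummy batch of 8
    logits = mnist_inference.inference(x, False, None)              # eval mode, no regularizer
    print(logits.get_shape())                                       # (8, 10)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        dummy = np.random.rand(8, 28, 28, 1).astype(np.float32)
        print(sess.run(logits, feed_dict={x: dummy}).shape)         # (8, 10)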

The mnist_train.py code:

import os
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_inference

BATCH_SIZE = 100               # batch size
LEARNING_RATE_BASE = 0.01      # initial learning rate
LEARNING_RATE_DECAY = 0.99     # learning rate decay
REGULARAZTION_RATE = 0.0001    # regularization weight
TRAINING_STEPS = 10000         # number of training steps
MOVING_AVERAGE_DECAY = 0.99    # moving average decay
MODEL_SAVE_PATH = "C:\\Users\\user\\Desktop"  # path for persisting the model
MODEL_NAME = "model.ckpt"

def train(mnist):
    # Define the input as a 4-D tensor: batch, height, width, channels.
    x = tf.placeholder(tf.float32, [BATCH_SIZE, mnist_inference.IMAGE_SIZE, mnist_inference.IMAGE_SIZE, mnist_inference.NUM_CHANNELS], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')

    regularizer = tf.contrib.layers.l2_regularizer(REGULARAZTION_RATE)  # L2 regularization against overfitting
    y = mnist_inference.inference(x, True, regularizer)  # train=True so dropout is active during training
    global_step = tf.Variable(0, trainable=False)
    # Moving averages of the parameters make predictions more robust.
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())
    # Cross-entropy loss between the logits and the correct labels.
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        mnist.train.num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY,
        staircase=True)
    # Backpropagation: gradient descent on the loss.
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    with tf.control_dependencies([train_step, variables_averages_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()

        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            reshaped_xs = np.reshape(xs, (BATCH_SIZE, mnist_inference.IMAGE_SIZE, mnist_inference.IMAGE_SIZE, mnist_inference.NUM_CHANNELS))
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: reshaped_xs, y_: ys})

            if i % 1000 == 0:
                print("After %d training step(s), loss on training batch is %g." % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)

def main(argv=None):
    mnist = input_data.read_data_sets("C:\\Users\\user\\Desktop\\", one_hot=True)
    train(mnist)

if __name__ == '__main__':
    tf.app.run()

Training results:

The mnist_eval.py code:

import time
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_inference
import mnist_train

EVAL_INTERVAL_SECS = 10  # evaluate every 10 seconds

def evaluate(mnist):
    with tf.Graph().as_default() as g:
        # Feed the whole validation set as one batch.
        num_examples = mnist.validation.num_examples
        x = tf.placeholder(tf.float32, [num_examples, mnist_inference.IMAGE_SIZE, mnist_inference.IMAGE_SIZE, mnist_inference.NUM_CHANNELS], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
        xs, ys = mnist.validation.images, mnist.validation.labels
        reshaped_xs = np.reshape(xs, (num_examples, mnist_inference.IMAGE_SIZE, mnist_inference.IMAGE_SIZE, mnist_inference.NUM_CHANNELS))
        validate_feed = {x: reshaped_xs, y_: ys}
        y = mnist_inference.inference(x, False, None)  # eval mode: no dropout, no regularization
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        # Restore the moving-average (shadow) versions of the variables.
        variable_averages = tf.train.ExponentialMovingAverage(mnist_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)
        while True:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(mnist_train.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
                    print("After %s training step(s), validation accuracy = %g" % (global_step, accuracy_score))
                else:
                    print('No checkpoint file found')
                    return
            time.sleep(EVAL_INTERVAL_SECS)

def main(argv=None):
    mnist = input_data.read_data_sets("C:\\Users\\user\\Desktop\\", one_hot=True)
    evaluate(mnist)

if __name__ == '__main__':
    tf.app.run()
