Learning notes: a runnable RNN model for training on MNIST, based on TensorFlow 1.4

This post walks through an implementation of a recurrent neural network (RNN) in TensorFlow, covering data preprocessing, model construction, training, and evaluation. Using the Fashion-MNIST dataset as an example, it shows how to perform an image classification task with an RNN.


import numpy as np
import tensorflow as tf
import input_data
import matplotlib.pyplot as plt

def _RNN(_X, _W, _b, _nsteps, _name, diminput=28, dimhidden=128):
    # 1. Permute input from [batchsize, nsteps, diminput]
    #   => [nsteps, batchsize, diminput]
    _X = tf.transpose(_X, [1, 0, 2])
    # 2. Reshape input to [nsteps*batchsize, diminput]
    _X = tf.reshape(_X, [-1, diminput])
    # 3. Input layer => Hidden layer
    _H = tf.matmul(_X, _W['hidden']) + _b['hidden']
    # 4. Split the data into 'nsteps' chunks; the i-th chunk holds the whole batch's input at time step i
    _Hsplit = tf.split(axis=0, num_or_size_splits=_nsteps, value=_H)
    # 5. Get LSTM's final output (_LSTM_O) and state (_LSTM_S)
    #    Both _LSTM_O and _LSTM_S consist of 'batchsize' elements
    #    Only _LSTM_O will be used to predict the output.
    with tf.variable_scope(_name, reuse=tf.AUTO_REUSE) as scope:
        # Alternative APIs, kept here as notes: the pre-1.0 names were
        # tf.nn.rnn_cell.BasicLSTMCell and tf.nn.rnn; tf.nn.dynamic_rnn could also
        # consume the original [batchsize, nsteps, diminput] tensor directly,
        # without the manual transpose/reshape/split above.
        lstm_cell = tf.contrib.rnn.BasicLSTMCell(dimhidden, forget_bias=1.0)
        # In TF 1.4 the statically unrolled RNN lives in tf.contrib.rnn.static_rnn:
        _LSTM_O, _LSTM_S = tf.contrib.rnn.static_rnn(lstm_cell, _Hsplit, dtype=tf.float32)
    # 6. Output
    _O = tf.matmul(_LSTM_O[-1], _W['out']) + _b['out']
    # Return!
    return {
        'X': _X, 'H': _H, 'Hsplit': _Hsplit,
        'LSTM_O': _LSTM_O, 'LSTM_S': _LSTM_S, 'O': _O
    }
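
To make the transpose → reshape → split pipeline in _RNN concrete, here is a minimal NumPy sketch (the sizes mirror the ones the script uses, but any values work) that checks the shape at each stage:

import numpy as np

batchsize, nsteps, diminput, dimhidden = 16, 28, 28, 128
X = np.zeros((batchsize, nsteps, diminput))   # [batchsize, nsteps, diminput]
X = np.transpose(X, (1, 0, 2))                # [nsteps, batchsize, diminput]
X = np.reshape(X, (-1, diminput))             # [nsteps*batchsize, diminput]
H = np.zeros((X.shape[0], dimhidden))         # shape after the hidden-layer matmul
Hsplit = np.split(H, nsteps, axis=0)          # nsteps chunks of [batchsize, dimhidden]
print(len(Hsplit), Hsplit[0].shape)           # 28 (16, 128)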


def study_rnn():
    # mnist = input_data.read_data_sets('./objects_data/mnist', one_hot=True)
    mnist = input_data.read_data_sets('./objects_data/fashion-mnist-master/fashion_mnist_data', one_hot=True)

    trainimgs, trainlabels, testimgs, testlabels = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels
    ntrain, ntest, dim, nclasses = trainimgs.shape[0], testimgs.shape[0], trainimgs.shape[1], trainlabels.shape[1]
    print("MNIST loaded")

    diminput = 28
    dimhidden = 128
    dimoutput = nclasses
    nsteps = 28
    n_layers = 1
    weights = {
        'hidden': tf.Variable(tf.random_normal([diminput, dimhidden])),
        'out': tf.Variable(tf.random_normal([dimhidden, dimoutput]))
    }
    biases = {
        'hidden': tf.Variable(tf.random_normal([dimhidden])),
        'out': tf.Variable(tf.random_normal([dimoutput]))
    }

    learning_rate = 0.001
    x = tf.placeholder("float", [None, nsteps, diminput])
    y = tf.placeholder("float", [None, dimoutput])
    # istate would hold the RNN's intermediate state: the cell state (c_t) plus each
    # cell's output state (h_t), per the LSTM output equation h_t = o * tanh(c_t).
    # With n_layers stacked cells of equal size, the flattened state has
    # 2 * dimhidden * n_layers entries (in general the layer sizes may differ).
    # An initial state may be supplied explicitly, or omitted so that TF initializes
    # it to zeros; this graph does the latter, so istate is never actually consumed.
    istate = tf.placeholder("float32", [None, 2 * dimhidden * n_layers])
    myrnn = _RNN(x, weights, biases, nsteps, 'basic')
    pred = myrnn['O']
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
    optm = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)  # plain SGD; tf.train.AdamOptimizer is a common alternative
    accr = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)), tf.float32))
    init = tf.global_variables_initializer()
    print("Network Ready!")

    training_epochs = 10
    batch_size = 16
    display_step = 1
    sess = tf.Session()
    sess.run(init)
    print("Start optimization")
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples/batch_size)
        # total_batch = 100
        # Loop over all batches
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            batch_xs = batch_xs.reshape((batch_size, nsteps, diminput))
            # Fit training using batch data
            feeds = {x: batch_xs, y: batch_ys}
            sess.run(optm, feed_dict=feeds)
            # Compute average loss
            avg_cost += sess.run(cost, feed_dict=feeds) / total_batch
        # Display logs per epoch step
        if epoch % display_step == 0:
            print("Epoch: %03d/%03d cost: %.9f" % (epoch, training_epochs, avg_cost))
            feeds = {x: batch_xs, y: batch_ys}
            train_acc = sess.run(accr, feed_dict=feeds)
            print(" Training accuracy: %.3f" % (train_acc))
            testimgs = testimgs.reshape((ntest, nsteps, diminput))
            feeds = {x: testimgs, y: testlabels}  # no need to feed istate; the graph never reads it
            test_acc = sess.run(accr, feed_dict=feeds)
            print(" Test accuracy: %.3f" % (test_acc))
    print("Optimization Finished.")


if __name__ == '__main__':
    study_rnn()
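
The comments in study_rnn note that the initial LSTM state can be supplied explicitly instead of letting TF zero-initialize it (which is why the unused istate placeholder exists). As a minimal standalone sketch of that alternative, assuming the TF 1.4 contrib API, static_rnn accepts an initial_state built from an LSTMStateTuple:

import numpy as np
import tensorflow as tf

diminput, dimhidden, nsteps = 28, 128, 28
x = tf.placeholder(tf.float32, [None, nsteps, diminput])
c0 = tf.placeholder(tf.float32, [None, dimhidden])  # initial cell state c_0
h0 = tf.placeholder(tf.float32, [None, dimhidden])  # initial output state h_0

inputs = tf.unstack(tf.transpose(x, [1, 0, 2]), axis=0)  # nsteps tensors of [batch, diminput]
cell = tf.contrib.rnn.BasicLSTMCell(dimhidden, forget_bias=1.0)
outputs, final_state = tf.contrib.rnn.static_rnn(
    cell, inputs, initial_state=tf.contrib.rnn.LSTMStateTuple(c0, h0))

Feeding c0 and h0 with np.zeros((batch_size, dimhidden)) reproduces the default zero initialization that the main script relies on.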

I've been learning TensorFlow recently, but unfortunately my machine only has CUDA 8.0, so I had to settle for version 1.4.

For reference, the input_data.py code is:

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import gzip
import os
import tempfile

import numpy
from six.moves import urllib
from six.moves import xrange  # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets
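
As the file shows, input_data.py is just a thin compatibility shim: only read_data_sets is actually used, and the other imports are leftovers from the original TensorFlow tutorial file. Equivalently (a sketch, assuming the TF 1.x tutorial module is present in your install), the same loader can be imported directly:

from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('./objects_data/fashion-mnist-master/fashion_mnist_data', one_hot=True)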

Here is another nicely written RNN example; link attached:

http://blog.sina.com.cn/s/blog_4b0020f30102wv4l.html
