# -*- coding:utf-8 -*-
'''
Build an RNN with TFLearn (MNIST example)
Created on 2018-1-19
'''
import numpy as np
import tflearn
import tflearn.datasets.mnist as mnist
X, Y, testX, testY = mnist.load_data(one_hot=True)
def do_rnn(X, Y, testX, testY):
    # Reshape each flat (1, 784) sample into (28, 28): an LSTM expects sequential
    # input, so the 28x28 image is treated as 28 time steps (rows) of 28 features
    # (columns). The output of each step feeds into the next step, unlike an MLP
    # or CNN, where the inputs are not processed as an ordered sequence.
    X = np.reshape(X, (-1, 28, 28))
    testX = np.reshape(testX, (-1, 28, 28))
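    # Optional sanity check (not in the original script): with the standard TFLearn
    # MNIST split (55000 training / 10000 test samples), the reshaped arrays should
    # report (55000, 28, 28) and (10000, 28, 28) respectively.
    print("train:", X.shape, "test:", testX.shape)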
    # Define the input layer: batches of 28-step sequences with 28 features per step.
    net = tflearn.input_data(shape=[None, 28, 28])
    # First LSTM layer. Because another LSTM follows, return_seq=True is required so
    # it emits a 3-D tensor [samples, timesteps, output dim] rather than only the last step.
    net = tflearn.lstm(net, 128, return_seq=True)
    # Second LSTM layer; it returns only the last step, a 2-D tensor [samples, output dim].
    net = tflearn.lstm(net, 128)
    # Fully connected layer with a softmax classifier over the 10 digit classes.
    net = tflearn.fully_connected(net, 10, activation='softmax')
    # Define the regression (training) layer: Adam optimizer with
    # categorical cross-entropy loss.
    net = tflearn.regression(net, optimizer='adam',
                             loss='categorical_crossentropy', name="output1")
    # Build the model.
    model = tflearn.DNN(net, tensorboard_verbose=2)
    # Train the model for one epoch, reporting accuracy on the test set as it goes.
    model.fit(X, Y, n_epoch=1, validation_set=(testX, testY), show_metric=True,
              snapshot_step=100)
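    # Optional sketch (not in the original script): after training, tflearn.DNN's
    # evaluate() can report accuracy on the held-out test set.
    test_acc = model.evaluate(testX, testY)
    print("Test accuracy:", test_acc)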
do_rnn(X, Y, testX, testY)