Using the MNIST dataset as an example, we build a logistic regression classification model.
MNIST is a ten-class problem, so softmax is used to perform the multi-class classification.
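Before the TensorFlow code, here is a minimal NumPy sketch of what softmax does: it turns an arbitrary score vector into a probability distribution that sums to 1. The softmax helper below is illustrative only and is not part of the tutorial code.
import numpy as np

def softmax(z):
    # subtract the max score for numerical stability, then normalize the exponentials
    e = np.exp(z - np.max(z))
    return e / e.sum()

scores = np.array([2.0, 1.0, 0.1])
print(softmax(scores))        # roughly [0.659 0.242 0.099]
print(softmax(scores).sum())  # 1.0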
Import the required packages
import numpy as np
import tensorflow as tf
# MNIST dataset helper shipped with the TensorFlow 1.x tutorials package
from tensorflow.examples.tutorials.mnist import input_data
Load the MNIST data
print('Download and Extract MNIST dataset')
mnist = input_data.read_data_sets('data/', one_hot=True)
trainimg = mnist.train.images
trainlabel = mnist.train.labels
testimg = mnist.test.images
testlabel = mnist.test.labels
print('MNIST loaded')
Output:
Download and Extract MNIST dataset
Extracting data/train-images-idx3-ubyte.gz
Extracting data/train-labels-idx1-ubyte.gz
Extracting data/t10k-images-idx3-ubyte.gz
Extracting data/t10k-labels-idx1-ubyte.gz
MNIST loaded
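Note that input_data comes from the old tensorflow.examples.tutorials package, which was removed in later TensorFlow releases. If you are on TensorFlow 2.x, an alternative (sketched below under that assumption, and not the code this tutorial uses) is to load MNIST through tf.keras.datasets and reproduce the same flattened, one-hot layout:
import numpy as np
import tensorflow as tf

# tf.keras.datasets returns integer images/labels; flatten, scale and one-hot
# encode them to match the arrays used in this tutorial (60000 train / 10000 test
# here, since Keras does not split off a validation set)
(x_tr, y_tr), (x_te, y_te) = tf.keras.datasets.mnist.load_data()
trainimg = x_tr.reshape(-1, 784).astype('float32') / 255.0
testimg = x_te.reshape(-1, 784).astype('float32') / 255.0
trainlabel = np.eye(10)[y_tr]
testlabel = np.eye(10)[y_te]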
Inspect the MNIST dataset
print('type of "trainimg" is %s' % (type(trainimg)))
print('type of "trainlabel" is %s' % (type(trainlabel)))
print('type of "testimg" is %s' % (type(testimg)))
print('type of "testlabel" is %s' % (type(testlabel)))
print('shape of "trainimg" is %s' % (trainimg.shape,)) # 28*28=784像素点
print('shape of "trainlabel" is %s' % (trainlabel.shape,)) # 十分类ont-hot编码
print('shape of "testimg" is %s' % (testimg.shape,))
print('shape of "testlabel" is %s' % (testlabel.shape,))
# print the label of the first image
print(trainlabel[0])
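Since the labels are one-hot vectors, the actual digit class is simply the index of the 1, which np.argmax recovers. This small check is an illustrative addition, not part of the original code.
# the position of the 1 in the one-hot vector is the digit class
print('digit of sample 0:', np.argmax(trainlabel[0]))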
Output:
Two ways to create tensors
- tf.placeholder(): declares a tensor without assigning it a value; it acts as a placeholder that must be fed with data at run time
- tf.Variable(): creates a variable with an initial value; its value is kept in the graph and updated during training
x = tf.placeholder('float', [None, 784]) # the number of samples is unknown in advance, so None is used for the batch dimension
y = tf.placeholder('float', [None, 10])
W = tf.Variable(tf.random_uniform([784, 10], -1.0, 1.0)) # weights initialized uniformly in [-1, 1)
b = tf.Variable(tf.zeros([10])) # biases initialized to zero
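To make the placeholder/Variable distinction concrete, here is a tiny standalone sketch (illustrative only): the placeholder only receives a value through feed_dict when the graph is run, while the Variable already holds a value once it has been initialized.
p = tf.placeholder('float', [None, 2]) # no value yet, just a shape
v = tf.Variable(tf.ones([2]))          # holds a concrete, trainable value
with tf.Session() as s:
    s.run(tf.global_variables_initializer())
    print(s.run(p + v, feed_dict={p: [[1.0, 2.0]]})) # [[2. 3.]]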
Build the model
actv = tf.nn.softmax(tf.matmul(x, W) + b) # predicted class probabilities
cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(actv), reduction_indices=1)) # cross-entropy loss averaged over the batch
learning_rate = 0.01
optm = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
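One caveat: taking tf.log of the softmax output can underflow when a predicted probability gets extremely close to zero. A common alternative in TF 1.x (shown here as an option, not as this tutorial's code) is to compute the cross-entropy directly from the raw logits, which is mathematically equivalent but more numerically stable:
logits = tf.matmul(x, W) + b # unnormalized scores, before softmax
cost_stable = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))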
Training and prediction
# prediction: does the predicted class match the true class?
pred = tf.equal(tf.argmax(actv, 1), tf.argmax(y, 1)) # argmax() returns the index of the largest value
# accuracy
accr = tf.reduce_mean(tf.cast(pred, 'float')) # pred holds True/False values; cast() converts them to 1.0/0.0
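The same accuracy computation written out in NumPy, with made-up numbers, to show what argmax/equal/cast are doing (illustrative only):
probs = np.array([[0.1, 0.8, 0.1],   # predicted class 1
                  [0.6, 0.3, 0.1]])  # predicted class 0
labels = np.array([[0, 1, 0],        # true class 1 -> correct
                   [0, 0, 1]])       # true class 2 -> wrong
correct = np.argmax(probs, 1) == np.argmax(labels, 1) # [True, False]
print(correct.astype('float').mean())                 # 0.5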
# initialize all variables
init = tf.global_variables_initializer()
# training hyperparameters
training_epochs = 50
batch_size = 100
display_step = 5
# Session
sess = tf.Session()
sess.run(init)
# mini-batch learning
for epoch in range(training_epochs+1):
    avg_cost = 0.
    num_batch = int(mnist.train.num_examples/batch_size)
    for i in range(num_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        feeds = {x: batch_xs, y: batch_ys}
        sess.run(optm, feed_dict=feeds) # one gradient-descent step on this mini-batch
        avg_cost += sess.run(cost, feed_dict=feeds)/num_batch # accumulate the average epoch cost
    if epoch % display_step == 0:
        # report accuracy on the last mini-batch and on the full test set
        feeds_train = {x: batch_xs, y: batch_ys}
        feeds_test = {x: mnist.test.images, y: mnist.test.labels}
        train_acc = sess.run(accr, feed_dict=feeds_train)
        test_acc = sess.run(accr, feed_dict=feeds_test)
        print("Epoch: %03d/%03d cost: %.9f train_acc: %.3f test_acc: %.3f"
              % (epoch, training_epochs, avg_cost, train_acc, test_acc))
print('Done')
Output:
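Once training has finished, the live session can be reused to classify individual images. As a small illustrative addition (not part of the original code), this predicts the digit for the first test image and compares it with the true label:
# class probabilities for the first test image, and the predicted digit
probs = sess.run(actv, feed_dict={x: testimg[:1]})
print('predicted digit:', np.argmax(probs), 'true digit:', np.argmax(testlabel[0]))
sess.close() # release the session when done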