tensorflow入门:mnist

本文从获取和使用MNIST数据集开始,通过TensorFlow的tf.layers构建CNN模型,详细讲解损失函数、优化器设置、模型评估、训练过程、测试集评估、TensorBoard日志输出、模型保存与恢复以及GPU配置。逐步实现手写数字识别的全过程。

摘要生成于 C知道 ,由 DeepSeek-R1 满血版支持, 前往体验 >

1. mnist数据集的获取和使用

1.1 获取mnist

# Load MNIST via TensorFlow's (deprecated) contrib helper; downloads on first use.
from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets
mnist = read_data_sets("MNIST_data/", one_hot=True) # "MNIST_data/" is the cache directory; one_hot=True encodes labels as one-hot vectors

1.2 使用mnist

mnist.train.next_batch(50) # fetch one mini-batch of 50 (images, labels) pairs
mnist.test.images,mnist.test.labels # access the entire test set at once

2.使用tf.layers搭建cnn识别模型

x = tf.placeholder('float', [None, 28*28]) # input: MNIST delivers each image flattened to a 784-vector, not a 28x28 matrix
y_ = tf.placeholder('float', [None, 10]) # one-hot target labels, 10 digit classes
# cnn model
x_image = tf.reshape(x, [-1,28,28,1]) # reshape flat input back into NHWC image format
w_init = tf.truncated_normal_initializer(stddev=0.1, seed=9)# weight initializer (truncated normal; fixed seed for reproducibility)
b_init = tf.constant_initializer(0.1) # small positive bias init
# first conv layer + max pooling
cnn1 = tf.layers.conv2d(x_image, 32, (5,5), padding='same', activation=tf.nn.relu, \
	kernel_initializer=w_init, bias_initializer=b_init)
mxpl1 = tf.layers.max_pooling2d(cnn1, 2, strides=2, padding='same')
# second conv layer + max pooling
cnn2 = tf.layers.conv2d(mxpl1, 64, (5,5), padding='same', activation=tf.nn.relu, \
	kernel_initializer=w_init, bias_initializer=b_init)
mxpl2 = tf.layers.max_pooling2d(cnn2, 2, strides=2, padding='same')
mxpl2_flat = tf.reshape(mxpl2, [-1,7*7*64]) # two 2x2 poolings: 28x28 -> 7x7, with 64 channels
# fully-connected layer 1 and dropout
fc1 = tf.layers.dense(mxpl2_flat, 1024, activation=tf.nn.relu, \
	kernel_initializer=w_init, bias_initializer=b_init)
keep_prob = tf.placeholder('float') # dropout keep probability (fed at run time: <1.0 for train, 1.0 for eval)
fc1_drop = tf.nn.dropout(fc1, keep_prob)
logits = tf.layers.dense(fc1_drop, 10, kernel_initializer=w_init, bias_initializer=b_init) # unnormalized class scores

3.损失函数loss和优化器

loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_)) # softmax cross-entropy loss, averaged over the batch
optmz = tf.train.AdamOptimizer(1e-4) # Adam optimizer, learning rate 1e-4
train_op = optmz.minimize(loss) # one gradient-descent step per run

4.评估:识别准确率

def get_eval_op(logits, labels):
	"""Build an accuracy op: fraction of rows where argmax(logits) matches argmax(labels)."""
	hits = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
	return tf.reduce_mean(tf.cast(hits, 'float'))
eval_op = get_eval_op(logits, y_)

5.训练

# Training loop: batch size 50; data is fed to the placeholders via feed_dict.
# summary_op is the logger operation described later -- omit it if not needed.
# NOTE(fix): the loop body was not indented under the `for`, which is a
# SyntaxError in Python; the indentation is restored here.
for i in range(train_steps):
	batch_x, batch_y = mnist.train.next_batch(50)
	_, cur_loss, cur_train_eval, summary = sess.run([train_op, loss, eval_op, summary_op], \
		feed_dict={x: batch_x, y_: batch_y, keep_prob: 0.5})

6.评估测试集

# Evaluate on the full test set; keep_prob=1.0 disables dropout for inference.
test_loss, test_eval = sess.run([loss, eval_op], feed_dict={x:mnist.test.images, \
	y_:mnist.test.labels, keep_prob: 1.0})

7.输出tensorboard的log

logdir = './logs/d31'
# Build a merged summary op covering this split's loss and accuracy.
# NOTE(fix): the original returned tf.summary.merge_all(), which merges EVERY
# summary defined so far in the graph -- so the second call ('test') would also
# contain the 'train' scalars. Merge only this call's own scalar ops instead.
def summary_op(datapart='train'):
    loss_summ = tf.summary.scalar(datapart + '-loss', loss)
    eval_summ = tf.summary.scalar(datapart + '-eval', eval_op)
    return tf.summary.merge([loss_summ, eval_summ])
summary_op_train = summary_op()
summary_op_test = summary_op('test')
# create the log writer (also records the graph definition)
summary_wrt = tf.summary.FileWriter(logdir,sess.graph)
# run the summary op and append the serialized result to the event file
summary = sess.run(summary_op_train, feed_dict={x: batch_x, y_: batch_y, keep_prob: 0.5})
summary_wrt.add_summary(summary, global_step=i) # i is the current training step

8.模型参数保存和恢复

ckpts_dir = 'ckpts/'
ckpt_nm = 'cnn-ckpt'
# create the saver (keeps up to 50 checkpoints)
saver = tf.train.Saver(max_to_keep=50)
# use saver.save / saver.restore inside a session
# save variables (checkpoint filename gets '-<step>' appended)
saver.save(sess, ckpts_dir+ckpt_nm, global_step=i)
# restore the most recent checkpoint
saver.restore(sess, tf.train.latest_checkpoint(ckpts_dir))
# restore a specific checkpoint; restore_step is the chosen step (as a string)
saver.restore(sess, ckpts_dir+ckpt_nm+'-'+restore_step)

9.配置gpu的使用

#指定使用哪块gpu
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
config = tf.ConfigProto()  
#限制使用比率 
config.gpu_options.per_process_gpu_memory_fraction = 0.9
#按需分配
config.gpu_options.allow_growth=True # allocate when needed
#传入session中 
tf.Session(config=config)

10.完整代码

import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets

# load MNIST with labels one-hot encoded
mnist = read_data_sets("MNIST_data/", one_hot=True)

# placeholders: flattened 784-pixel images and 10-class one-hot labels
x = tf.placeholder('float', [None, 28*28])
y_ = tf.placeholder('float', [None, 10])

# alternative single-layer softmax model (kept commented for reference)
# W = tf.Variable(tf.zeros([784, 10]))
# b = tf.Variable(tf.zeros([10]))
# logits = tf.matmul(x, W) + b

# CNN model
x_image = tf.reshape(x, [-1,28,28,1]) # reshape flat input back into NHWC image format
w_init = tf.truncated_normal_initializer(stddev=0.1, seed=9) # fixed seed for reproducibility
b_init = tf.constant_initializer(0.1)
# two conv + max-pooling stages
cnn1 = tf.layers.conv2d(x_image, 32, (5,5), padding='same', activation=tf.nn.relu, \
	kernel_initializer=w_init, bias_initializer=b_init)
mxpl1 = tf.layers.max_pooling2d(cnn1, 2, strides=2, padding='same')
cnn2 = tf.layers.conv2d(mxpl1, 64, (5,5), padding='same', activation=tf.nn.relu, \
	kernel_initializer=w_init, bias_initializer=b_init)
mxpl2 = tf.layers.max_pooling2d(cnn2, 2, strides=2, padding='same')
mxpl2_flat = tf.reshape(mxpl2, [-1,7*7*64]) # two 2x2 poolings: 28x28 -> 7x7, 64 channels
fc1 = tf.layers.dense(mxpl2_flat, 1024, activation=tf.nn.relu, \
	kernel_initializer=w_init, bias_initializer=b_init)
keep_prob = tf.placeholder('float') # dropout keep probability (<1.0 train, 1.0 eval)
fc1_drop = tf.nn.dropout(fc1, keep_prob)
logits = tf.layers.dense(fc1_drop, 10, kernel_initializer=w_init, bias_initializer=b_init)

# cross-entropy loss averaged over the batch, trained with Adam (lr 1e-4)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_))
optmz = tf.train.AdamOptimizer(1e-4)
train_op = optmz.minimize(loss)

def get_eval_op(logits, labels):
	"""Build an accuracy op: mean of per-row argmax(logits) == argmax(labels)."""
	matches = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
	return tf.reduce_mean(tf.cast(matches, 'float'))
eval_op = get_eval_op(logits, y_)

# NOTE(fix): initialize_all_variables() was deprecated in TF 0.12;
# global_variables_initializer() is the drop-in replacement.
init = tf.global_variables_initializer()

# summary
logdir = './logs/m4d1'
def summary_op(datapart='train'):
	"""Return a merged summary op holding only this split's loss/eval scalars.

	NOTE(fix): the original returned tf.summary.merge_all(), which merges
	every summary created so far in the graph, so the second call ('test')
	also carried the 'train' scalars. Merge only this call's own ops.
	"""
	loss_summ = tf.summary.scalar(datapart + '-loss', loss)
	eval_summ = tf.summary.scalar(datapart + '-eval', eval_op)
	return tf.summary.merge([loss_summ, eval_summ])
summary_op_train = summary_op()
summary_op_test = summary_op('test')
# whether to restore or not
ckpts_dir = 'ckpts/'
ckpt_nm = 'cnn-ckpt'
saver = tf.train.Saver(max_to_keep=50) # defaults to save all variables, using dict {'x':x,...} to save specified ones.
restore_step = '' # '' = train from scratch, 'latest' = resume, '<step>' = eval only
start_step = 0
train_steps = 100*200
best_loss = 1e6
best_step = 0

# Optional GPU configuration (uncomment to pin a device and bound memory use):
# import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# config = tf.ConfigProto()
# config.gpu_options.per_process_gpu_memory_fraction = 0.9
# config.gpu_options.allow_growth=True # allocate when needed
# with tf.Session(config=config) as sess:
with tf.Session() as sess:
	sess.run(init)
	# optionally restore weights before training/evaluating
	if restore_step:
		ckpt = tf.train.get_checkpoint_state(ckpts_dir)
		if ckpt and ckpt.model_checkpoint_path: # ckpt.model_checkpoint_path means the latest ckpt
			if restore_step == 'latest':
				ckpt_f = tf.train.latest_checkpoint(ckpts_dir)
				start_step = int(ckpt_f.split('-')[-1]) + 1 # resume after the saved step
			else:
				ckpt_f = ckpts_dir+ckpt_nm+'-'+restore_step
			print('loading wgt file: '+ ckpt_f)
			saver.restore(sess, ckpt_f)
	summary_wrt = tf.summary.FileWriter(logdir,sess.graph)
	if restore_step in ['', 'latest']:
		# train (possibly resuming from start_step)
		for i in range(start_step, train_steps):
			batch_x, batch_y = mnist.train.next_batch(50)
			_, cur_loss, cur_train_eval, summary = sess.run([train_op, loss, eval_op, summary_op_train], \
						feed_dict={x: batch_x, y_: batch_y, keep_prob: 0.5} )
			# every 100 steps (and at the end): track best loss, checkpoint,
			# write summaries, and evaluate on the test set
			if i % 100 == 0 or i == train_steps-1:
				if cur_loss < best_loss:
					best_loss = cur_loss
					best_step = i
				saver.save(sess, ckpts_dir+ckpt_nm, global_step=i) # save variables
				summary_wrt.add_summary(summary, global_step=i)
				cur_test_eval, summary = sess.run([eval_op, summary_op_test], feed_dict={x:mnist.test.images, \
						y_:mnist.test.labels, keep_prob: 1.0})
				summary_wrt.add_summary(summary, global_step=i)
				# NOTE(fix): Python-2 print statements converted to print()
				# calls; output is identical under both Python 2 and 3.
				print('step %5d, loss %.5f, acc train %.5f, acc test %.5f'%(i, \
					cur_loss, cur_train_eval, cur_test_eval))
		with open(ckpts_dir+'best.step','w') as f:
			f.write('best step is %d\n'%best_step)
		print('best step is %d'%best_step)
	else:
		# a specific checkpoint was restored: evaluate the test set only
		test_loss, test_eval = sess.run([loss, eval_op], feed_dict={x:mnist.test.images, \
						y_:mnist.test.labels, keep_prob: 1.0})
		print('eval test: loss %.5f, acc %.5f'%(test_loss, test_eval))
		



评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值