1.代码
import tensorflow as tf
import os
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
"""
用CNN来解决 手写数据集的代码。
"""
# Load MNIST with one-hot encoded labels; reshape=False keeps each image as a
# [28, 28, 1] tensor instead of a flat 784-vector, as required by the conv layers.
mnist = input_data.read_data_sets('../datas/mnist', one_hot=True, reshape=False)
print(mnist.train.num_examples)
"""
网络结构图。
1、input [N, 28, 28, 1] N 代表批量
2、卷积1 卷积核[5, 5, 1, 32] ---> 4-d tensor [-1, 28, 28, 32]
3、池化1 strides=2 ---> 4-d tensor [-1, 14, 14, 32]
4、卷积2 卷积核[5, 5, 32, 64] ---> [-1, 14, 14, 64]
dropout1
5、池化2 strides=2 ---> [-1, 7, 7, 64]
6、拉平层(reshape) ---> [-1, 7*7*64]
7、FC1 权重[7*7*64, 1024] ---> [-1, 1024]
dropout2
8、输出层(logits) 权重[1024, num_classes] ---> [-1, num_classes]
"""
# 1. Hyper-parameters.
learning_rate = 0.01  # SGD learning rate
epochs = 10  # number of full passes over the training set
batch_size = 128  # mini-batch size
test_valid_size = 512  # number of samples used for validation or testing
n_classes = 10  # number of output classes (digits 0-9)
keep_probab = 0.75  # dropout keep probability used during training
# 2、创建变量
# 2. Create the trainable variables inside a dedicated graph.
my_graph = tf.Graph()
with my_graph.as_default():
    # Convolution kernels ([h, w, in_ch, out_ch]) and fully-connected
    # weight matrices, truncated-normal initialized.
    weights = {
        'conv1': tf.get_variable('w_conv1', shape=[5, 5, 1, 32], dtype=tf.float32,
                                 initializer=tf.truncated_normal_initializer(stddev=0.1)),
        'conv2': tf.get_variable('w_conv2', shape=[5, 5, 32, 64], dtype=tf.float32,
                                 initializer=tf.truncated_normal_initializer(stddev=0.1)),
        'fc1': tf.get_variable('w_fc1', shape=[7*7*64, 1024], dtype=tf.float32,
                               initializer=tf.truncated_normal_initializer(stddev=0.1)),
        'logits': tf.get_variable('w_logits', shape=[1024, n_classes], dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1)),
    }
    # One zero-initialized bias vector per layer (keys mirror `weights`).
    biases = {
        'conv1': tf.get_variable('b_conv1', shape=[32], dtype=tf.float32,
                                 initializer=tf.zeros_initializer()),
        'conv2': tf.get_variable('b_conv2', shape=[64], dtype=tf.float32,
                                 initializer=tf.zeros_initializer()),
        'fc1': tf.get_variable('b_fc1', shape=[1024], dtype=tf.float32,
                               initializer=tf.zeros_initializer()),
        'logits': tf.get_variable('b_logits', shape=[ n_classes], dtype=tf.float32,
                                  initializer=tf.zeros_initializer()),
    }
def conv2d_block(input_tensor, filter_w, filter_b, stride=1):
    """Apply a 2-D convolution, add the bias, and activate with relu6.

    :param input_tensor: 4-D input tensor [N, H, W, C].
    :param filter_w: convolution kernel variable.
    :param filter_b: bias variable, one entry per output channel.
    :param stride: spatial stride (applied to both height and width).
    :return: the activated feature map ('SAME' padding keeps H and W).
    """
    stride_spec = [1, stride, stride, 1]
    out = tf.nn.conv2d(
        input=input_tensor, filter=filter_w, strides=stride_spec, padding='SAME'
    )
    out = tf.nn.relu6(tf.nn.bias_add(out, filter_b))
    return out
def maxpool(input_tensor, k=2):
    """Down-sample a feature map with k x k max pooling at stride k.

    :param input_tensor: 4-D feature map [N, H, W, C].
    :param k: pooling window size, also used as the stride.
    :return: the pooled tensor ('SAME' padding).
    """
    window = [1, k, k, 1]
    return tf.nn.max_pool(
        value=input_tensor, ksize=window, strides=window, padding='SAME'
    )
def model(input_tensor, weights, biases, keep_prob):
    """Build the CNN forward pass and return the un-normalized logits.

    :param input_tensor: placeholder for the input images, [N, 28, 28, 1].
    :param weights: dict of weight variables (keys: conv1, conv2, fc1, logits).
    :param biases: dict of bias variables (same keys as `weights`).
    :param keep_prob: placeholder for the dropout keep probability.
    :return: logits tensor of shape [N, n_classes].
    """
    # 1. Conv 1: [N, 28, 28, 1] ---> [N, 28, 28, 32]
    conv1 = conv2d_block(
        input_tensor=input_tensor, filter_w=weights['conv1'], filter_b=biases['conv1']
    )
    # 2. Pool 1: [N, 28, 28, 32] ---> [N, 14, 14, 32]
    pool1 = maxpool(conv1, k=2)
    # 3. Conv 2: [N, 14, 14, 32] ---> [N, 14, 14, 64]
    conv2 = conv2d_block(
        input_tensor=pool1, filter_w=weights['conv2'], filter_b=biases['conv2']
    )
    # Dropout applied between conv2 and pool2 (disabled via keep_prob=1.0 at eval).
    conv2 = tf.nn.dropout(conv2, keep_prob=keep_prob)
    # 4. Pool 2: [N, 14, 14, 64] ---> [N, 7, 7, 64]
    pool2 = maxpool(conv2, k=2)
    # 5. Flatten: [N, 7, 7, 64] ---> [N, 7*7*64]
    x_shape = pool2.get_shape()
    flatten_shape = x_shape[1] * x_shape[2] * x_shape[3]
    flatted = tf.reshape(pool2, shape=[-1, flatten_shape])
    # 6. FC1 fully-connected layer with relu6 activation, then dropout.
    fc1 = tf.nn.relu6(tf.matmul(flatted, weights['fc1']) + biases['fc1'])
    fc1 = tf.nn.dropout(fc1, keep_prob=keep_prob)
    # 7. Output layer: raw logits (no softmax; the loss applies it).
    logits = tf.add(tf.matmul(fc1, weights['logits']), biases['logits'])
    return logits
def create_dir_path(path):
    """Create directory `path` (including parents) if it does not exist.

    Prints a message only when the directory was actually missing.
    :param path: filesystem path of the directory to ensure.
    :return: None
    """
    if not os.path.exists(path):
        # exist_ok=True closes the race where the directory appears between
        # the exists() check and the makedirs() call.
        os.makedirs(path, exist_ok=True)
        print('create file path:{}'.format(path))
def train():
    """Build the training graph and run the optimization loop.

    Every 20 steps the training loss and a validation loss/accuracy are
    written as TensorBoard summaries; every 3 epochs a checkpoint is saved.
    :return: None
    """
    with my_graph.as_default():
        # 1. Input placeholders.
        x = tf.placeholder(tf.float32, [None, 28, 28, 1], name='x')
        y = tf.placeholder(tf.float32, [None, n_classes], name='y')
        keep_prob = tf.placeholder(tf.float32, shape=None, name='keep_prob')
        # 2. Build the model graph and obtain the logits.
        logits = model(x, weights, biases, keep_prob)
        # 3. Loss: mean softmax cross-entropy over the batch.
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
            logits=logits, labels=y
        ))
        # Scalar summaries, kept in separate collections for train/valid.
        tf.summary.scalar('train_loss', tensor=loss, collections=['train'])
        tf.summary.scalar('val_loss', tensor=loss, collections=['valid'])
        # 4. Optimizer (plain SGD).
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
        train_opt = optimizer.minimize(loss)
        # Alternatives:
        #   tf.train.MomentumOptimizer()  momentum optimizer
        #   tf.train.RMSPropOptimizer()   RMSProp optimizer
        #   tf.train.AdamOptimizer()
        # 5. Accuracy metric.
        correct_pred = tf.equal(tf.argmax(logits, axis=1), tf.argmax(y, axis=1))
        accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
        tf.summary.scalar('train_acc', tensor=accuracy, collections=['train'])
        tf.summary.scalar('val_acc', tensor=accuracy, collections=['valid'])
        # 6. Checkpointing (keep only the most recent checkpoint).
        saver = tf.train.Saver(max_to_keep=1)
        checkpoint_dir = './models/mnist'
        create_dir_path(checkpoint_dir)
    with tf.Session(graph=my_graph) as sess:
        # a. Initialize all variables.
        sess.run(tf.global_variables_initializer())
        # Merge the summaries declared in each collection above.
        train_summary = tf.summary.merge_all('train')
        valid_summary = tf.summary.merge_all('valid')
        # Two writers so the train and valid curves appear as separate runs.
        log_dir = './models/mnist_graph'
        train_writer = tf.summary.FileWriter(
            log_dir + '/train_graph', graph=sess.graph
        )
        valid_writer = tf.summary.FileWriter(
            log_dir + '/valid_graph'
        )
        step = 1
        # BUG FIX: range(1, epochs) executed only epochs-1 passes (the old
        # run log stops at Epoch 9 with epochs=10); epochs+1 runs them all.
        for e in range(1, epochs + 1):
            # Mini-batch loop over the training set.
            for batch in range(mnist.train.num_examples // batch_size):
                # Fetch the next batch and run one optimization step.
                batch_x, batch_y = mnist.train.next_batch(batch_size)
                feed = {x: batch_x, y: batch_y, keep_prob: keep_probab}
                sess.run(train_opt, feed_dict=feed)
                if step % 20 == 0:
                    train_loss, train_summary_ = sess.run([loss, train_summary], feed)
                    train_writer.add_summary(train_summary_, global_step=step)
                    # Evaluate on a fixed validation slice with dropout off.
                    val_dict = {x: mnist.validation.images[:test_valid_size],
                                y: mnist.validation.labels[:test_valid_size],
                                keep_prob: 1.0}
                    val_loss, val_acc, valid_summary_ = sess.run([loss, accuracy, valid_summary], val_dict)
                    valid_writer.add_summary(valid_summary_, global_step=step)
                    print('Epoch:{} - Step:{} - Train loss:{} - Valid Loss:{} - Valid ACC:'
                          '{}'.format(e, step, train_loss, val_loss, val_acc))
                step += 1
            if e % 3 == 0:
                # Persist a checkpoint every 3 epochs.
                file_name = 'model.ckpt'
                save_file = os.path.join(checkpoint_dir, file_name)
                saver.save(sess, save_path=save_file, global_step=e)
                print('Saved model to file:{}'.format(save_file))
        train_writer.close()
        valid_writer.close()
def test():
    """Evaluate the model on the MNIST test set.

    Restores the latest checkpoint when one exists (otherwise falls back to
    randomly initialized weights) and prints per-batch and average accuracy.
    :return: None
    """
    with my_graph.as_default():
        # 1. Input placeholders.
        x = tf.placeholder(tf.float32, [None, 28, 28, 1], name='x')
        y = tf.placeholder(tf.float32, [None, n_classes], name='y')
        keep_prob = tf.placeholder(tf.float32, shape=None, name='keep_prob')
        # 2. Build the model graph and obtain the logits.
        logits = model(x, weights, biases, keep_prob)
        # 3. Accuracy metric. (The loss and GradientDescent optimizer built
        #    here previously were never executed during evaluation, so they
        #    only added dead gradient ops to the graph and were removed.)
        correct_pred = tf.equal(tf.argmax(logits, axis=1), tf.argmax(y, axis=1))
        accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
        # 4. Saver used to restore the trained parameters.
        saver = tf.train.Saver(max_to_keep=1)
        checkpoint_dir = './models/mnist'
        create_dir_path(checkpoint_dir)
    with tf.Session(graph=my_graph) as sess:
        # a. Restore the persisted model if a checkpoint exists.
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            print('load old model to test!')
        else:
            sess.run(tf.global_variables_initializer())
        step = 1
        avg_test_acc = []
        for batch in range(mnist.test.num_examples // batch_size):
            # Dropout is disabled (keep_prob=1.0) during evaluation.
            batch_x, batch_y = mnist.test.next_batch(batch_size)
            feed = {x: batch_x, y: batch_y, keep_prob: 1.0}
            test_acc = sess.run(accuracy, feed)
            avg_test_acc.append(test_acc)
            print('step: {} - test ACC:{}'.format(step, test_acc))
            step += 1
        print('Avg Test Accuracy :{:.5f}'.format(np.mean(avg_test_acc)))
if __name__ == '__main__':
    # Train first; once a checkpoint is saved, swap the comments to run test().
    train()
    #test()
2.结果
D:\Anaconda\python.exe D:/AI20/HJZ/04-深度学习/3-CNN/20191207/03_CNN网络解决手写数据集.py
WARNING:tensorflow:From D:/AI20/HJZ/04-深度学习/3-CNN/20191207/03_CNN网络解决手写数据集.py:11: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use alternatives such as official/mnist/dataset.py from tensorflow/models.
WARNING:tensorflow:From D:\Anaconda\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.
Instructions for updating:
Please write your own downloading logic.
Extracting ../datas/mnist\train-images-idx3-ubyte.gz
WARNING:tensorflow:From D:\Anaconda\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.data to implement this functionality.
WARNING:tensorflow:From D:\Anaconda\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.data to implement this functionality.
WARNING:tensorflow:From D:\Anaconda\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.one_hot on tensors.
Extracting ../datas/mnist\train-labels-idx1-ubyte.gz
Extracting ../datas/mnist\t10k-images-idx3-ubyte.gz
Extracting ../datas/mnist\t10k-labels-idx1-ubyte.gz
WARNING:tensorflow:From D:\Anaconda\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:290: DataSet.__init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use alternatives such as official/mnist/dataset.py from tensorflow/models.
55000
WARNING:tensorflow:From D:/AI20/HJZ/04-深度学习/3-CNN/20191207/03_CNN网络解决手写数据集.py:153: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Future major versions of TensorFlow will allow gradients to flow
into the labels input on backprop by default.
See @{tf.nn.softmax_cross_entropy_with_logits_v2}.
2019-12-28 11:27:24.231554: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX AVX2
Epoch:1 - Step:20 - Train loss:1.3758785724639893 - Valid Loss:1.0853601694107056 - Valid ACC:0.75390625
Epoch:1 - Step:40 - Train loss:0.8615395426750183 - Valid Loss:0.752583384513855 - Valid ACC:0.83203125
Epoch:1 - Step:60 - Train loss:0.7379509210586548 - Valid Loss:0.5745766162872314 - Valid ACC:0.86328125
Epoch:1 - Step:80 - Train loss:0.6938822269439697 - Valid Loss:0.4762168526649475 - Valid ACC:0.8671875
Epoch:1 - Step:100 - Train loss:0.4425535202026367 - Valid Loss:0.40049058198928833 - Valid ACC:0.892578125
Epoch:1 - Step:120 - Train loss:0.5200024843215942 - Valid Loss:0.37083181738853455 - Valid ACC:0.89453125
Epoch:1 - Step:140 - Train loss:0.7850600481033325 - Valid Loss:0.34559592604637146 - Valid ACC:0.90625
Epoch:1 - Step:160 - Train loss:0.48599690198898315 - Valid Loss:0.32200711965560913 - Valid ACC:0.908203125
Epoch:1 - Step:180 - Train loss:0.4800177216529846 - Valid Loss:0.3152586817741394 - Valid ACC:0.904296875
Epoch:1 - Step:200 - Train loss:0.4079371690750122 - Valid Loss:0.2878311276435852 - Valid ACC:0.919921875
Epoch:1 - Step:220 - Train loss:0.33291447162628174 - Valid Loss:0.27526137232780457 - Valid ACC:0.916015625
Epoch:1 - Step:240 - Train loss:0.41540414094924927 - Valid Loss:0.2684410512447357 - Valid ACC:0.923828125
Epoch:1 - Step:260 - Train loss:0.3617456555366516 - Valid Loss:0.24860632419586182 - Valid ACC:0.927734375
Epoch:1 - Step:280 - Train loss:0.441132515668869 - Valid Loss:0.2630936801433563 - Valid ACC:0.921875
Epoch:1 - Step:300 - Train loss:0.3200865685939789 - Valid Loss:0.23273587226867676 - Valid ACC:0.927734375
Epoch:1 - Step:320 - Train loss:0.20851537585258484 - Valid Loss:0.2226412296295166 - Valid ACC:0.9375
Epoch:1 - Step:340 - Train loss:0.20202985405921936 - Valid Loss:0.22897276282310486 - Valid ACC:0.927734375
Epoch:1 - Step:360 - Train loss:0.26890450716018677 - Valid Loss:0.21686163544654846 - Valid ACC:0.9296875
Epoch:1 - Step:380 - Train loss:0.24909546971321106 - Valid Loss:0.20622283220291138 - Valid ACC:0.931640625
Epoch:1 - Step:400 - Train loss:0.2457318902015686 - Valid Loss:0.2070266306400299 - Valid ACC:0.931640625
Epoch:1 - Step:420 - Train loss:0.3122660219669342 - Valid Loss:0.19541355967521667 - Valid ACC:0.935546875
Epoch:2 - Step:440 - Train loss:0.2227267175912857 - Valid Loss:0.18909096717834473 - Valid ACC:0.939453125
Epoch:2 - Step:460 - Train loss:0.1187339723110199 - Valid Loss:0.1907287836074829 - Valid ACC:0.943359375
Epoch:2 - Step:480 - Train loss:0.344089150428772 - Valid Loss:0.18497200310230255 - Valid ACC:0.94140625
Epoch:2 - Step:500 - Train loss:0.34332573413848877 - Valid Loss:0.18347793817520142 - Valid ACC:0.939453125
Epoch:2 - Step:520 - Train loss:0.22418296337127686 - Valid Loss:0.18091829121112823 - Valid ACC:0.939453125
Epoch:2 - Step:540 - Train loss:0.27828335762023926 - Valid Loss:0.1750323623418808 - Valid ACC:0.9453125
Epoch:2 - Step:560 - Train loss:0.20467226207256317 - Valid Loss:0.1713973432779312 - Valid ACC:0.947265625
Epoch:2 - Step:580 - Train loss:0.29375457763671875 - Valid Loss:0.16503959894180298 - Valid ACC:0.94921875
Epoch:2 - Step:600 - Train loss:0.13541537523269653 - Valid Loss:0.17217901349067688 - Valid ACC:0.9453125
Epoch:2 - Step:620 - Train loss:0.3391205668449402 - Valid Loss:0.1702188402414322 - Valid ACC:0.94921875
Epoch:2 - Step:640 - Train loss:0.16924090683460236 - Valid Loss:0.16152311861515045 - Valid ACC:0.953125
Epoch:2 - Step:660 - Train loss:0.12828707695007324 - Valid Loss:0.16498219966888428 - Valid ACC:0.94921875
Epoch:2 - Step:680 - Train loss:0.1617966592311859 - Valid Loss:0.160232275724411 - Valid ACC:0.947265625
Epoch:2 - Step:700 - Train loss:0.23447716236114502 - Valid Loss:0.1620331108570099 - Valid ACC:0.95703125
Epoch:2 - Step:720 - Train loss:0.1807510405778885 - Valid Loss:0.15606114268302917 - Valid ACC:0.955078125
Epoch:2 - Step:740 - Train loss:0.19863054156303406 - Valid Loss:0.15519024431705475 - Valid ACC:0.951171875
Epoch:2 - Step:760 - Train loss:0.22600305080413818 - Valid Loss:0.14795538783073425 - Valid ACC:0.95703125
Epoch:2 - Step:780 - Train loss:0.08754577487707138 - Valid Loss:0.14606568217277527 - Valid ACC:0.955078125
Epoch:2 - Step:800 - Train loss:0.14012466371059418 - Valid Loss:0.14892423152923584 - Valid ACC:0.95703125
Epoch:2 - Step:820 - Train loss:0.1684097945690155 - Valid Loss:0.14880500733852386 - Valid ACC:0.958984375
Epoch:2 - Step:840 - Train loss:0.17465370893478394 - Valid Loss:0.14604640007019043 - Valid ACC:0.953125
Epoch:3 - Step:860 - Train loss:0.10211117565631866 - Valid Loss:0.14269334077835083 - Valid ACC:0.955078125
Epoch:3 - Step:880 - Train loss:0.23551996052265167 - Valid Loss:0.1403297632932663 - Valid ACC:0.958984375
Epoch:3 - Step:900 - Train loss:0.2535104751586914 - Valid Loss:0.1410532146692276 - Valid ACC:0.953125
Epoch:3 - Step:920 - Train loss:0.27433842420578003 - Valid Loss:0.14506487548351288 - Valid ACC:0.951171875
Epoch:3 - Step:940 - Train loss:0.1160753071308136 - Valid Loss:0.13755375146865845 - Valid ACC:0.95703125
Epoch:3 - Step:960 - Train loss:0.17009910941123962 - Valid Loss:0.1322987824678421 - Valid ACC:0.958984375
Epoch:3 - Step:980 - Train loss:0.230097696185112 - Valid Loss:0.13409894704818726 - Valid ACC:0.95703125
Epoch:3 - Step:1000 - Train loss:0.09265587478876114 - Valid Loss:0.1363755613565445 - Valid ACC:0.958984375
Epoch:3 - Step:1020 - Train loss:0.15349377691745758 - Valid Loss:0.12751226127147675 - Valid ACC:0.9609375
Epoch:3 - Step:1040 - Train loss:0.2572493553161621 - Valid Loss:0.13008902966976166 - Valid ACC:0.96484375
Epoch:3 - Step:1060 - Train loss:0.1679753214120865 - Valid Loss:0.125471830368042 - Valid ACC:0.96484375
Epoch:3 - Step:1080 - Train loss:0.10920414328575134 - Valid Loss:0.13013356924057007 - Valid ACC:0.96875
Epoch:3 - Step:1100 - Train loss:0.17077553272247314 - Valid Loss:0.1294427514076233 - Valid ACC:0.962890625
Epoch:3 - Step:1120 - Train loss:0.08780693262815475 - Valid Loss:0.12675990164279938 - Valid ACC:0.96484375
Epoch:3 - Step:1140 - Train loss:0.1651889532804489 - Valid Loss:0.12664562463760376 - Valid ACC:0.962890625
Epoch:3 - Step:1160 - Train loss:0.173245370388031 - Valid Loss:0.12310510128736496 - Valid ACC:0.962890625
Epoch:3 - Step:1180 - Train loss:0.10111492872238159 - Valid Loss:0.1229996532201767 - Valid ACC:0.96484375
Epoch:3 - Step:1200 - Train loss:0.206817626953125 - Valid Loss:0.12259294092655182 - Valid ACC:0.96484375
Epoch:3 - Step:1220 - Train loss:0.15290479362010956 - Valid Loss:0.11997298896312714 - Valid ACC:0.966796875
Epoch:3 - Step:1240 - Train loss:0.2379649132490158 - Valid Loss:0.12414613366127014 - Valid ACC:0.96484375
Epoch:3 - Step:1260 - Train loss:0.16798768937587738 - Valid Loss:0.12493611127138138 - Valid ACC:0.962890625
Epoch:3 - Step:1280 - Train loss:0.07689964771270752 - Valid Loss:0.12325124442577362 - Valid ACC:0.966796875
Saved model to file:./models/mnist\model.ckpt
Epoch:4 - Step:1300 - Train loss:0.1551506668329239 - Valid Loss:0.11784818023443222 - Valid ACC:0.96875
Epoch:4 - Step:1320 - Train loss:0.09287885576486588 - Valid Loss:0.12200142443180084 - Valid ACC:0.962890625
Epoch:4 - Step:1340 - Train loss:0.08747952431440353 - Valid Loss:0.1172151118516922 - Valid ACC:0.966796875
Epoch:4 - Step:1360 - Train loss:0.04157114028930664 - Valid Loss:0.11648068577051163 - Valid ACC:0.96484375
Epoch:4 - Step:1380 - Train loss:0.14990852773189545 - Valid Loss:0.11518972367048264 - Valid ACC:0.966796875
Epoch:4 - Step:1400 - Train loss:0.09128785133361816 - Valid Loss:0.11823329329490662 - Valid ACC:0.962890625
Epoch:4 - Step:1420 - Train loss:0.20824918150901794 - Valid Loss:0.11994654685258865 - Valid ACC:0.970703125
Epoch:4 - Step:1440 - Train loss:0.11932138353586197 - Valid Loss:0.11399351060390472 - Valid ACC:0.962890625
Epoch:4 - Step:1460 - Train loss:0.10036206245422363 - Valid Loss:0.11470556259155273 - Valid ACC:0.96484375
Epoch:4 - Step:1480 - Train loss:0.1543387621641159 - Valid Loss:0.11716897785663605 - Valid ACC:0.962890625
Epoch:4 - Step:1500 - Train loss:0.0649440586566925 - Valid Loss:0.11182655394077301 - Valid ACC:0.966796875
Epoch:4 - Step:1520 - Train loss:0.15861746668815613 - Valid Loss:0.11398045718669891 - Valid ACC:0.96484375
Epoch:4 - Step:1540 - Train loss:0.11883038282394409 - Valid Loss:0.11149167269468307 - Valid ACC:0.96484375
Epoch:4 - Step:1560 - Train loss:0.2793915867805481 - Valid Loss:0.1056586354970932 - Valid ACC:0.96875
Epoch:4 - Step:1580 - Train loss:0.09311538934707642 - Valid Loss:0.10736260563135147 - Valid ACC:0.96875
Epoch:4 - Step:1600 - Train loss:0.14936086535453796 - Valid Loss:0.11083288490772247 - Valid ACC:0.96875
Epoch:4 - Step:1620 - Train loss:0.13893191516399384 - Valid Loss:0.11040250211954117 - Valid ACC:0.966796875
Epoch:4 - Step:1640 - Train loss:0.12744277715682983 - Valid Loss:0.10619155317544937 - Valid ACC:0.966796875
Epoch:4 - Step:1660 - Train loss:0.14002758264541626 - Valid Loss:0.11076502501964569 - Valid ACC:0.96875
Epoch:4 - Step:1680 - Train loss:0.11670336127281189 - Valid Loss:0.10972735285758972 - Valid ACC:0.96484375
Epoch:4 - Step:1700 - Train loss:0.14633117616176605 - Valid Loss:0.10873091220855713 - Valid ACC:0.966796875
Epoch:5 - Step:1720 - Train loss:0.14722086489200592 - Valid Loss:0.1082315519452095 - Valid ACC:0.966796875
Epoch:5 - Step:1740 - Train loss:0.07508328557014465 - Valid Loss:0.10339988023042679 - Valid ACC:0.96875
Epoch:5 - Step:1760 - Train loss:0.08322592079639435 - Valid Loss:0.10490718483924866 - Valid ACC:0.96875
Epoch:5 - Step:1780 - Train loss:0.17263080179691315 - Valid Loss:0.10194595903158188 - Valid ACC:0.97265625
Epoch:5 - Step:1800 - Train loss:0.1722109168767929 - Valid Loss:0.1034020334482193 - Valid ACC:0.970703125
Epoch:5 - Step:1820 - Train loss:0.07782188057899475 - Valid Loss:0.1006486564874649 - Valid ACC:0.96875
Epoch:5 - Step:1840 - Train loss:0.05793192237615585 - Valid Loss:0.09976205229759216 - Valid ACC:0.966796875
Epoch:5 - Step:1860 - Train loss:0.11508874595165253 - Valid Loss:0.10438840091228485 - Valid ACC:0.966796875
Epoch:5 - Step:1880 - Train loss:0.1698644608259201 - Valid Loss:0.09948015958070755 - Valid ACC:0.96875
Epoch:5 - Step:1900 - Train loss:0.14465703070163727 - Valid Loss:0.09998483210802078 - Valid ACC:0.96875
Epoch:5 - Step:1920 - Train loss:0.10333187878131866 - Valid Loss:0.10414556413888931 - Valid ACC:0.966796875
Epoch:5 - Step:1940 - Train loss:0.08313795179128647 - Valid Loss:0.10611935704946518 - Valid ACC:0.966796875
Epoch:5 - Step:1960 - Train loss:0.15418609976768494 - Valid Loss:0.10660889744758606 - Valid ACC:0.966796875
Epoch:5 - Step:1980 - Train loss:0.1325407773256302 - Valid Loss:0.10565822571516037 - Valid ACC:0.966796875
Epoch:5 - Step:2000 - Train loss:0.07842736691236496 - Valid Loss:0.10541883111000061 - Valid ACC:0.96875
Epoch:5 - Step:2020 - Train loss:0.23436227440834045 - Valid Loss:0.1032685935497284 - Valid ACC:0.96875
Epoch:5 - Step:2040 - Train loss:0.10333509743213654 - Valid Loss:0.09648303687572479 - Valid ACC:0.96875
Epoch:5 - Step:2060 - Train loss:0.14600351452827454 - Valid Loss:0.10394762456417084 - Valid ACC:0.96875
Epoch:5 - Step:2080 - Train loss:0.06104746460914612 - Valid Loss:0.09854581207036972 - Valid ACC:0.966796875
Epoch:5 - Step:2100 - Train loss:0.18260376155376434 - Valid Loss:0.10310738533735275 - Valid ACC:0.966796875
Epoch:5 - Step:2120 - Train loss:0.13279765844345093 - Valid Loss:0.09692474454641342 - Valid ACC:0.96875
Epoch:5 - Step:2140 - Train loss:0.13981643319129944 - Valid Loss:0.10549578070640564 - Valid ACC:0.96484375
Epoch:6 - Step:2160 - Train loss:0.12577033042907715 - Valid Loss:0.09527498483657837 - Valid ACC:0.97265625
Epoch:6 - Step:2180 - Train loss:0.1584453284740448 - Valid Loss:0.09854905307292938 - Valid ACC:0.96875
Epoch:6 - Step:2200 - Train loss:0.0682261735200882 - Valid Loss:0.1005205363035202 - Valid ACC:0.96875
Epoch:6 - Step:2220 - Train loss:0.1041327565908432 - Valid Loss:0.0976925790309906 - Valid ACC:0.97265625
Epoch:6 - Step:2240 - Train loss:0.16466133296489716 - Valid Loss:0.09649521857500076 - Valid ACC:0.96875
Epoch:6 - Step:2260 - Train loss:0.09068195521831512 - Valid Loss:0.09930620342493057 - Valid ACC:0.966796875
Epoch:6 - Step:2280 - Train loss:0.07508169114589691 - Valid Loss:0.09630043059587479 - Valid ACC:0.974609375
Epoch:6 - Step:2300 - Train loss:0.10879471153020859 - Valid Loss:0.09501112252473831 - Valid ACC:0.96875
Epoch:6 - Step:2320 - Train loss:0.10670847445726395 - Valid Loss:0.10028455406427383 - Valid ACC:0.970703125
Epoch:6 - Step:2340 - Train loss:0.15924513339996338 - Valid Loss:0.09550382941961288 - Valid ACC:0.96484375
Epoch:6 - Step:2360 - Train loss:0.08846921473741531 - Valid Loss:0.09337668120861053 - Valid ACC:0.966796875
Epoch:6 - Step:2380 - Train loss:0.04186999052762985 - Valid Loss:0.09846983850002289 - Valid ACC:0.966796875
Epoch:6 - Step:2400 - Train loss:0.09673488140106201 - Valid Loss:0.09462832659482956 - Valid ACC:0.97265625
Epoch:6 - Step:2420 - Train loss:0.08025491237640381 - Valid Loss:0.09454915672540665 - Valid ACC:0.966796875
Epoch:6 - Step:2440 - Train loss:0.1393229365348816 - Valid Loss:0.10028853267431259 - Valid ACC:0.966796875
Epoch:6 - Step:2460 - Train loss:0.04316968470811844 - Valid Loss:0.0921938344836235 - Valid ACC:0.966796875
Epoch:6 - Step:2480 - Train loss:0.09997034817934036 - Valid Loss:0.09547010809183121 - Valid ACC:0.966796875
Epoch:6 - Step:2500 - Train loss:0.06327684223651886 - Valid Loss:0.09466446936130524 - Valid ACC:0.96875
Epoch:6 - Step:2520 - Train loss:0.10269716382026672 - Valid Loss:0.09786448627710342 - Valid ACC:0.966796875
Epoch:6 - Step:2540 - Train loss:0.2350616455078125 - Valid Loss:0.09943448007106781 - Valid ACC:0.97265625
Epoch:6 - Step:2560 - Train loss:0.0407288633286953 - Valid Loss:0.09591074287891388 - Valid ACC:0.970703125
Saved model to file:./models/mnist\model.ckpt
Epoch:7 - Step:2580 - Train loss:0.07234738767147064 - Valid Loss:0.09227857738733292 - Valid ACC:0.970703125
Epoch:7 - Step:2600 - Train loss:0.10844378173351288 - Valid Loss:0.09147576242685318 - Valid ACC:0.966796875
Epoch:7 - Step:2620 - Train loss:0.14813123643398285 - Valid Loss:0.09042800217866898 - Valid ACC:0.966796875
Epoch:7 - Step:2640 - Train loss:0.12702736258506775 - Valid Loss:0.09076666086912155 - Valid ACC:0.97265625
Epoch:7 - Step:2660 - Train loss:0.1542978584766388 - Valid Loss:0.08693449199199677 - Valid ACC:0.97265625
Epoch:7 - Step:2680 - Train loss:0.06683585792779922 - Valid Loss:0.08824575692415237 - Valid ACC:0.97265625
Epoch:7 - Step:2700 - Train loss:0.05183421075344086 - Valid Loss:0.090643011033535 - Valid ACC:0.97265625
Epoch:7 - Step:2720 - Train loss:0.0709044337272644 - Valid Loss:0.09136202931404114 - Valid ACC:0.97265625
Epoch:7 - Step:2740 - Train loss:0.06555259227752686 - Valid Loss:0.0904371440410614 - Valid ACC:0.970703125
Epoch:7 - Step:2760 - Train loss:0.06752696633338928 - Valid Loss:0.09310370683670044 - Valid ACC:0.970703125
Epoch:7 - Step:2780 - Train loss:0.1339314579963684 - Valid Loss:0.09067150205373764 - Valid ACC:0.970703125
Epoch:7 - Step:2800 - Train loss:0.10720235109329224 - Valid Loss:0.0917903259396553 - Valid ACC:0.97265625
Epoch:7 - Step:2820 - Train loss:0.09758725762367249 - Valid Loss:0.09069044142961502 - Valid ACC:0.96875
Epoch:7 - Step:2840 - Train loss:0.09487584233283997 - Valid Loss:0.09263356775045395 - Valid ACC:0.96875
Epoch:7 - Step:2860 - Train loss:0.07825423777103424 - Valid Loss:0.09433326125144958 - Valid ACC:0.970703125
Epoch:7 - Step:2880 - Train loss:0.14900749921798706 - Valid Loss:0.08829429000616074 - Valid ACC:0.970703125
Epoch:7 - Step:2900 - Train loss:0.054388973861932755 - Valid Loss:0.08841986954212189 - Valid ACC:0.970703125
Epoch:7 - Step:2920 - Train loss:0.11427734792232513 - Valid Loss:0.09050008654594421 - Valid ACC:0.966796875
Epoch:7 - Step:2940 - Train loss:0.058570340275764465 - Valid Loss:0.08476441353559494 - Valid ACC:0.97265625
Epoch:7 - Step:2960 - Train loss:0.0747164785861969 - Valid Loss:0.08622115850448608 - Valid ACC:0.97265625
Epoch:7 - Step:2980 - Train loss:0.08489186316728592 - Valid Loss:0.08538553863763809 - Valid ACC:0.974609375
Epoch:7 - Step:3000 - Train loss:0.09069662541151047 - Valid Loss:0.08788403868675232 - Valid ACC:0.97265625
Epoch:8 - Step:3020 - Train loss:0.029526952654123306 - Valid Loss:0.08585575222969055 - Valid ACC:0.97265625
Epoch:8 - Step:3040 - Train loss:0.19106654822826385 - Valid Loss:0.08536162972450256 - Valid ACC:0.974609375
Epoch:8 - Step:3060 - Train loss:0.11150145530700684 - Valid Loss:0.08402504771947861 - Valid ACC:0.97265625
Epoch:8 - Step:3080 - Train loss:0.0928545892238617 - Valid Loss:0.08684980869293213 - Valid ACC:0.96875
Epoch:8 - Step:3100 - Train loss:0.062186114490032196 - Valid Loss:0.0869147777557373 - Valid ACC:0.970703125
Epoch:8 - Step:3120 - Train loss:0.10874131321907043 - Valid Loss:0.08782439678907394 - Valid ACC:0.97265625
Epoch:8 - Step:3140 - Train loss:0.09081345796585083 - Valid Loss:0.08561057597398758 - Valid ACC:0.974609375
Epoch:8 - Step:3160 - Train loss:0.05712813138961792 - Valid Loss:0.08741847425699234 - Valid ACC:0.97265625
Epoch:8 - Step:3180 - Train loss:0.053160712122917175 - Valid Loss:0.08599395304918289 - Valid ACC:0.970703125
Epoch:8 - Step:3200 - Train loss:0.13060030341148376 - Valid Loss:0.0868854969739914 - Valid ACC:0.97265625
Epoch:8 - Step:3220 - Train loss:0.032313935458660126 - Valid Loss:0.08935139328241348 - Valid ACC:0.96875
Epoch:8 - Step:3240 - Train loss:0.07514908164739609 - Valid Loss:0.09022442251443863 - Valid ACC:0.970703125
Epoch:8 - Step:3260 - Train loss:0.0955028235912323 - Valid Loss:0.08625272661447525 - Valid ACC:0.970703125
Epoch:8 - Step:3280 - Train loss:0.06621134281158447 - Valid Loss:0.08585183322429657 - Valid ACC:0.96875
Epoch:8 - Step:3300 - Train loss:0.1185687854886055 - Valid Loss:0.08677119016647339 - Valid ACC:0.970703125
Epoch:8 - Step:3320 - Train loss:0.10628166794776917 - Valid Loss:0.08184456080198288 - Valid ACC:0.978515625
Epoch:8 - Step:3340 - Train loss:0.05693652480840683 - Valid Loss:0.08492109179496765 - Valid ACC:0.970703125
Epoch:8 - Step:3360 - Train loss:0.04095027595758438 - Valid Loss:0.08217191696166992 - Valid ACC:0.974609375
Epoch:8 - Step:3380 - Train loss:0.029418982565402985 - Valid Loss:0.07923252135515213 - Valid ACC:0.974609375
Epoch:8 - Step:3400 - Train loss:0.05943181365728378 - Valid Loss:0.08586996793746948 - Valid ACC:0.97265625
Epoch:8 - Step:3420 - Train loss:0.0665065124630928 - Valid Loss:0.08548500388860703 - Valid ACC:0.970703125
Epoch:9 - Step:3440 - Train loss:0.06376215815544128 - Valid Loss:0.08163836598396301 - Valid ACC:0.97265625
Epoch:9 - Step:3460 - Train loss:0.07327409088611603 - Valid Loss:0.0795031264424324 - Valid ACC:0.9765625
Epoch:9 - Step:3480 - Train loss:0.12537315487861633 - Valid Loss:0.084110327064991 - Valid ACC:0.97265625
Epoch:9 - Step:3500 - Train loss:0.06893593817949295 - Valid Loss:0.08117067068815231 - Valid ACC:0.970703125
Epoch:9 - Step:3520 - Train loss:0.10028155893087387 - Valid Loss:0.08138647675514221 - Valid ACC:0.974609375
Epoch:9 - Step:3540 - Train loss:0.026766153052449226 - Valid Loss:0.07938548177480698 - Valid ACC:0.97265625
Epoch:9 - Step:3560 - Train loss:0.08576803654432297 - Valid Loss:0.07901334017515182 - Valid ACC:0.970703125
Epoch:9 - Step:3580 - Train loss:0.10292135179042816 - Valid Loss:0.07886340469121933 - Valid ACC:0.97265625
Epoch:9 - Step:3600 - Train loss:0.11909455060958862 - Valid Loss:0.08312834799289703 - Valid ACC:0.96875
Epoch:9 - Step:3620 - Train loss:0.11890989542007446 - Valid Loss:0.07956550270318985 - Valid ACC:0.96875
Epoch:9 - Step:3640 - Train loss:0.08197904378175735 - Valid Loss:0.07754133641719818 - Valid ACC:0.974609375
Epoch:9 - Step:3660 - Train loss:0.065273717045784 - Valid Loss:0.0766575038433075 - Valid ACC:0.974609375
Epoch:9 - Step:3680 - Train loss:0.10197359323501587 - Valid Loss:0.07762917876243591 - Valid ACC:0.97265625
Epoch:9 - Step:3700 - Train loss:0.05971909686923027 - Valid Loss:0.07920423150062561 - Valid ACC:0.9765625
Epoch:9 - Step:3720 - Train loss:0.09735095500946045 - Valid Loss:0.0813782662153244 - Valid ACC:0.97265625
Epoch:9 - Step:3740 - Train loss:0.06895708292722702 - Valid Loss:0.08280261605978012 - Valid ACC:0.970703125
Epoch:9 - Step:3760 - Train loss:0.024128496646881104 - Valid Loss:0.0821051076054573 - Valid ACC:0.974609375
Epoch:9 - Step:3780 - Train loss:0.09070117771625519 - Valid Loss:0.08375895768404007 - Valid ACC:0.96875
Epoch:9 - Step:3800 - Train loss:0.04031326621770859 - Valid Loss:0.07855714857578278 - Valid ACC:0.97265625
Epoch:9 - Step:3820 - Train loss:0.057803235948085785 - Valid Loss:0.08145531266927719 - Valid ACC:0.974609375
Epoch:9 - Step:3840 - Train loss:0.08149026334285736 - Valid Loss:0.08015840500593185 - Valid ACC:0.97265625
Epoch:9 - Step:3860 - Train loss:0.05761280283331871 - Valid Loss:0.08173146843910217 - Valid ACC:0.97265625
Saved model to file:./models/mnist\model.ckpt
Process finished with exit code 0
测试集
D:\Anaconda\python.exe D:/AI20/HJZ/04-深度学习/3-CNN/20191207/03_CNN网络解决手写数据集.py
WARNING:tensorflow:From D:/AI20/HJZ/04-深度学习/3-CNN/20191207/03_CNN网络解决手写数据集.py:11: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use alternatives such as official/mnist/dataset.py from tensorflow/models.
Extracting ../datas/mnist\train-images-idx3-ubyte.gz
WARNING:tensorflow:From D:\Anaconda\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.
Instructions for updating:
Please write your own downloading logic.
WARNING:tensorflow:From D:\Anaconda\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.data to implement this functionality.
Extracting ../datas/mnist\train-labels-idx1-ubyte.gz
WARNING:tensorflow:From D:\Anaconda\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.data to implement this functionality.
WARNING:tensorflow:From D:\Anaconda\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.one_hot on tensors.
Extracting ../datas/mnist\t10k-images-idx3-ubyte.gz
Extracting ../datas/mnist\t10k-labels-idx1-ubyte.gz
WARNING:tensorflow:From D:\Anaconda\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:290: DataSet.__init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use alternatives such as official/mnist/dataset.py from tensorflow/models.
55000
WARNING:tensorflow:From D:/AI20/HJZ/04-深度学习/3-CNN/20191207/03_CNN网络解决手写数据集.py:253: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Future major versions of TensorFlow will allow gradients to flow
into the labels input on backprop by default.
See @{tf.nn.softmax_cross_entropy_with_logits_v2}.
2019-12-28 11:49:51.479618: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX AVX2
load old model to test!
step: 1 - test ACC:1.0
step: 2 - test ACC:0.9921875
step: 3 - test ACC:0.96875
step: 4 - test ACC:0.9921875
step: 5 - test ACC:0.984375
step: 6 - test ACC:0.984375
step: 7 - test ACC:1.0
step: 8 - test ACC:0.96875
step: 9 - test ACC:0.9921875
step: 10 - test ACC:0.984375
step: 11 - test ACC:0.984375
step: 12 - test ACC:0.9921875
step: 13 - test ACC:0.984375
step: 14 - test ACC:0.9765625
step: 15 - test ACC:0.984375
step: 16 - test ACC:0.9921875
step: 17 - test ACC:0.9921875
step: 18 - test ACC:0.984375
step: 19 - test ACC:0.96875
step: 20 - test ACC:0.9765625
step: 21 - test ACC:0.984375
step: 22 - test ACC:0.9765625
step: 23 - test ACC:0.984375
step: 24 - test ACC:0.9921875
step: 25 - test ACC:0.953125
step: 26 - test ACC:0.9921875
step: 27 - test ACC:0.9921875
step: 28 - test ACC:0.9921875
step: 29 - test ACC:0.96875
step: 30 - test ACC:0.96875
step: 31 - test ACC:0.9921875
step: 32 - test ACC:0.984375
step: 33 - test ACC:0.984375
step: 34 - test ACC:0.96875
step: 35 - test ACC:1.0
step: 36 - test ACC:0.9765625
step: 37 - test ACC:0.9765625
step: 38 - test ACC:0.9765625
step: 39 - test ACC:0.96875
step: 40 - test ACC:0.984375
step: 41 - test ACC:0.984375
step: 42 - test ACC:0.984375
step: 43 - test ACC:0.9765625
step: 44 - test ACC:1.0
step: 45 - test ACC:1.0
step: 46 - test ACC:0.96875
step: 47 - test ACC:0.96875
step: 48 - test ACC:0.9921875
step: 49 - test ACC:0.984375
step: 50 - test ACC:0.984375
step: 51 - test ACC:0.9921875
step: 52 - test ACC:0.9921875
step: 53 - test ACC:0.9921875
step: 54 - test ACC:0.9921875
step: 55 - test ACC:0.984375
step: 56 - test ACC:1.0
step: 57 - test ACC:0.9765625
step: 58 - test ACC:0.96875
step: 59 - test ACC:0.9609375
step: 60 - test ACC:0.984375
step: 61 - test ACC:0.953125
step: 62 - test ACC:0.9765625
step: 63 - test ACC:0.9921875
step: 64 - test ACC:0.9765625
step: 65 - test ACC:0.9921875
step: 66 - test ACC:1.0
step: 67 - test ACC:0.9765625
step: 68 - test ACC:0.9921875
step: 69 - test ACC:0.9765625
step: 70 - test ACC:0.9921875
step: 71 - test ACC:0.984375
step: 72 - test ACC:0.9921875
step: 73 - test ACC:1.0
step: 74 - test ACC:0.9765625
step: 75 - test ACC:1.0
step: 76 - test ACC:0.96875
step: 77 - test ACC:1.0
step: 78 - test ACC:0.984375
Avg Test Accuracy :0.98387
Process finished with exit code 0
3.可视化
[tensorboard --logdir=路径]


本文展示了一种利用卷积神经网络(CNN)解决手写数字识别问题的方法,通过构建多层神经网络结构,包括卷积层、池化层、全连接层等,实现了对手写数字的有效识别。文章详细介绍了模型训练过程,包括参数设置、数据预处理、训练迭代及模型评估,并通过可视化展示了训练效果。
6720

被折叠的 条评论
为什么被折叠?



