MNIST手写数字识别_CNN 06

MNIST手写数字识别_CNN

该程序相对于上一篇程序仅修改了创建模型的部分:

def create_model():
    """Build and compile the CNN classifier for 28x28x1 MNIST digit images."""
    # Assemble the network as a single layer list instead of repeated add() calls.
    layers = [
        # Conv stage 1: 30 feature maps from 5x5 kernels.
        Conv2D(filters=30, kernel_size=(5, 5), activation='relu',
               input_shape=(28, 28, 1)),
        # Max-pooling with a 2x2 window.
        MaxPooling2D(pool_size=(2, 2)),
        # Conv stage 2: 15 feature maps from 3x3 kernels.
        Conv2D(filters=15, kernel_size=(3, 3), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        # Randomly drop 20% of activations to reduce overfitting.
        Dropout(0.2),
        # Flatten the pooled feature maps for the dense classifier head.
        Flatten(),
        # Fully connected layers.
        Dense(128, activation='relu'),
        Dense(50, activation='relu'),
        # Softmax output: a probability per digit class (0-9).
        Dense(10, activation='softmax'),
    ]
    model = Sequential(layers)
    # Compile with cross-entropy loss and Adam, tracking accuracy.
    model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam')

    return model

修改模型后程序的正确率可以达到 97.25%。没有搞明白别人的百分之99+是怎么做出来的~(提示:input_data 读入的图像本身已经归一化到 [0, 1],后面再除以 255 会把输入压缩到接近 0,去掉这次多余的归一化通常就能明显提升准确率。)

完整代码为:

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import matplotlib.pyplot as plt
import numpy as np
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Activation
from keras.layers import Conv2D
from keras.layers import Flatten
from keras.layers import MaxPooling2D
from keras.layers import Dropout
import os

seed = 7
np.random.seed(seed)  # make shuffling/initialization reproducible

# Load MNIST with one-hot encoded labels.
# NOTE: with the default float32 dtype, read_data_sets already rescales
# pixel values to [0, 1], so no further /255 normalization is needed.
mnist = input_data.read_data_sets(r'D:\pythonProject1\MNIST\MNIST_data', one_hot=True)
train_X = mnist.train.images
train_Y = mnist.train.labels
test_X = mnist.test.images
test_Y = mnist.test.labels

print(train_X.shape, type(train_X))
print(test_X.shape, type(test_X))

# Reshape flat 784-pixel rows into (28, 28, 1) images for the Conv2D input.
train_X = train_X.reshape(train_X.shape[0], 28, 28, 1)
test_X = test_X.reshape(test_X.shape[0], 28, 28, 1)
print(train_X.shape, type(train_X))  # (55000, 28, 28, 1) <class 'numpy.ndarray'>
print(test_X.shape, type(test_X))    # (10000, 28, 28, 1) <class 'numpy.ndarray'>

# BUG FIX: the original divided by 255 here, but the images are already in
# [0, 1]; dividing again squashes inputs into [0, 1/255], which degrades
# training. The redundant normalization has been removed.

def create_model():
    """Construct and compile the convolutional MNIST digit classifier."""
    net = Sequential()
    # Feature extractor: two conv + max-pool stages.
    net.add(Conv2D(30, (5, 5), activation='relu', input_shape=(28, 28, 1)))
    net.add(MaxPooling2D((2, 2)))
    net.add(Conv2D(15, (3, 3), activation='relu'))
    net.add(MaxPooling2D((2, 2)))
    # Drop 20% of activations to curb overfitting.
    net.add(Dropout(0.2))
    # Flatten pooled feature maps, then the dense classifier head.
    net.add(Flatten())
    for width in (128, 50):
        net.add(Dense(width, activation='relu'))
    # Softmax yields a probability for each of the 10 digit classes.
    net.add(Dense(10, activation='softmax'))
    # Cross-entropy loss with the Adam optimizer, reporting accuracy.
    net.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam')

    return net

model = create_model()

# Inspect the architecture and print each layer's output shape.
model.summary()
for layer in model.layers:
    # get_output_at(0) is the layer's first output tensor (deprecated in
    # newer Keras versions; kept as-is here).
    print(layer.get_output_at(0).get_shape().as_list())

# Train for 10 epochs with mini-batches of 200; the returned History records
# per-epoch loss/accuracy used for the plots below.
# NOTE(review): validation_data is the *test* set, so the "val" curves are
# test-set curves rather than a true held-out validation split.
history =model.fit(train_X,train_Y,epochs=10,batch_size=200,verbose=2,validation_data=(test_X, test_Y))
# verbose=2: print only the summary line for each epoch

# 可视化数据
fig = plt.figure()
plt.subplot(2, 1, 1)
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model Accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='lower right')

plt.subplot(2, 1, 2)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model Loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper right')
plt.tight_layout()

plt.show()

# Persist the trained model, replacing any previous save directory.
gfile = tf.io.gfile
save_dir = "./MNIST_model/"
if gfile.exists(save_dir):
    # Wipe stale contents so the directory holds only this run's model.
    gfile.rmtree(save_dir)
gfile.mkdir(save_dir)

model_name = 'mnist_cnn+.h5'
model_path = os.path.join(save_dir, model_name)
model.save(model_path)
print('Saved trained model at %s ' % model_path)


# Reload the saved model to verify the save/load round-trip.
# NOTE(review): the model was built with standalone `keras` but is loaded
# via `tf.keras`; this can fail on mismatched versions — confirm both
# libraries are compatible.
mnist_model = tf.keras.models.load_model(model_path)

# Evaluate classification performance on the test set: [loss, accuracy].
loss_and_metrics = mnist_model.evaluate(test_X, test_Y, verbose=2)

print("Test Loss: {}".format(loss_and_metrics[0]))
print("Test Accuracy: {}".format(loss_and_metrics[1] * 100))
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值