import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Load the MNIST handwritten-digit dataset (downloads on first use) and
# prepare it for a Keras CNN: add a channel axis, scale to [0, 1], one-hot labels.
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()
# Grayscale images need an explicit trailing channel dimension for Conv2D.
# -1 infers the sample count instead of hard-coding 60000 / 10000.
train_images = train_images.reshape(-1, 28, 28, 1)
test_images = test_images.reshape(-1, 28, 28, 1)
# Scale uint8 pixel values [0, 255] -> [0, 1]; float32 is TensorFlow's native dtype
# (plain /255 would leave float64, doubling memory for no benefit).
train_images = (train_images / 255.0).astype("float32")
test_images = (test_images / 255.0).astype("float32")
# One-hot encode the integer class labels, as required by categorical_crossentropy.
# pandas >= 2.0 returns bool dummies, so cast explicitly to float32.
train_labels = np.array(pd.get_dummies(train_labels)).astype("float32")
test_labels = np.array(pd.get_dummies(test_labels)).astype("float32")
# LeNet-5 style CNN for 28x28x1 MNIST digits: two conv + average-pooling
# stages followed by a small dense classifier (sigmoid activations throughout).
model = tf.keras.Sequential([
    # First conv stage: 'same' padding preserves the 28x28 spatial size.
    tf.keras.layers.Conv2D(6, (5, 5), padding='same',
                           activation="sigmoid", input_shape=(28, 28, 1)),
    tf.keras.layers.AveragePooling2D((2, 2)),
    # Second conv stage: default 'valid' padding shrinks 14x14 -> 10x10.
    tf.keras.layers.Conv2D(16, (5, 5), activation="sigmoid"),
    tf.keras.layers.AveragePooling2D((2, 2)),
    # Classifier head: flatten 5x5x16 -> 400 features.
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(84, activation="sigmoid"),
    # Softmax output because this is a 10-way multi-class problem.
    tf.keras.layers.Dense(10, activation="softmax"),
])
model.summary()
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 28, 28, 6) 156
average_pooling2d (Average (None, 14, 14, 6) 0
Pooling2D)
conv2d_1 (Conv2D) (None, 10, 10, 16) 2416
average_pooling2d_1 (Avera (None, 5, 5, 16) 0
gePooling2D)
flatten (Flatten) (None, 400) 0
dense (Dense) (None, 84) 33684
dense_1 (Dense) (None, 10) 850
=================================================================
Total params: 37106 (144.95 KB)
Trainable params: 37106 (144.95 KB)
Non-trainable params: 0 (0.00 Byte)
_________________________________________________________________
# Configure training: categorical cross-entropy (labels are one-hot), the Adam
# optimizer, and accuracy ('acc') tracked during fit/evaluate.
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
model.compile():配置模型的学习过程,包括选择优化器、损失函数和评估指标等。
metrics=['acc']:表示模型在训练和验证过程中将会监测并输出模型的准确率。
# Train for 10 epochs; the held-out test set is used as validation data,
# so per-epoch val_loss / val_acc are recorded in the returned History.
history = model.fit(
    x=train_images,
    y=train_labels,
    validation_data=(test_images, test_labels),
    epochs=10,
)
Epoch 1/10
1875/1875 [==============================] - 9s 5ms/step - loss: 0.7652 - acc: 0.7607 - val_loss: 0.2475 - val_acc: 0.9280
Epoch 2/10
1875/1875 [==============================] - 9s 5ms/step - loss: 0.2062 - acc: 0.9373 - val_loss: 0.1559 - val_acc: 0.9530
Epoch 3/10
1875/1875 [==============================] - 9s 5ms/step - loss: 0.1385 - acc: 0.9576 - val_loss: 0.1039 - val_acc: 0.9699
Epoch 4/10
1875/1875 [==============================] - 9s 5ms/step - loss: 0.1014 - acc: 0.9693 - val_loss: 0.0778 - val_acc: 0.9756
Epoch 5/10
1875/1875 [==============================] - 9s 5ms/step - loss: 0.0813 - acc: 0.9751 - val_loss: 0.0649 - val_acc: 0.9798
Epoch 6/10
1875/1875 [==============================] - 9s 5ms/step - loss: 0.0686 - acc: 0.9795 - val_loss: 0.0638 - val_acc: 0.9800
Epoch 7/10
1875/1875 [==============================] - 9s 5ms/step - loss: 0.0593 - acc: 0.9822 - val_loss: 0.0546 - val_acc: 0.9838
Epoch 8/10
1875/1875 [==============================] - 9s 5ms/step - loss: 0.0533 - acc: 0.9840 - val_loss: 0.0467 - val_acc: 0.9848
Epoch 9/10
1875/1875 [==============================] - 9s 5ms/step - loss: 0.0477 - acc: 0.9855 - val_loss: 0.0463 - val_acc: 0.9851
Epoch 10/10
1875/1875 [==============================] - 9s 5ms/step - loss: 0.0432 - acc: 0.9867 - val_loss: 0.0515 - val_acc: 0.9834
# Final evaluation on the test set; returns [loss, acc] per the compiled metrics.
model.evaluate(x=test_images, y=test_labels)
313/313 [==============================] - 1s 3ms/step - loss: 0.0515 - acc: 0.9834
[0.05148673057556152, 0.9833999872207642]
# Save the trained model to disk for later reloading with tf.keras.models.load_model.
# NOTE(review): the .h5 extension selects the legacy HDF5 format; the native
# '.keras' format is recommended by newer Keras versions — confirm before changing,
# since downstream loaders may expect this exact path.
model.save('mnist.h5')
最后一步保存模型,以便下次调用。