- 🍨 本文为🔗365天深度学习训练营 中的学习记录博客
- 🍖 原作者:K同学啊
🚀我的环境:
- 语言环境:python 3.12.6
- 编译器:jupyter lab
- 深度学习环境:TensorFlow 2.17.0
import pathlib

# Root folder of the dataset: one sub-directory per class, images inside.
data_dir = pathlib.Path("C:/Users/PC/Desktop/48-data")

# Count every file one level below a class folder (i.e. all images).
image_count = len(list(data_dir.glob('*/*')))
print("图片总数为:", image_count)
图片总数为: 1800
# Input-pipeline hyper-parameters.
batch_size = 16
# Square input resolution fed to the network.
img_height = img_width = 336
import tensorflow as tf
"""
关于image_dataset_from_directory()的详细介绍可以参考文章:https://mtyjkh.blog.youkuaiyun.com/article/details/117018789
"""
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
data_dir,
validation_split=0.2,
subset="training",
seed=12,
image_size=(img_height, img_width),
batch_size=batch_size)
Found 1800 files belonging to 17 classes.
Using 1440 files for training.
"""
关于image_dataset_from_directory()的详细介绍可以参考文章:https://mtyjkh.blog.youkuaiyun.com/article/details/117018789
"""
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
data_dir,
validation_split=0.2,
subset="validation",
seed=12,
image_size=(img_height, img_width),
batch_size=batch_size)
Found 1800 files belonging to 17 classes.
Using 360 files for validation.
# Class labels in the order image_dataset_from_directory assigned them
# (alphabetical sub-folder names): dataset label i maps to class_names[i].
class_names = train_ds.class_names
print(class_names)
['Angelina Jolie', 'Brad Pitt', 'Denzel Washington', 'Hugh Jackman', 'Jennifer Lawrence', 'Johnny Depp', 'Kate Winslet', 'Leonardo DiCaprio', 'Megan Fox', 'Natalie Portman', 'Nicole Kidman', 'Robert Downey Jr', 'Sandra Bullock', 'Scarlett Johansson', 'Tom Cruise', 'Tom Hanks', 'Will Smith']
# Peek at one batch to confirm tensor shapes:
# images -> (batch, height, width, channels), labels -> (batch,).
image_batch, labels_batch = next(iter(train_ds))
print(image_batch.shape)
print(labels_batch.shape)
(16, 336, 336, 3)
(16,)
# Let tf.data pick buffer sizes dynamically at runtime.
AUTOTUNE = tf.data.AUTOTUNE
def train_preprocessing(image, label):
    """Scale pixel values from [0, 255] to [0, 1]; pass the label through."""
    scaled = image / 255.0
    return scaled, label
# Training pipeline: cache decoded images, reshuffle every epoch, normalise
# with train_preprocessing, and prefetch so the accelerator never waits.
# Batching was already done by image_dataset_from_directory.
train_ds = train_ds.cache()
train_ds = train_ds.shuffle(1000)
train_ds = train_ds.map(train_preprocessing)
train_ds = train_ds.prefetch(buffer_size=AUTOTUNE)
# Validation pipeline: cache + normalise + prefetch.  Unlike the training
# pipeline there is no shuffle — evaluation gains nothing from it, and a
# fixed order keeps per-epoch validation runs deterministic and comparable.
# Batching was already done by image_dataset_from_directory.
val_ds = (
    val_ds.cache()
    .map(train_preprocessing)
    .prefetch(buffer_size=AUTOTUNE)
)
import matplotlib.pyplot as plt

# Register fonts that contain CJK glyphs so the Chinese suptitle renders
# instead of emitting "missing glyph" warnings (DejaVu Sans has none).
plt.rcParams['font.sans-serif'] = ['SimHei', 'Microsoft YaHei', 'DejaVu Sans']
plt.rcParams['axes.unicode_minus'] = False  # keep '-' rendering with CJK fonts

plt.figure(figsize=(10, 8))  # figure is 10 wide by 8 tall
plt.suptitle("数据展示")
for images, labels in train_ds.take(1):
    for i in range(15):
        plt.subplot(4, 5, i + 1)
        plt.xticks([])
        plt.yticks([])
        plt.grid(False)
        # 显示图片 (pixels already scaled to [0, 1] by train_preprocessing)
        plt.imshow(images[i])
        # 显示标签 — labels are already 0-based indices into class_names;
        # the original `labels[i] - 1` shifted every caption to the previous
        # class and showed the LAST class for label 0.
        plt.xlabel(class_names[labels[i]])
plt.show()
D:\Users\PC\AppData\Local\Programs\Python\Python312\Lib\site-packages\IPython\core\pylabtools.py:170: UserWarning: Glyph 25968 (\N{CJK UNIFIED IDEOGRAPH-6570}) missing from font(s) DejaVu Sans.
fig.canvas.print_figure(bytes_io, **kw)
D:\Users\PC\AppData\Local\Programs\Python\Python312\Lib\site-packages\IPython\core\pylabtools.py:170: UserWarning: Glyph 25454 (\N{CJK UNIFIED IDEOGRAPH-636E}) missing from font(s) DejaVu Sans.
fig.canvas.print_figure(bytes_io, **kw)
D:\Users\PC\AppData\Local\Programs\Python\Python312\Lib\site-packages\IPython\core\pylabtools.py:170: UserWarning: Glyph 23637 (\N{CJK UNIFIED IDEOGRAPH-5C55}) missing from font(s) DejaVu Sans.
fig.canvas.print_figure(bytes_io, **kw)
D:\Users\PC\AppData\Local\Programs\Python\Python312\Lib\site-packages\IPython\core\pylabtools.py:170: UserWarning: Glyph 31034 (\N{CJK UNIFIED IDEOGRAPH-793A}) missing from font(s) DejaVu Sans.
fig.canvas.print_figure(bytes_io, **kw)
构建模型
from tensorflow.keras.layers import Dropout,Dense,BatchNormalization
from tensorflow.keras.models import Model
def create_model(optimizer='adam'):
    """Build a transfer-learning classifier on a frozen VGG16 backbone.

    Args:
        optimizer: optimizer instance or name passed to ``model.compile``.

    Returns:
        A compiled ``tf.keras.Model`` mapping (img_height, img_width, 3)
        images to ``len(class_names)`` softmax class probabilities.
    """
    # ImageNet-pretrained convolutional base; global average pooling replaces
    # the fully-connected top.  Height comes FIRST in input_shape, matching
    # the (img_height, img_width) order used by image_dataset_from_directory
    # (the original swapped them — harmless while both are 336, but a latent
    # bug if the two ever differ).
    vgg16_base_model = tf.keras.applications.vgg16.VGG16(weights='imagenet',
                                                         include_top=False,
                                                         input_shape=(img_height, img_width, 3),
                                                         pooling='avg')
    # Freeze the backbone so only the new classification head is trained.
    for layer in vgg16_base_model.layers:
        layer.trainable = False

    # NOTE(review): the input pipeline scales pixels to [0, 1], but these
    # weights were trained with vgg16.preprocess_input (BGR mean subtraction)
    # — consider switching the preprocessing for better transfer accuracy.
    X = vgg16_base_model.output
    X = Dense(170, activation='relu')(X)
    X = BatchNormalization()(X)
    X = Dropout(0.5)(X)
    output = Dense(len(class_names), activation='softmax')(X)

    vgg16_model = Model(inputs=vgg16_base_model.input, outputs=output)
    # Integer labels -> sparse categorical cross-entropy.
    vgg16_model.compile(optimizer=optimizer,
                        loss='sparse_categorical_crossentropy',
                        metrics=['accuracy'])
    return vgg16_model
# Build two otherwise-identical models so the only variable is the optimizer.
model1 = create_model(optimizer=tf.keras.optimizers.Adam())
model2 = create_model(optimizer=tf.keras.optimizers.SGD())
model2.summary()
Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/vgg16/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5
[1m58889256/58889256[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m9s[0m 0us/step
Model: "functional_1"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓ ┃ Layer (type) ┃ Output Shape ┃ Param # ┃ ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩ │ input_layer_1 (InputLayer) │ (None, 336, 336, 3) │ 0 │ ├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤ │ block1_conv1 (Conv2D) │ (None, 336, 336, 64) │ 1,792 │ ├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤ │ block1_conv2 (Conv2D) │ (None, 336, 336, 64) │ 36,928 │ ├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤ │ block1_pool (MaxPooling2D) │ (None, 168, 168, 64) │ 0 │ ├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤ │ block2_conv1 (Conv2D) │ (None, 168, 168, 128) │ 73,856 │ ├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤ │ block2_conv2 (Conv2D) │ (None, 168, 168, 128) │ 147,584 │ ├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤ │ block2_pool (MaxPooling2D) │ (None, 84, 84, 128) │ 0 │ ├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤ │ block3_conv1 (Conv2D) │ (None, 84, 84, 256) │ 295,168 │ ├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤ │ block3_conv2 (Conv2D) │ (None, 84, 84, 256) │ 590,080 │ ├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤ │ block3_conv3 (Conv2D) │ (None, 84, 84, 256) │ 590,080 │ ├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤ │ block3_pool (MaxPooling2D) │ (None, 42, 42, 256) │ 0 │ ├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤ │ block4_conv1 (Conv2D) │ (None, 42, 42, 512) │ 1,180,160 │ ├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤ │ block4_conv2 
(Conv2D) │ (None, 42, 42, 512) │ 2,359,808 │ ├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤ │ block4_conv3 (Conv2D) │ (None, 42, 42, 512) │ 2,359,808 │ ├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤ │ block4_pool (MaxPooling2D) │ (None, 21, 21, 512) │ 0 │ ├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤ │ block5_conv1 (Conv2D) │ (None, 21, 21, 512) │ 2,359,808 │ ├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤ │ block5_conv2 (Conv2D) │ (None, 21, 21, 512) │ 2,359,808 │ ├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤ │ block5_conv3 (Conv2D) │ (None, 21, 21, 512) │ 2,359,808 │ ├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤ │ block5_pool (MaxPooling2D) │ (None, 10, 10, 512) │ 0 │ ├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤ │ global_average_pooling2d_1 │ (None, 512) │ 0 │ │ (GlobalAveragePooling2D) │ │ │ ├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤ │ dense_2 (Dense) │ (None, 170) │ 87,210 │ ├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤ │ batch_normalization_1 │ (None, 170) │ 680 │ │ (BatchNormalization) │ │ │ ├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤ │ dropout_1 (Dropout) │ (None, 170) │ 0 │ ├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤ │ dense_3 (Dense) │ (None, 17) │ 2,907 │ └──────────────────────────────────────┴─────────────────────────────┴─────────────────┘
Total params: 14,805,485 (56.48 MB)
Trainable params: 90,457 (353.35 KB)
Non-trainable params: 14,715,028 (56.13 MB)
训练模型
NO_EPOCHS = 50
# Train both models on the same splits so their histories are directly
# comparable (Adam vs SGD).
history_model1 = model1.fit(train_ds, epochs=NO_EPOCHS, verbose=1, validation_data=val_ds)
history_model2 = model2.fit(train_ds, epochs=NO_EPOCHS, verbose=1, validation_data=val_ds)
Epoch 1/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m566s[0m 6s/step - accuracy: 0.1262 - loss: 3.1183 - val_accuracy: 0.1028 - val_loss: 2.7404
Epoch 2/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m629s[0m 7s/step - accuracy: 0.3282 - loss: 2.1275 - val_accuracy: 0.1250 - val_loss: 2.5474
Epoch 3/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m659s[0m 7s/step - accuracy: 0.4536 - loss: 1.6957 - val_accuracy: 0.2389 - val_loss: 2.2317
Epoch 4/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m672s[0m 7s/step - accuracy: 0.5300 - loss: 1.5064 - val_accuracy: 0.2694 - val_loss: 2.0583
Epoch 5/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m664s[0m 7s/step - accuracy: 0.6105 - loss: 1.2924 - val_accuracy: 0.4917 - val_loss: 1.6857
Epoch 6/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m637s[0m 7s/step - accuracy: 0.6551 - loss: 1.1861 - val_accuracy: 0.4333 - val_loss: 1.8876
Epoch 7/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m628s[0m 7s/step - accuracy: 0.6489 - loss: 1.0784 - val_accuracy: 0.4917 - val_loss: 1.6237
Epoch 8/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m612s[0m 7s/step - accuracy: 0.6924 - loss: 1.0258 - val_accuracy: 0.5000 - val_loss: 1.6135
Epoch 9/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m604s[0m 7s/step - accuracy: 0.7366 - loss: 0.9051 - val_accuracy: 0.4917 - val_loss: 1.5768
Epoch 10/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m569s[0m 6s/step - accuracy: 0.7647 - loss: 0.7775 - val_accuracy: 0.4306 - val_loss: 1.7517
Epoch 11/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m569s[0m 6s/step - accuracy: 0.7537 - loss: 0.7607 - val_accuracy: 0.4278 - val_loss: 2.0469
Epoch 12/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m572s[0m 6s/step - accuracy: 0.7972 - loss: 0.6743 - val_accuracy: 0.4000 - val_loss: 2.1051
Epoch 13/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m577s[0m 6s/step - accuracy: 0.8138 - loss: 0.6276 - val_accuracy: 0.5667 - val_loss: 1.4210
Epoch 14/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m582s[0m 6s/step - accuracy: 0.8470 - loss: 0.5681 - val_accuracy: 0.4111 - val_loss: 2.1960
Epoch 15/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m590s[0m 7s/step - accuracy: 0.8375 - loss: 0.5396 - val_accuracy: 0.4972 - val_loss: 1.7560
Epoch 16/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m587s[0m 7s/step - accuracy: 0.8484 - loss: 0.4755 - val_accuracy: 0.5500 - val_loss: 1.5088
Epoch 17/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m591s[0m 7s/step - accuracy: 0.8508 - loss: 0.4970 - val_accuracy: 0.5250 - val_loss: 1.6994
Epoch 18/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m593s[0m 7s/step - accuracy: 0.8569 - loss: 0.4625 - val_accuracy: 0.4889 - val_loss: 1.8389
Epoch 19/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m593s[0m 7s/step - accuracy: 0.8959 - loss: 0.3906 - val_accuracy: 0.5056 - val_loss: 1.9204
Epoch 20/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m596s[0m 7s/step - accuracy: 0.8977 - loss: 0.3555 - val_accuracy: 0.4194 - val_loss: 2.3336
Epoch 21/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m597s[0m 7s/step - accuracy: 0.8968 - loss: 0.3523 - val_accuracy: 0.5444 - val_loss: 1.7648
Epoch 22/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m601s[0m 7s/step - accuracy: 0.8908 - loss: 0.3455 - val_accuracy: 0.4556 - val_loss: 2.3906
Epoch 23/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m600s[0m 7s/step - accuracy: 0.9036 - loss: 0.3347 - val_accuracy: 0.5278 - val_loss: 1.7925
Epoch 24/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m599s[0m 7s/step - accuracy: 0.9023 - loss: 0.3163 - val_accuracy: 0.5139 - val_loss: 1.8930
Epoch 25/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m598s[0m 7s/step - accuracy: 0.9095 - loss: 0.3048 - val_accuracy: 0.5444 - val_loss: 2.0399
Epoch 26/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m599s[0m 7s/step - accuracy: 0.9182 - loss: 0.2555 - val_accuracy: 0.5278 - val_loss: 1.7862
Epoch 27/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m597s[0m 7s/step - accuracy: 0.9317 - loss: 0.2614 - val_accuracy: 0.5889 - val_loss: 1.6866
Epoch 28/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m528s[0m 6s/step - accuracy: 0.9347 - loss: 0.2201 - val_accuracy: 0.4111 - val_loss: 2.9578
Epoch 29/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m535s[0m 6s/step - accuracy: 0.9316 - loss: 0.2441 - val_accuracy: 0.5333 - val_loss: 1.9826
Epoch 30/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m540s[0m 6s/step - accuracy: 0.9414 - loss: 0.2172 - val_accuracy: 0.5472 - val_loss: 1.9726
Epoch 31/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m548s[0m 6s/step - accuracy: 0.9382 - loss: 0.2199 - val_accuracy: 0.5000 - val_loss: 2.2524
Epoch 32/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m551s[0m 6s/step - accuracy: 0.9272 - loss: 0.2298 - val_accuracy: 0.5417 - val_loss: 2.2075
Epoch 33/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m556s[0m 6s/step - accuracy: 0.9327 - loss: 0.2236 - val_accuracy: 0.5389 - val_loss: 2.1847
Epoch 34/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m559s[0m 6s/step - accuracy: 0.9348 - loss: 0.2021 - val_accuracy: 0.5583 - val_loss: 2.0273
Epoch 35/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m559s[0m 6s/step - accuracy: 0.9386 - loss: 0.1933 - val_accuracy: 0.5694 - val_loss: 2.0697
Epoch 36/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m615s[0m 7s/step - accuracy: 0.9410 - loss: 0.1903 - val_accuracy: 0.5222 - val_loss: 2.1861
Epoch 37/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m663s[0m 7s/step - accuracy: 0.9419 - loss: 0.1913 - val_accuracy: 0.4944 - val_loss: 2.3324
Epoch 38/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m669s[0m 7s/step - accuracy: 0.9379 - loss: 0.1853 - val_accuracy: 0.5417 - val_loss: 2.3182
Epoch 39/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m670s[0m 7s/step - accuracy: 0.9396 - loss: 0.1958 - val_accuracy: 0.5861 - val_loss: 2.0866
Epoch 40/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m670s[0m 7s/step - accuracy: 0.9565 - loss: 0.1503 - val_accuracy: 0.5556 - val_loss: 1.9975
Epoch 41/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m671s[0m 7s/step - accuracy: 0.9469 - loss: 0.1659 - val_accuracy: 0.5389 - val_loss: 2.2505
Epoch 42/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m670s[0m 7s/step - accuracy: 0.9500 - loss: 0.1637 - val_accuracy: 0.5444 - val_loss: 2.6272
Epoch 43/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m670s[0m 7s/step - accuracy: 0.9422 - loss: 0.1622 - val_accuracy: 0.5194 - val_loss: 2.5168
Epoch 44/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m629s[0m 7s/step - accuracy: 0.9649 - loss: 0.1330 - val_accuracy: 0.5722 - val_loss: 2.3526
Epoch 45/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m646s[0m 7s/step - accuracy: 0.9533 - loss: 0.1462 - val_accuracy: 0.3444 - val_loss: 4.0076
Epoch 46/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m656s[0m 7s/step - accuracy: 0.9386 - loss: 0.1820 - val_accuracy: 0.5167 - val_loss: 2.5950
Epoch 47/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m658s[0m 7s/step - accuracy: 0.9486 - loss: 0.1535 - val_accuracy: 0.4972 - val_loss: 2.7850
Epoch 48/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m662s[0m 7s/step - accuracy: 0.9573 - loss: 0.1538 - val_accuracy: 0.4306 - val_loss: 3.5450
Epoch 49/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m663s[0m 7s/step - accuracy: 0.9648 - loss: 0.1277 - val_accuracy: 0.5139 - val_loss: 2.8813
Epoch 50/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m668s[0m 7s/step - accuracy: 0.9462 - loss: 0.1673 - val_accuracy: 0.5167 - val_loss: 2.6039
Epoch 1/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m666s[0m 7s/step - accuracy: 0.0848 - loss: 3.1848 - val_accuracy: 0.1306 - val_loss: 2.7591
Epoch 2/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m663s[0m 7s/step - accuracy: 0.1759 - loss: 2.6226 - val_accuracy: 0.1917 - val_loss: 2.6106
Epoch 3/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m664s[0m 7s/step - accuracy: 0.2438 - loss: 2.2681 - val_accuracy: 0.3056 - val_loss: 2.4080
Epoch 4/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m663s[0m 7s/step - accuracy: 0.3625 - loss: 2.0474 - val_accuracy: 0.3722 - val_loss: 2.1757
Epoch 5/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m666s[0m 7s/step - accuracy: 0.3778 - loss: 1.8953 - val_accuracy: 0.3861 - val_loss: 1.9953
Epoch 6/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m682s[0m 8s/step - accuracy: 0.4329 - loss: 1.7916 - val_accuracy: 0.3917 - val_loss: 1.9048
Epoch 7/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m668s[0m 7s/step - accuracy: 0.4549 - loss: 1.7009 - val_accuracy: 0.4694 - val_loss: 1.7243
Epoch 8/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m664s[0m 7s/step - accuracy: 0.4900 - loss: 1.6145 - val_accuracy: 0.4333 - val_loss: 1.7711
Epoch 9/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m667s[0m 7s/step - accuracy: 0.4961 - loss: 1.5871 - val_accuracy: 0.4750 - val_loss: 1.6692
Epoch 10/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m666s[0m 7s/step - accuracy: 0.5313 - loss: 1.4237 - val_accuracy: 0.4583 - val_loss: 1.7184
Epoch 11/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m669s[0m 7s/step - accuracy: 0.5577 - loss: 1.3554 - val_accuracy: 0.4889 - val_loss: 1.5469
Epoch 12/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m663s[0m 7s/step - accuracy: 0.5801 - loss: 1.3644 - val_accuracy: 0.5111 - val_loss: 1.5456
Epoch 13/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m660s[0m 7s/step - accuracy: 0.5846 - loss: 1.2744 - val_accuracy: 0.4583 - val_loss: 1.6049
Epoch 14/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m662s[0m 7s/step - accuracy: 0.6172 - loss: 1.1892 - val_accuracy: 0.4778 - val_loss: 1.5839
Epoch 15/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m661s[0m 7s/step - accuracy: 0.6326 - loss: 1.1855 - val_accuracy: 0.5389 - val_loss: 1.5053
Epoch 16/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m662s[0m 7s/step - accuracy: 0.6254 - loss: 1.1606 - val_accuracy: 0.5083 - val_loss: 1.5106
Epoch 17/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m662s[0m 7s/step - accuracy: 0.6265 - loss: 1.1323 - val_accuracy: 0.5667 - val_loss: 1.4141
Epoch 18/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m659s[0m 7s/step - accuracy: 0.6474 - loss: 1.1108 - val_accuracy: 0.4750 - val_loss: 1.7386
Epoch 19/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m658s[0m 7s/step - accuracy: 0.6705 - loss: 1.0687 - val_accuracy: 0.5000 - val_loss: 1.5321
Epoch 20/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m663s[0m 7s/step - accuracy: 0.6778 - loss: 1.0302 - val_accuracy: 0.5306 - val_loss: 1.4830
Epoch 21/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m657s[0m 7s/step - accuracy: 0.6472 - loss: 1.0668 - val_accuracy: 0.5278 - val_loss: 1.5200
Epoch 22/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m662s[0m 7s/step - accuracy: 0.7118 - loss: 0.9241 - val_accuracy: 0.5472 - val_loss: 1.4165
Epoch 23/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m659s[0m 7s/step - accuracy: 0.7010 - loss: 0.9206 - val_accuracy: 0.5222 - val_loss: 1.6031
Epoch 24/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m657s[0m 7s/step - accuracy: 0.7166 - loss: 0.9056 - val_accuracy: 0.5556 - val_loss: 1.3807
Epoch 25/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m636s[0m 7s/step - accuracy: 0.7042 - loss: 0.9242 - val_accuracy: 0.5417 - val_loss: 1.4743
Epoch 26/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m630s[0m 7s/step - accuracy: 0.7289 - loss: 0.8675 - val_accuracy: 0.5361 - val_loss: 1.5065
Epoch 27/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m626s[0m 7s/step - accuracy: 0.7438 - loss: 0.8676 - val_accuracy: 0.5306 - val_loss: 1.5716
Epoch 28/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m626s[0m 7s/step - accuracy: 0.7322 - loss: 0.8394 - val_accuracy: 0.5694 - val_loss: 1.3942
Epoch 29/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m627s[0m 7s/step - accuracy: 0.7236 - loss: 0.8211 - val_accuracy: 0.5750 - val_loss: 1.4104
Epoch 30/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m624s[0m 7s/step - accuracy: 0.7646 - loss: 0.7565 - val_accuracy: 0.5694 - val_loss: 1.3522
Epoch 31/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m626s[0m 7s/step - accuracy: 0.7646 - loss: 0.7632 - val_accuracy: 0.5389 - val_loss: 1.4975
Epoch 32/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m626s[0m 7s/step - accuracy: 0.7566 - loss: 0.7264 - val_accuracy: 0.5778 - val_loss: 1.3838
Epoch 33/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m625s[0m 7s/step - accuracy: 0.7762 - loss: 0.7332 - val_accuracy: 0.5861 - val_loss: 1.3932
Epoch 34/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m631s[0m 7s/step - accuracy: 0.7917 - loss: 0.6899 - val_accuracy: 0.5611 - val_loss: 1.3881
Epoch 35/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m633s[0m 7s/step - accuracy: 0.7818 - loss: 0.6910 - val_accuracy: 0.6056 - val_loss: 1.3687
Epoch 36/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m632s[0m 7s/step - accuracy: 0.7911 - loss: 0.6860 - val_accuracy: 0.5417 - val_loss: 1.6040
Epoch 37/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m663s[0m 7s/step - accuracy: 0.8023 - loss: 0.6692 - val_accuracy: 0.5694 - val_loss: 1.4555
Epoch 38/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m634s[0m 7s/step - accuracy: 0.8116 - loss: 0.6243 - val_accuracy: 0.5833 - val_loss: 1.3814
Epoch 39/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m653s[0m 7s/step - accuracy: 0.7962 - loss: 0.6262 - val_accuracy: 0.5972 - val_loss: 1.3573
Epoch 40/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m649s[0m 7s/step - accuracy: 0.7984 - loss: 0.6513 - val_accuracy: 0.5444 - val_loss: 1.5204
Epoch 41/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m663s[0m 7s/step - accuracy: 0.8133 - loss: 0.6030 - val_accuracy: 0.5500 - val_loss: 1.4199
Epoch 42/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m659s[0m 7s/step - accuracy: 0.8067 - loss: 0.6098 - val_accuracy: 0.5861 - val_loss: 1.4111
Epoch 43/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m659s[0m 7s/step - accuracy: 0.8141 - loss: 0.5442 - val_accuracy: 0.5444 - val_loss: 1.5411
Epoch 44/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m656s[0m 7s/step - accuracy: 0.8331 - loss: 0.5373 - val_accuracy: 0.5528 - val_loss: 1.4741
Epoch 45/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m661s[0m 7s/step - accuracy: 0.8388 - loss: 0.5189 - val_accuracy: 0.5806 - val_loss: 1.4860
Epoch 46/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m660s[0m 7s/step - accuracy: 0.8487 - loss: 0.5245 - val_accuracy: 0.5111 - val_loss: 1.6518
Epoch 47/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m714s[0m 8s/step - accuracy: 0.8357 - loss: 0.5337 - val_accuracy: 0.6000 - val_loss: 1.3335
Epoch 48/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m686s[0m 8s/step - accuracy: 0.8741 - loss: 0.4625 - val_accuracy: 0.5583 - val_loss: 1.4303
Epoch 49/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m654s[0m 7s/step - accuracy: 0.8387 - loss: 0.5235 - val_accuracy: 0.5528 - val_loss: 1.4411
Epoch 50/50
[1m90/90[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m676s[0m 8s/step - accuracy: 0.8354 - loss: 0.4936 - val_accuracy: 0.5750 - val_loss: 1.3859
评估模型
from matplotlib.ticker import MultipleLocator

plt.rcParams['savefig.dpi'] = 300  # saved-figure resolution
plt.rcParams['figure.dpi'] = 300   # display resolution

# Per-epoch metric curves from both training runs.
acc1 = history_model1.history['accuracy']
acc2 = history_model2.history['accuracy']
val_acc1 = history_model1.history['val_accuracy']
val_acc2 = history_model2.history['val_accuracy']
loss1 = history_model1.history['loss']
loss2 = history_model2.history['loss']
val_loss1 = history_model1.history['val_loss']
val_loss2 = history_model2.history['val_loss']
epochs_range = range(len(acc1))


def _plot_curves(subplot_index, curves, title, legend_loc):
    """Draw one metric panel; ``curves`` is a list of (values, label) pairs."""
    plt.subplot(1, 2, subplot_index)
    for values, label in curves:
        plt.plot(epochs_range, values, label=label)
    plt.legend(loc=legend_loc)
    plt.title(title)
    # One x-axis tick per epoch.
    plt.gca().xaxis.set_major_locator(MultipleLocator(1))


plt.figure(figsize=(16, 4))
_plot_curves(1,
             [(acc1, 'Training Accuracy-Adam'),
              (acc2, 'Training Accuracy-SGD'),
              (val_acc1, 'Validation Accuracy-Adam'),
              (val_acc2, 'Validation Accuracy-SGD')],
             'Training and Validation Accuracy', 'lower right')
_plot_curves(2,
             [(loss1, 'Training Loss-Adam'),
              (loss2, 'Training Loss-SGD'),
              (val_loss1, 'Validation Loss-Adam'),
              (val_loss2, 'Validation Loss-SGD')],
             'Training and Validation Loss', 'upper right')
plt.show()
def test_accuracy_report(model):
    """Evaluate ``model`` on the validation set and print loss and accuracy.

    Args:
        model: a compiled Keras model (compiled with metrics=['accuracy'],
            so ``evaluate`` returns [loss, accuracy]).
    """
    loss, accuracy = model.evaluate(val_ds, verbose=0)
    # The original message printed the loss under the heading "Loss function",
    # mislabelling the value; report plain loss/accuracy instead.
    print('Loss: %.4f, accuracy: %.4f' % (loss, accuracy))


test_accuracy_report(model2)
总结
由结果对比可知,Adam优化器收敛更快、训练准确率更高;但在验证集上,SGD的准确率更高(约0.58对约0.52)、验证损失更低且更稳定,泛化效果更好。