- 🍨 This article is a learning-record blog post from the 🔗365天深度学习训练营 (365-day deep learning training camp)
- 🍖 Original author: K同学啊
🚀 My environment:
- Language: Python 3.12.6
- Editor: Jupyter Lab
- Deep learning framework: TensorFlow 2.17.0
Preliminary Setup
import pathlib
data_dir = "d:/Users/yxy/Desktop/48-data"
data_dir = pathlib.Path(data_dir)
# Count all .jpg files one directory level below the data root
image_count = len(list(data_dir.glob('*/*.jpg')))
print("Total number of images:", image_count)
Total number of images: 1800
from PIL import Image
# Preview the first image of the Jennifer Lawrence class
jennifer_images = list(data_dir.glob('Jennifer Lawrence/*.jpg'))
Image.open(str(jennifer_images[0]))
Data Preprocessing
batch_size = 32
img_height = 224
img_width = 224
import tensorflow as tf
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.1,
    subset="training",
    label_mode="categorical",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size)
Found 1800 files belonging to 17 classes.
Using 1620 files for training.
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.1,
    subset="validation",
    label_mode="categorical",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size)
Found 1800 files belonging to 17 classes.
Using 180 files for validation.
class_names = train_ds.class_names
print(class_names)
['Angelina Jolie', 'Brad Pitt', 'Denzel Washington', 'Hugh Jackman', 'Jennifer Lawrence', 'Johnny Depp', 'Kate Winslet', 'Leonardo DiCaprio', 'Megan Fox', 'Natalie Portman', 'Nicole Kidman', 'Robert Downey Jr', 'Sandra Bullock', 'Scarlett Johansson', 'Tom Cruise', 'Tom Hanks', 'Will Smith']
import numpy as np
import matplotlib.pyplot as plt
plt.figure(figsize=(20, 10))
for images, labels in train_ds.take(1):
    for i in range(20):
        ax = plt.subplot(5, 10, i + 1)
        plt.imshow(images[i].numpy().astype("uint8"))
        plt.title(class_names[np.argmax(labels[i])])
        plt.axis("off")
for image_batch, labels_batch in train_ds:
    print(image_batch.shape)
    print(labels_batch.shape)
    break
(32, 224, 224, 3)
(32, 17)
AUTOTUNE = tf.data.AUTOTUNE
# cache: keep decoded images in memory after the first epoch
# shuffle(1000): draw samples from a 1000-image shuffle buffer
# prefetch: overlap data loading with training
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
Building the CNN
from tensorflow.keras import models, layers
model = models.Sequential([
    layers.Input(shape=(img_height, img_width, 3)),
    layers.Rescaling(1./255),                       # normalize pixel values to [0, 1]
    layers.Conv2D(16, (3, 3), activation='relu'),   # conv layer 1, 3x3 kernels
    layers.AveragePooling2D((2, 2)),                # pooling layer 1, 2x2 downsampling
    layers.Conv2D(32, (3, 3), activation='relu'),   # conv layer 2, 3x3 kernels
    layers.AveragePooling2D((2, 2)),                # pooling layer 2, 2x2 downsampling
    layers.Dropout(0.5),
    layers.Conv2D(64, (3, 3), activation='relu'),   # conv layer 3, 3x3 kernels
    layers.AveragePooling2D((2, 2)),                # pooling layer 3, 2x2 downsampling
    layers.Dropout(0.5),
    layers.Conv2D(128, (3, 3), activation='relu'),  # conv layer 4, 3x3 kernels
    layers.Dropout(0.5),
    layers.Flatten(),                               # Flatten: bridges the conv layers and the dense layers
    layers.Dense(128, activation='relu'),           # fully connected layer for further feature extraction
    layers.Dense(len(class_names))                  # output layer: raw logits (softmax is applied in the loss via from_logits=True)
])
model.summary()
model.summary()
Model: "sequential"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓
┃ Layer (type)                         ┃ Output Shape                ┃         Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩
│ rescaling (Rescaling)                │ (None, 224, 224, 3)         │               0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ conv2d (Conv2D)                      │ (None, 222, 222, 16)        │             448 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ average_pooling2d (AveragePooling2D) │ (None, 111, 111, 16)        │               0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ conv2d_1 (Conv2D)                    │ (None, 109, 109, 32)        │           4,640 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ average_pooling2d_1                  │ (None, 54, 54, 32)          │               0 │
│ (AveragePooling2D)                   │                             │                 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dropout (Dropout)                    │ (None, 54, 54, 32)          │               0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ conv2d_2 (Conv2D)                    │ (None, 52, 52, 64)          │          18,496 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ average_pooling2d_2                  │ (None, 26, 26, 64)          │               0 │
│ (AveragePooling2D)                   │                             │                 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dropout_1 (Dropout)                  │ (None, 26, 26, 64)          │               0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ conv2d_3 (Conv2D)                    │ (None, 24, 24, 128)         │          73,856 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dropout_2 (Dropout)                  │ (None, 24, 24, 128)         │               0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ flatten (Flatten)                    │ (None, 73728)               │               0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dense (Dense)                        │ (None, 128)                 │       9,437,312 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dense_1 (Dense)                      │ (None, 17)                  │           2,193 │
└──────────────────────────────────────┴─────────────────────────────┴─────────────────┘
Total params: 9,536,945 (36.38 MB)
Trainable params: 9,536,945 (36.38 MB)
Non-trainable params: 0 (0.00 B)
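A quick back-of-the-envelope check (my own arithmetic, not part of the original post) on where the roughly 9.5M parameters come from: the first Dense layer dominates, because it connects every one of the 73,728 flattened features to 128 units.
flatten_dim = 24 * 24 * 128             # 73,728 features after Flatten
dense_params = flatten_dim * 128 + 128  # weights + biases
print(dense_params)                     # 9437312, about 99% of the 9,536,945 total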
Training the Model
initial_learning_rate = 1e-4
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate,
    decay_steps=60,    # Note: this counts training *steps*, not epochs!
    decay_rate=0.96,   # each decay multiplies the lr by decay_rate
    staircase=True)
# Feed the exponentially decaying learning rate into the optimizer
optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)
model.compile(optimizer=optimizer,
              loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
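To see the staircase decay in action, you can call the schedule directly; this quick check (not part of the original notebook) shows the learning rate dropping by a factor of 0.96 every 60 steps, which at 51 batches per epoch here is roughly every 1.2 epochs:
for step in [0, 59, 60, 120, 600]:
    print(step, float(lr_schedule(step)))
# 0   -> 1.000e-4  (initial lr, first "stair")
# 59  -> 1.000e-4  (still on the first stair)
# 60  -> 9.600e-5  (1e-4 * 0.96)
# 120 -> 9.216e-5  (1e-4 * 0.96**2)
# 600 -> 6.648e-5  (1e-4 * 0.96**10)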
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
epochs = 100
# Save the best model whenever validation accuracy improves
checkpointer = ModelCheckpoint('best_model.keras',
                               monitor='val_accuracy',
                               save_best_only=True)
# Set up early stopping
earlystopper = EarlyStopping(monitor='val_accuracy',
                             min_delta=0.001,
                             patience=20)
history = model.fit(train_ds,
                    validation_data=val_ds,
                    epochs=epochs,
                    callbacks=[checkpointer, earlystopper])
Epoch 1/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 26s 393ms/step - accuracy: 0.0827 - loss: 2.8268 - val_accuracy: 0.1389 - val_loss: 2.7637
Epoch 2/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 21s 421ms/step - accuracy: 0.1102 - loss: 2.7585 - val_accuracy: 0.1556 - val_loss: 2.6512
Epoch 3/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 22s 432ms/step - accuracy: 0.1661 - loss: 2.6480 - val_accuracy: 0.1778 - val_loss: 2.5926
Epoch 4/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 20s 385ms/step - accuracy: 0.2093 - loss: 2.4788 - val_accuracy: 0.1333 - val_loss: 2.5707
Epoch 5/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 18s 360ms/step - accuracy: 0.2398 - loss: 2.3431 - val_accuracy: 0.1833 - val_loss: 2.5165
Epoch 6/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 20s 387ms/step - accuracy: 0.2892 - loss: 2.1952 - val_accuracy: 0.2111 - val_loss: 2.4548
Epoch 7/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 380ms/step - accuracy: 0.3265 - loss: 2.0938 - val_accuracy: 0.2278 - val_loss: 2.3699
Epoch 8/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 18s 363ms/step - accuracy: 0.3713 - loss: 1.9857 - val_accuracy: 0.2056 - val_loss: 2.3612
Epoch 9/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 373ms/step - accuracy: 0.3688 - loss: 1.9512 - val_accuracy: 0.2722 - val_loss: 2.4096
Epoch 10/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 370ms/step - accuracy: 0.4240 - loss: 1.8404 - val_accuracy: 0.2889 - val_loss: 2.4087
Epoch 11/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 18s 361ms/step - accuracy: 0.4424 - loss: 1.7890 - val_accuracy: 0.2389 - val_loss: 2.3628
Epoch 12/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 366ms/step - accuracy: 0.4559 - loss: 1.7215 - val_accuracy: 0.2944 - val_loss: 2.4093
Epoch 13/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 372ms/step - accuracy: 0.5207 - loss: 1.5909 - val_accuracy: 0.2778 - val_loss: 2.4230
Epoch 14/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 380ms/step - accuracy: 0.5144 - loss: 1.5185 - val_accuracy: 0.3000 - val_loss: 2.4088
Epoch 15/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 379ms/step - accuracy: 0.5400 - loss: 1.4638 - val_accuracy: 0.2833 - val_loss: 2.4511
Epoch 16/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 368ms/step - accuracy: 0.5685 - loss: 1.3780 - val_accuracy: 0.2889 - val_loss: 2.4629
Epoch 17/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 364ms/step - accuracy: 0.5700 - loss: 1.3317 - val_accuracy: 0.2833 - val_loss: 2.4272
Epoch 18/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 375ms/step - accuracy: 0.6292 - loss: 1.1891 - val_accuracy: 0.3444 - val_loss: 2.4488
Epoch 19/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 367ms/step - accuracy: 0.6551 - loss: 1.1108 - val_accuracy: 0.3000 - val_loss: 2.5322
Epoch 20/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 377ms/step - accuracy: 0.6718 - loss: 1.0616 - val_accuracy: 0.3222 - val_loss: 2.4995
Epoch 21/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 20s 398ms/step - accuracy: 0.6965 - loss: 0.9727 - val_accuracy: 0.3278 - val_loss: 2.6823
Epoch 22/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 373ms/step - accuracy: 0.7338 - loss: 0.9325 - val_accuracy: 0.2889 - val_loss: 2.7349
Epoch 23/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 366ms/step - accuracy: 0.7413 - loss: 0.8511 - val_accuracy: 0.2833 - val_loss: 2.7395
Epoch 24/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 366ms/step - accuracy: 0.7651 - loss: 0.7753 - val_accuracy: 0.3278 - val_loss: 2.7438
Epoch 25/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 367ms/step - accuracy: 0.7708 - loss: 0.7456 - val_accuracy: 0.2778 - val_loss: 2.9000
Epoch 26/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 18s 360ms/step - accuracy: 0.7940 - loss: 0.6489 - val_accuracy: 0.3222 - val_loss: 2.7897
Epoch 27/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 23s 445ms/step - accuracy: 0.8191 - loss: 0.5812 - val_accuracy: 0.2833 - val_loss: 2.8317
Epoch 28/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 21s 419ms/step - accuracy: 0.8143 - loss: 0.5806 - val_accuracy: 0.3389 - val_loss: 2.8969
Epoch 29/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 21s 410ms/step - accuracy: 0.8359 - loss: 0.5226 - val_accuracy: 0.3278 - val_loss: 2.9856
Epoch 30/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 22s 428ms/step - accuracy: 0.8339 - loss: 0.5384 - val_accuracy: 0.3111 - val_loss: 3.1002
Epoch 31/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 20s 383ms/step - accuracy: 0.8685 - loss: 0.4484 - val_accuracy: 0.3111 - val_loss: 3.1016
Epoch 32/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 374ms/step - accuracy: 0.8747 - loss: 0.4216 - val_accuracy: 0.2944 - val_loss: 3.1678
Epoch 33/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 370ms/step - accuracy: 0.8856 - loss: 0.3835 - val_accuracy: 0.3333 - val_loss: 3.1715
Epoch 34/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 371ms/step - accuracy: 0.8748 - loss: 0.4013 - val_accuracy: 0.3000 - val_loss: 3.2885
Epoch 35/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 369ms/step - accuracy: 0.8952 - loss: 0.3723 - val_accuracy: 0.3000 - val_loss: 3.4370
Epoch 36/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 374ms/step - accuracy: 0.8971 - loss: 0.3334 - val_accuracy: 0.3278 - val_loss: 3.4450
Epoch 37/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 20s 389ms/step - accuracy: 0.9179 - loss: 0.2977 - val_accuracy: 0.3500 - val_loss: 3.3395
Epoch 38/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 368ms/step - accuracy: 0.9114 - loss: 0.2968 - val_accuracy: 0.3222 - val_loss: 3.5281
Epoch 39/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 372ms/step - accuracy: 0.9251 - loss: 0.2621 - val_accuracy: 0.3167 - val_loss: 3.6133
Epoch 40/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 365ms/step - accuracy: 0.9231 - loss: 0.2764 - val_accuracy: 0.3222 - val_loss: 3.5546
Epoch 41/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 365ms/step - accuracy: 0.9209 - loss: 0.2641 - val_accuracy: 0.3000 - val_loss: 3.6872
Epoch 42/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 370ms/step - accuracy: 0.9271 - loss: 0.2513 - val_accuracy: 0.3111 - val_loss: 3.7209
Epoch 43/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 365ms/step - accuracy: 0.9533 - loss: 0.1985 - val_accuracy: 0.3278 - val_loss: 3.6575
Epoch 44/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 366ms/step - accuracy: 0.9436 - loss: 0.1984 - val_accuracy: 0.3111 - val_loss: 3.7081
Epoch 45/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 366ms/step - accuracy: 0.9534 - loss: 0.2002 - val_accuracy: 0.3278 - val_loss: 3.7684
Epoch 46/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 372ms/step - accuracy: 0.9434 - loss: 0.1896 - val_accuracy: 0.3222 - val_loss: 3.7947
Epoch 47/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 366ms/step - accuracy: 0.9374 - loss: 0.1759 - val_accuracy: 0.3389 - val_loss: 3.7310
Epoch 48/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 369ms/step - accuracy: 0.9561 - loss: 0.1816 - val_accuracy: 0.3389 - val_loss: 3.8626
Epoch 49/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 368ms/step - accuracy: 0.9643 - loss: 0.1660 - val_accuracy: 0.3333 - val_loss: 3.9613
Epoch 50/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 364ms/step - accuracy: 0.9519 - loss: 0.1636 - val_accuracy: 0.3111 - val_loss: 3.9040
Epoch 51/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 369ms/step - accuracy: 0.9653 - loss: 0.1408 - val_accuracy: 0.3278 - val_loss: 3.9250
Epoch 52/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 378ms/step - accuracy: 0.9608 - loss: 0.1406 - val_accuracy: 0.3222 - val_loss: 4.0969
Epoch 53/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 373ms/step - accuracy: 0.9556 - loss: 0.1536 - val_accuracy: 0.3278 - val_loss: 4.0601
Epoch 54/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 20s 389ms/step - accuracy: 0.9668 - loss: 0.1324 - val_accuracy: 0.3444 - val_loss: 3.9655
Epoch 55/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 374ms/step - accuracy: 0.9580 - loss: 0.1538 - val_accuracy: 0.3444 - val_loss: 4.2095
Epoch 56/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 20s 396ms/step - accuracy: 0.9658 - loss: 0.1314 - val_accuracy: 0.3611 - val_loss: 4.1347
Epoch 57/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 21s 402ms/step - accuracy: 0.9649 - loss: 0.1507 - val_accuracy: 0.3611 - val_loss: 4.1817
Epoch 58/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 381ms/step - accuracy: 0.9725 - loss: 0.1140 - val_accuracy: 0.3333 - val_loss: 4.1600
Epoch 59/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 372ms/step - accuracy: 0.9590 - loss: 0.1343 - val_accuracy: 0.3278 - val_loss: 4.2230
Epoch 60/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 376ms/step - accuracy: 0.9769 - loss: 0.0986 - val_accuracy: 0.3389 - val_loss: 4.1082
Epoch 61/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 381ms/step - accuracy: 0.9700 - loss: 0.1157 - val_accuracy: 0.3056 - val_loss: 4.2214
Epoch 62/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 381ms/step - accuracy: 0.9769 - loss: 0.1101 - val_accuracy: 0.3333 - val_loss: 4.2070
Epoch 63/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 378ms/step - accuracy: 0.9728 - loss: 0.1079 - val_accuracy: 0.3278 - val_loss: 4.2743
Epoch 64/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 378ms/step - accuracy: 0.9679 - loss: 0.1054 - val_accuracy: 0.3389 - val_loss: 4.3470
Epoch 65/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 381ms/step - accuracy: 0.9748 - loss: 0.1091 - val_accuracy: 0.3278 - val_loss: 4.3067
Epoch 66/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 381ms/step - accuracy: 0.9804 - loss: 0.0890 - val_accuracy: 0.3278 - val_loss: 4.3438
Epoch 67/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 20s 383ms/step - accuracy: 0.9739 - loss: 0.1109 - val_accuracy: 0.3389 - val_loss: 4.4087
Epoch 68/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 20s 390ms/step - accuracy: 0.9814 - loss: 0.0843 - val_accuracy: 0.3278 - val_loss: 4.2631
Epoch 69/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 381ms/step - accuracy: 0.9815 - loss: 0.0858 - val_accuracy: 0.3167 - val_loss: 4.3813
Epoch 70/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 376ms/step - accuracy: 0.9711 - loss: 0.1060 - val_accuracy: 0.3222 - val_loss: 4.3157
Epoch 71/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 373ms/step - accuracy: 0.9712 - loss: 0.0919 - val_accuracy: 0.3278 - val_loss: 4.3920
Epoch 72/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 378ms/step - accuracy: 0.9768 - loss: 0.0913 - val_accuracy: 0.3278 - val_loss: 4.4655
Epoch 73/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 374ms/step - accuracy: 0.9757 - loss: 0.1045 - val_accuracy: 0.3278 - val_loss: 4.4178
Epoch 74/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 377ms/step - accuracy: 0.9723 - loss: 0.0952 - val_accuracy: 0.3167 - val_loss: 4.4283
Epoch 75/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 375ms/step - accuracy: 0.9770 - loss: 0.0881 - val_accuracy: 0.3222 - val_loss: 4.4729
Epoch 76/100
51/51 ━━━━━━━━━━━━━━━━━━━━ 19s 368ms/step - accuracy: 0.9830 - loss: 0.0806 - val_accuracy: 0.3167 - val_loss: 4.4333
Model Evaluation
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(len(loss))
plt.figure(figsize=(12, 4))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
# Load the weights of the best-performing model
model.load_weights('best_model.keras')
from PIL import Image
import numpy as np
img = Image.open("d:/Users/yxy/Desktop/48-data/Jennifer Lawrence/003_963a3627.jpg")  # choose the image you want to predict
image = tf.image.resize(np.array(img), [img_height, img_width])
img_array = tf.expand_dims(image, 0)  # add a batch dimension
predictions = model.predict(img_array)  # run inference with the trained model
print("Prediction:", class_names[np.argmax(predictions)])
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 101ms/step
Prediction: Jennifer Lawrence
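Because the final Dense layer outputs raw logits, applying a softmax turns them into per-class probabilities; here is a small optional extension (not in the original post) for inspecting the model's confidence:
probs = tf.nn.softmax(predictions[0]).numpy()
for i in np.argsort(probs)[::-1][:3]:  # top-3 predicted classes
    print(f"{class_names[i]}: {probs[i]:.2%}")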
Summary
1. Loss functions:
categorical_crossentropy, used in this post, is for multi-class problems whose labels are one-hot encoded (which is why label_mode="categorical" was passed to image_dataset_from_directory above).
sparse_categorical_crossentropy also targets multi-class problems, but expects integer-encoded labels. For a three-class task, for example, the labels can simply be 0, 1, 2 instead of one-hot vectors; the two losses compute the same value, as illustrated below.
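A minimal, self-contained comparison (toy values, not from this project) showing that the two losses agree when the labels encode the same class:
import tensorflow as tf
logits = tf.constant([[0.5, 2.0, 0.3]])  # raw model outputs for 3 classes
y_onehot = tf.constant([[0., 1., 0.]])   # one-hot label for class 1
y_int = tf.constant([1])                 # the same label as an integer
cce = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
scce = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
print(float(cce(y_onehot, logits)), float(scce(y_int, logits)))  # identical values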
2. This post uses the ModelCheckpoint and EarlyStopping callbacks to save the best model and to stop training once validation accuracy stops improving.
ModelCheckpoint: saves the model to best_model.keras whenever val_accuracy improves.
EarlyStopping: stops training if val_accuracy fails to improve by at least min_delta=0.001 for 20 consecutive epochs, which is what ended the run above at epoch 76 (the best val_accuracy, 0.3611, came at epoch 56). This avoids overfitting and wasted computation.
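One variation worth trying (my suggestion, not used in the run above): EarlyStopping also accepts restore_best_weights=True, which rolls the model back to its best epoch automatically and removes the need to reload best_model.keras by hand:
earlystopper = EarlyStopping(monitor='val_accuracy',
                             min_delta=0.001,
                             patience=20,
                             restore_best_weights=True)  # roll back to the best weights on stop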