第T10周数据增强

🚀我的环境:

  • 语言环境:python 3.12.6
  • 开发环境:Jupyter Lab
  • 深度学习环境:TensorFlow 2.17.0
import tensorflow as tf
# Dataset location and loading hyper-parameters.
data_dir = "C:/Users/PC/Desktop/34-data"  # root folder: one sub-directory per class
img_height = 224  # target image height after resize
img_width = 224   # target image width after resize
batch_size = 32   # samples per batch

# Load the training subset: 70/30 split, fixed seed so the validation
# call below partitions the same way.
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.3,
    subset="training",
    seed=12,
    image_size=(img_height, img_width),
    batch_size=batch_size,
)
Found 600 files belonging to 2 classes.
Using 420 files for training.
# Load the validation subset; same seed/split as the training call so the
# two subsets are disjoint.
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.3,
    subset="validation",
    seed=12,
    image_size=(img_height, img_width),
    batch_size=batch_size,
)
Found 600 files belonging to 2 classes.
Using 180 files for validation.
# The source dataset has no test split, so carve one out of validation:
# count the validation batches, then move 20% of them (1 of 5 here) into
# a test set. Dataset.cardinality() is the non-experimental TF 2.x API
# (tf.data.experimental.cardinality is deprecated).
val_batches = val_ds.cardinality()  # number of batches, as a scalar tensor
test_ds = val_ds.take(val_batches // 5)
val_ds = val_ds.skip(val_batches // 5)
print('number of validation batches:%d' % val_ds.cardinality())
print('number of test batches:%d' % test_ds.cardinality())
number of validation batches:5
number of test batches:1
# Class labels are inferred from the sub-directory names.
class_names = train_ds.class_names
print(class_names)
['cat', 'dog']
AUTOTUNE = tf.data.AUTOTUNE

def preprocess_image(image, label):
    """Rescale pixel values from [0, 255] into [0, 1]; labels pass through."""
    scaled = image / 255.0
    return scaled, label

# Normalize every split; cache + prefetch the two splits used by fit().
# (test_ds is evaluated once at the end, so it is left uncached.)
train_ds = (train_ds
            .map(preprocess_image, num_parallel_calls=AUTOTUNE)
            .cache()
            .prefetch(buffer_size=AUTOTUNE))
val_ds = (val_ds
          .map(preprocess_image, num_parallel_calls=AUTOTUNE)
          .cache()
          .prefetch(buffer_size=AUTOTUNE))
test_ds = test_ds.map(preprocess_image, num_parallel_calls=AUTOTUNE)
import matplotlib.pyplot as plt
# Display the first 8 training images with their class names.
plt.figure(figsize=(15, 10))  # figure 15 wide, 10 tall

for images, labels in train_ds.take(1):
    # NOTE: `images`, `labels`, and the final value of `i` are reused by
    # the augmentation-preview code further below — keep these names.
    for i in range(8):
        plt.subplot(5, 8, i + 1)
        plt.imshow(images[i])
        plt.title(class_names[labels[i]])
        plt.axis("off")

(此处为原文插图:训练集样本预览,8 张图片及其类别标签)

数据增强

# Augmentation pipeline: random horizontal/vertical flips plus a random
# rotation. RandomRotation's factor is a fraction of a full turn, so 0.2
# means angles in roughly ±0.2 * 2π radians (±72°).
data_augmentation = tf.keras.Sequential([
    tf.keras.layers.RandomFlip("horizontal_and_vertical"),
    tf.keras.layers.RandomRotation(0.2),
])

# `images` and `i` are leftovers from the preview loop above (i == 7 after
# that loop), so this picks the 8th image of the batch — fragile; relies on
# the earlier cell having run. Shows 9 independently augmented variants.
image=tf.expand_dims(images[i],0)  # add a batch dimension: (1, H, W, C)
plt.figure(figsize=(8,8))
for i in range(9):
    # NOTE(review): called directly (no training=True); whether the random
    # layers actually transform here depends on the Keras version — confirm.
    augmented_image=data_augmentation(image)
    ax=plt.subplot(3,3,i+1)
    plt.imshow(augmented_image[0])
    plt.axis("off")

(此处为原文插图:同一张图片经随机翻转与旋转后的 9 种增强效果)

增强方式一:将其嵌入model中

import tensorflow as tf  
from tensorflow.keras import layers
# Method 1: embed the augmentation layers directly inside the model.
# The random layers are active only while training; at inference they
# pass inputs through unchanged. (This demo model is replaced below.)
model = tf.keras.Sequential([
    data_augmentation,
    layers.Conv2D(16, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
])

方法二:在dataset数据集中进行数据增强

batch_size = 32  # re-declared for this section; same value as above
AUTOTUNE = tf.data.AUTOTUNE  # re-declared for this section; same value as above

def prepare(ds):
    """Method 2: apply the augmentation pipeline inside the tf.data graph."""
    # training=True forces the random layers to actually transform batches.
    augment = lambda x, y: (data_augmentation(x, training=True), y)
    return ds.map(augment, num_parallel_calls=AUTOTUNE)

train_ds = prepare(train_ds)

训练模型

# A small CNN: three conv/pool stages followed by a dense classifier head.
model = tf.keras.Sequential([
    layers.Conv2D(16, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(32, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(64, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Flatten(),
    layers.Dense(128, activation='relu'),
    layers.Dense(len(class_names)),  # raw logits; paired with from_logits=True
])

# Integer-encoded labels -> sparse categorical cross-entropy on logits.
model.compile(
    optimizer='adam',
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=['accuracy'],
)

epochs = 20
history = model.fit(
    train_ds,
    validation_data=val_ds,
    epochs=epochs,
)
Epoch 1/20
[1m14/14[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m13s[0m 552ms/step - accuracy: 0.4658 - loss: 2.0357 - val_accuracy: 0.5743 - val_loss: 0.6860
Epoch 2/20
[1m14/14[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m7s[0m 460ms/step - accuracy: 0.5377 - loss: 0.6911 - val_accuracy: 0.6486 - val_loss: 0.6591
Epoch 3/20
[1m14/14[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m7s[0m 479ms/step - accuracy: 0.5047 - loss: 0.7048 - val_accuracy: 0.4189 - val_loss: 0.7036
Epoch 4/20
[1m14/14[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m7s[0m 445ms/step - accuracy: 0.6013 - loss: 0.6658 - val_accuracy: 0.7500 - val_loss: 0.5791
Epoch 5/20
[1m14/14[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m7s[0m 471ms/step - accuracy: 0.7784 - loss: 0.5445 - val_accuracy: 0.7500 - val_loss: 0.4860
Epoch 6/20
[1m14/14[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m7s[0m 465ms/step - accuracy: 0.7894 - loss: 0.4906 - val_accuracy: 0.7568 - val_loss: 0.4605
Epoch 7/20
[1m14/14[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m8s[0m 500ms/step - accuracy: 0.7857 - loss: 0.4711 - val_accuracy: 0.7095 - val_loss: 0.5757
Epoch 8/20
[1m14/14[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m7s[0m 473ms/step - accuracy: 0.7611 - loss: 0.4665 - val_accuracy: 0.7568 - val_loss: 0.4784
Epoch 9/20
[1m14/14[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m7s[0m 451ms/step - accuracy: 0.8123 - loss: 0.4278 - val_accuracy: 0.8446 - val_loss: 0.3827
Epoch 10/20
[1m14/14[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m7s[0m 463ms/step - accuracy: 0.8111 - loss: 0.3671 - val_accuracy: 0.8649 - val_loss: 0.3651
Epoch 11/20
[1m14/14[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m8s[0m 499ms/step - accuracy: 0.8447 - loss: 0.3547 - val_accuracy: 0.8716 - val_loss: 0.3382
Epoch 12/20
[1m14/14[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m7s[0m 483ms/step - accuracy: 0.9059 - loss: 0.2697 - val_accuracy: 0.8919 - val_loss: 0.3045
Epoch 13/20
[1m14/14[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m8s[0m 489ms/step - accuracy: 0.8941 - loss: 0.2647 - val_accuracy: 0.8986 - val_loss: 0.2810
Epoch 14/20
[1m14/14[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m8s[0m 513ms/step - accuracy: 0.9245 - loss: 0.2233 - val_accuracy: 0.8851 - val_loss: 0.2944
Epoch 15/20
[1m14/14[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m7s[0m 460ms/step - accuracy: 0.8831 - loss: 0.2767 - val_accuracy: 0.7905 - val_loss: 0.5426
Epoch 16/20
[1m14/14[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m7s[0m 467ms/step - accuracy: 0.8961 - loss: 0.2494 - val_accuracy: 0.8514 - val_loss: 0.3678
Epoch 17/20
[1m14/14[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m7s[0m 466ms/step - accuracy: 0.9080 - loss: 0.1834 - val_accuracy: 0.8446 - val_loss: 0.3440
Epoch 18/20
[1m14/14[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m7s[0m 460ms/step - accuracy: 0.9278 - loss: 0.2186 - val_accuracy: 0.8514 - val_loss: 0.3474
Epoch 19/20
[1m14/14[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m7s[0m 466ms/step - accuracy: 0.9366 - loss: 0.1672 - val_accuracy: 0.8108 - val_loss: 0.6559
Epoch 20/20
[1m14/14[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m7s[0m 459ms/step - accuracy: 0.9481 - loss: 0.1700 - val_accuracy: 0.9054 - val_loss: 0.2914
# Final generalization check on the held-out test batch.
test_loss, test_acc = model.evaluate(test_ds)
print("Accuracy", test_acc)
[1m1/1[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 251ms/step - accuracy: 0.9062 - loss: 0.3152
Accuracy 0.90625

自定义增强函数

import random
# 这是大家可以自由发挥的一个地方
# Custom augmentation hook — a place to experiment freely.
def aug_img(image):
    """Randomly adjust the contrast of `image`.

    A fresh 2-tuple seed is drawn per call so successive calls produce
    different (but individually reproducible) augmentations.
    """
    seed = (random.randint(0, 9), 0)  # stateless image ops require a 2-tuple seed
    # Randomly scale contrast by a factor drawn from [0.1, 1.0).
    # (The original bound this to a variable misnamed `..._brightness`.)
    contrasted = tf.image.stateless_random_contrast(
        image, lower=0.1, upper=1.0, seed=seed)
    return contrasted
# `images` was normalized to [0, 1] earlier, so *255 restores the original
# pixel scale; expand_dims adds a batch axis -> (1, H, W, C).
image = tf.expand_dims(images[3] * 255, 0)
print("Min and max pixel values:", image.numpy().min(), image.numpy().max())
Min and max pixel values: 14.000048 253.28577
# Show 9 independently contrast-augmented versions of the same image.
plt.figure(figsize=(8, 8))
for idx in range(9):
    augmented = aug_img(image)
    plt.subplot(3, 3, idx + 1)
    plt.imshow(augmented[0].numpy().astype("uint8"))
    plt.axis("off")

(此处为原文插图:自定义随机对比度增强的 9 种效果)
总结
数据增强是机器学习中用来扩充训练数据集的技术,目的是通过创建数据的变体来模拟更多的训练样本,从而提高模型的泛化能力和减少过拟合。
本周学习了两种数据增强方法,一是将数据增强模块嵌入model中,二是在Dataset数据集中进行数据增强。

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值