VGGNet网络结构及实现--对自己数据集进行识别

本文介绍了一种使用TensorFlow实现VGG16卷积神经网络的方法,并详细展示了如何通过该模型进行图像分类任务。包括数据准备、模型构建、训练过程及评估结果。
部署运行你感兴趣的模型镜像

VGG16网络结构如下所示:

 实战代码

 

# Standard-library imports.
import os
import pathlib

# Third-party imports.
# NOTE: the original pasted the whole import block twice; it is deduplicated
# here. Both MaxPool2D and MaxPooling2D are kept because both aliases were
# imported originally (they refer to the same Keras layer class).
import matplotlib.pylab as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, models, Model, Sequential
from tensorflow.keras.layers import (
    Activation,
    BatchNormalization,
    Conv2D,
    Dense,
    Dropout,
    Flatten,
    MaxPool2D,
    MaxPooling2D,
)
import PySide2

# Point Qt at PySide2's bundled platform plugins so the matplotlib Qt backend
# can start (workaround for the common "could not load the Qt platform
# plugin" error on Windows).
dirname = os.path.dirname(PySide2.__file__)
plugin_path = os.path.join(dirname, 'plugins', 'platforms')
os.environ['QT_QPA_PLATFORM_PLUGIN_PATH'] = plugin_path

# ---- Data preparation ----
# Raw string so the backslashes in the Windows path are not treated as
# escape sequences.
data_dir = pathlib.Path(r"F:\danzi\数据")  # directory of class sub-folders (c1-c10)
# image_count = len(list(data_dir.glob("*/*")))  # total number of images
# print("total image count:", image_count)

# Hyper-parameters.
batch_size = 8
image_height = 64
image_width = 64
epoch = 50

# Split the directory into train/validation subsets.
# BUG FIX: the original passed validation_split=None to BOTH calls, so the
# "test" dataset was byte-identical to the training dataset and the reported
# validation accuracy was meaningless. An 80/20 split with a shared seed
# gives disjoint subsets.
train_dst = keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,   # hold out 20% of the images for validation
    subset="training",
    seed=123,               # same seed on both calls keeps the split consistent
    image_size=(image_height, image_width),
    batch_size=batch_size,
)

test_dst = keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="validation",
    seed=123,
    image_size=(image_height, image_width),
    batch_size=batch_size,
)

# Class names are inferred from the sub-folder names.
train_dst_labels = train_dst.class_names
print(train_dst_labels)

# Cache, shuffle and prefetch; AUTOTUNE lets tf.data pick the number of
# parallel calls based on available CPU.
AUTOTUNE = tf.data.experimental.AUTOTUNE
train_dst = train_dst.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
test_dst = test_dst.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)


# 模型的定义
class VGG16(Model):
    """VGG16 convolutional network for image classification.

    Standard 13-conv-layer VGG16 body (Conv -> BatchNorm -> ReLU per layer,
    MaxPool + Dropout after each stage) followed by two 4096-unit fully
    connected layers and a softmax classification head.

    Args:
        num_classes: number of output classes. Defaults to 11 to match the
            original hard-coded head, but can now be set for other datasets.
    """

    def __init__(self, num_classes=11):
        super(VGG16, self).__init__()
        # padding="SAME" zero-pads so the spatial size is preserved;
        # "VALID" would not pad.
        """
        Stage 1: two conv layers with 64 filters.
        """
        self.c1 = Conv2D(filters=64, kernel_size=(3, 3), padding="SAME")
        # Batch normalization mitigates vanishing gradients and speeds up
        # training convergence.
        self.b1 = BatchNormalization()
        self.a1 = Activation('relu')
        self.c2 = Conv2D(filters=64, kernel_size=(3, 3), padding="SAME")
        self.b2 = BatchNormalization()
        self.a2 = Activation('relu')
        self.p1 = MaxPool2D(pool_size=(2, 2), strides=2)

        self.d1 = Dropout(0.2)

        """
        Stage 2: two conv layers with 128 filters.
        """
        self.c3 = Conv2D(filters=128, kernel_size=(3, 3), padding="SAME")
        self.b3 = BatchNormalization()
        self.a3 = Activation('relu')

        self.c4 = Conv2D(filters=128, kernel_size=(3, 3), padding="SAME")
        self.b4 = BatchNormalization()
        self.a4 = Activation('relu')
        self.p2 = MaxPool2D(pool_size=(2, 2), strides=2)
        self.d2 = Dropout(0.2)

        """
        Stage 3: three conv layers with 256 filters.
        """
        self.c5 = Conv2D(filters=256, kernel_size=(3, 3), padding="SAME")
        self.b5 = BatchNormalization()
        self.a5 = Activation('relu')

        self.c6 = Conv2D(filters=256, kernel_size=(3, 3), padding="SAME")
        self.b6 = BatchNormalization()
        self.a6 = Activation('relu')

        self.c7 = Conv2D(filters=256, kernel_size=(3, 3), padding="SAME")
        self.b7 = BatchNormalization()
        self.a7 = Activation('relu')
        self.p3 = MaxPool2D(pool_size=(2, 2), strides=2)
        self.d3 = Dropout(0.2)

        """
        Stages 4-5: six conv layers with 512 filters (three per stage).
        """
        self.c8 = Conv2D(filters=512, kernel_size=(3, 3), padding="SAME")
        self.b8 = BatchNormalization()
        self.a8 = Activation('relu')

        self.c9 = Conv2D(filters=512, kernel_size=(3, 3), padding="SAME")
        self.b9 = BatchNormalization()
        self.a9 = Activation('relu')

        self.c10 = Conv2D(filters=512, kernel_size=(3, 3), padding="SAME")
        self.b10 = BatchNormalization()
        self.a10 = Activation('relu')
        self.p4 = MaxPool2D(pool_size=(2, 2), strides=2)
        self.d4 = Dropout(0.2)

        self.c11 = Conv2D(filters=512, kernel_size=(3, 3), padding="SAME")
        self.b11 = BatchNormalization()
        self.a11 = Activation('relu')

        self.c12 = Conv2D(filters=512, kernel_size=(3, 3), padding="SAME")
        self.b12 = BatchNormalization()
        self.a12 = Activation('relu')

        self.c13 = Conv2D(filters=512, kernel_size=(3, 3), padding="SAME")
        self.b13 = BatchNormalization()
        self.a13 = Activation('relu')
        self.p5 = MaxPool2D(pool_size=(2, 2), strides=2)
        self.d5 = Dropout(0.2)

        """
        Two fully connected layers with 4096 units each.
        """
        # Flatten collapses the spatial dimensions to one vector per sample
        # (does not affect the batch dimension) before the dense layers.
        self.flatten = Flatten()
        # A fully connected layer is called Dense in Keras (Linear in PyTorch).
        self.f1 = Dense(4096, activation='relu')
        self.d6 = Dropout(0.2)
        self.f2 = Dense(4096, activation='relu')
        self.d7 = Dropout(0.2)

        """
        Softmax classification head (num_classes units).
        """
        self.f3 = Dense(num_classes, activation='softmax')

    def call(self, x):
        """Forward pass: returns class probabilities for a batch of images."""
        x = self.c1(x)
        x = self.b1(x)
        x = self.a1(x)

        x = self.c2(x)
        x = self.b2(x)
        x = self.a2(x)
        x = self.p1(x)
        x = self.d1(x)

        x = self.c3(x)
        x = self.b3(x)
        x = self.a3(x)

        x = self.c4(x)
        x = self.b4(x)
        x = self.a4(x)
        x = self.p2(x)
        x = self.d2(x)

        x = self.c5(x)
        x = self.b5(x)
        x = self.a5(x)

        x = self.c6(x)
        x = self.b6(x)
        x = self.a6(x)

        x = self.c7(x)
        x = self.b7(x)
        x = self.a7(x)
        x = self.p3(x)
        x = self.d3(x)

        x = self.c8(x)
        x = self.b8(x)
        x = self.a8(x)

        x = self.c9(x)
        x = self.b9(x)
        x = self.a9(x)

        x = self.c10(x)
        x = self.b10(x)
        x = self.a10(x)
        x = self.p4(x)
        x = self.d4(x)

        x = self.c11(x)
        x = self.b11(x)
        x = self.a11(x)

        x = self.c12(x)
        x = self.b12(x)
        x = self.a12(x)

        x = self.c13(x)
        x = self.b13(x)
        x = self.a13(x)
        x = self.p5(x)
        x = self.d5(x)

        x = self.flatten(x)
        x = self.f1(x)
        x = self.d6(x)

        x = self.f2(x)
        x = self.d7(x)

        y = self.f3(x)

        return y


# ---- Model creation, training and weight export ----
model = VGG16()
opt = tf.keras.optimizers.Adam(learning_rate=1e-4)

# from_logits=False because the model's final Dense layer already applies
# softmax; the loss receives probabilities, not raw logits.
model.compile(optimizer=opt, loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['sparse_categorical_accuracy'])

history = model.fit(train_dst, validation_data=test_dst, epochs=epoch)
model.save_weights('VGG16_model.h5')
model.summary()

# Inspect the trainable variables.
print(model.trainable_variables)

# Dump every trainable variable (name, shape, values) to a text file.
# A context manager guarantees the file is closed even if a write fails
# (the original used an explicit open()/close() pair).
with open('./weight.txt', 'w') as file:
    for v in model.trainable_variables:
        file.write(str(v.name) + '\n')
        file.write(str(v.shape) + '\n')
        file.write(str(v.numpy()) + '\n')

#############   show    ##################
# Plot training/validation accuracy and loss curves from the fit history.
acc = history.history['sparse_categorical_accuracy']
val_acc = history.history['val_sparse_categorical_accuracy']

loss = history.history['loss']
val_loss = history.history['val_loss']

plt.subplot(121)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
# Title is driven by the actual batch_size variable; the original hard-coded
# "bs=6" while batch_size was set to 8.
plt.title(f"VGG16 bs={batch_size} Accuracy")
plt.legend()

plt.subplot(122)
plt.plot(loss, label='Train Loss')
plt.plot(val_loss, label='Validation Loss')
plt.title(f"VGG16 bs={batch_size}  Loss")
plt.legend()
plt.show()

 

 

您可能感兴趣的与本文相关的镜像

TensorFlow-v2.15

TensorFlow-v2.15

TensorFlow

TensorFlow 是由Google Brain 团队开发的开源机器学习框架,广泛应用于深度学习研究和生产环境。 它提供了一个灵活的平台,用于构建和训练各种机器学习模型

为了查找与测绘遥感相关的SCI期刊列表,可以通过学术搜索引擎或访问特定的数据库来获得最新的信息。通常这些资源会定期更新以反映最新收录情况。 些常用的搜索方式包括: 查阅Web of Science (WOS) 数据库 这是最直接的方法之,因为Science Citation Index(SCI)正是由该数据库维护。可以在其中设置关键词为"remote sensing", "surveying and mapping" 或者更具体的主题术语,并选择仅显示被SCI索引的文章和期刊。 利用Google Scholar 虽然不是专门针对SCI期刊,但可以找到很多高影响力的测绘遥感类文章及其发表刊物的信息。从这里也可以了解到哪些是活跃且受认可的研究领域内的出版物。 参考Journal Citation Reports (JCR) 这是个评估科学和技术期刊影响力的重要工具。通过查看影响因子和其他指标,可以帮助确定哪些测绘遥感领域的期刊最具权威性并且属于SCI范畴。 咨询图书馆员或专业人士 大学或研究机构的专业人员能够提供指导和支持,帮助定位最适合需求的具体期刊名称及详情。 订阅行业通讯和服务 某些服务如Elsevier's Scopus也会报告关于各个学科顶级期刊的消息,保持关注可以获得及时的通知。 以下是几个知名的测绘遥感相关SCI期刊的例子: - Remote Sensing of Environment - IEEE Transactions on Geoscience and Remote Sensing - ISPRS Journal of Photogrammetry and Remote Sensing - International Journal of Applied Earth Observation and Geoinformation 请注意,实际的SCI期刊名单可能会随着时间而变化,因此建议总是使用最新的在线资源来进行确认。
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

AI炮灰

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值