AlexNet训练

该代码示例展示了如何在TensorFlow和Keras中实现AlexNet卷积神经网络。模型包括多层卷积、池化、批归一化和激活函数。数据预处理包括图片缩放和归一化。模型在训练集和测试集上进行训练,使用CategoricalCrossentropy作为损失函数,SGD优化器,并监测训练和测试的准确率。经过训练,测试集上的准确率为65.217%。

摘要生成于 C知道 ,由 DeepSeek-R1 满血版支持, 前往体验 >

from tensorflow.keras import layers, models, Model, Sequential
from tensorflow.keras.layers import Conv2D,BatchNormalization,Activation,MaxPool2D,Dense,Dropout,Flatten
class AlexNet(Model):
    """AlexNet-style CNN ending in a softmax classification head.

    Follows AlexNet's five-conv / three-dense layout, but with 3x3 kernels
    and BatchNormalization in place of the original LRN. Input is expected
    to be an image batch of shape (batch, H, W, C) — the training script
    below feeds 224x224x3.
    """

    def __init__(self, num_classes=10):
        """Build all layers.

        Args:
            num_classes: size of the softmax output layer. Defaults to 10,
                matching the previously hard-coded value, so existing
                callers are unaffected.
        """
        super(AlexNet, self).__init__()
        # Block 1: conv -> batch-norm -> ReLU -> overlapping max-pool.
        self.c1 = Conv2D(filters=96, kernel_size=(3, 3))
        self.b1 = BatchNormalization()
        self.a1 = Activation('relu')
        self.p1 = MaxPool2D(pool_size=(3, 3), strides=2)

        # Block 2: same pattern, wider.
        self.c2 = Conv2D(filters=256, kernel_size=(3, 3))
        self.b2 = BatchNormalization()
        self.a2 = Activation("relu")
        self.p2 = MaxPool2D(pool_size=(3, 3), strides=2)

        # Blocks 3-5: 'same' padding keeps spatial size until the final pool.
        self.c3 = Conv2D(filters=384, kernel_size=(3, 3), padding='same',
                         activation='relu')
        self.c4 = Conv2D(filters=384, kernel_size=(3, 3), padding='same',
                         activation='relu')
        self.c5 = Conv2D(filters=256, kernel_size=(3, 3), padding='same',
                         activation='relu')
        self.p3 = MaxPool2D(pool_size=(3, 3), strides=2)

        # Classifier head: two dropout-regularized dense layers + softmax.
        self.flatten = Flatten()
        self.f1 = Dense(2048, activation='relu')
        self.d1 = Dropout(0.5)
        self.f2 = Dense(2048, activation='relu')
        self.d2 = Dropout(0.5)
        self.f3 = Dense(num_classes, activation='softmax')

    def call(self, x, training=None):
        """Forward pass.

        Args:
            x: input image batch, shape (batch, H, W, C).
            training: Keras training flag, forwarded to BatchNormalization
                and Dropout. The original signature omitted it, which let
                those layers fall back to the ambient call context — unsafe
                when the model is invoked directly in a custom training
                loop. ``None`` preserves the old fallback behavior.

        Returns:
            Softmax class probabilities, shape (batch, num_classes).
        """
        x = self.c1(x)
        x = self.b1(x, training=training)
        x = self.a1(x)
        x = self.p1(x)

        x = self.c2(x)
        x = self.b2(x, training=training)
        x = self.a2(x)
        x = self.p2(x)

        x = self.c3(x)
        x = self.c4(x)
        x = self.c5(x)
        x = self.p3(x)

        x = self.flatten(x)
        x = self.f1(x)
        x = self.d1(x, training=training)
        x = self.f2(x)
        x = self.d2(x, training=training)
        y = self.f3(x)
        return y

import tensorflow as tf
import numpy as np
import os
from random import shuffle
import cv2 as cv
from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics, losses

# Mapping from class-name abbreviation (as used in the annotation files)
# to integer label 0-9.
name_dict = {"BF":0,"BK":1,"BL":2,"BR":3,"CF":4,"CL":5,"CV":6,"CXK":7,"S":8,"XF":9}

data_root_path = "color_part_data_processing/"
test_file_path =  "TXT_doc/test.txt"  # test-set index file: one "<path>\t<label>" line per image
trainer_file_path = "TXT_doc/trainer.txt"  # training-set index file, same format

name_data_list = {}  # per-class image counts; NOTE(review): never populated in this file — confirm it is used elsewhere

trainer_list = []  # NOTE(review): unused in the visible code
test_list = []  # NOTE(review): unused in the visible code


def generateds(train_list):
    """Load an image dataset described by a tab-separated index file.

    Each non-empty line of *train_list* must be "<image path>\t<label>".
    Every image is read with OpenCV (BGR), resized to 224x224, and scaled
    to the [0, 1] range.

    Args:
        train_list: path of the index file to read.

    Returns:
        Tuple (x, y_): x is a float ndarray of shape (N, 224, 224, 3);
        y_ is an int64 ndarray of shape (N,) with the integer labels.

    Raises:
        FileNotFoundError: if an image listed in the index file cannot be
            read. Previously a bad path surfaced as a cryptic cv2.resize
            error on a None image.
        ValueError: if a line does not contain exactly one tab separator.
    """
    x, y_ = [], []  # x: image data, y_: labels
    with open(train_list, 'r') as f:
        for line in f:
            line = line.strip()
            if not line:  # tolerate blank lines in the index file
                continue
            img_path, lab = line.split("\t")
            img = cv.imread(img_path)
            if img is None:  # imread signals failure by returning None
                raise FileNotFoundError(f"cannot read image: {img_path}")
            img = cv.resize(img, (224, 224))  # rescale to the network's input size
            x.append(img / 255)  # normalize pixel values to [0, 1]
            y_.append(lab)

    x = np.array(x)
    y_ = np.array(y_).astype(np.int64)
    return x, y_

# Load the raw arrays from the index files and wrap them as tf.data pipelines.
x_train , y_train = generateds(trainer_file_path)
x_test, y_test = generateds(test_file_path)
x_train = tf.convert_to_tensor(x_train,dtype=tf.float32)
y_train = tf.convert_to_tensor(y_train,dtype=tf.int32)
x_test = tf.convert_to_tensor(x_test,dtype=tf.float32)
y_test = tf.convert_to_tensor(y_test,dtype=tf.int32)
train_dataset = tf.data.Dataset.from_tensor_slices((x_train,y_train)) # build the dataset object
train_dataset = train_dataset.batch(1)  # NOTE(review): batch size is 1 here, but the original comment claimed 32 — confirm which is intended
test_dataset = tf.data.Dataset.from_tensor_slices((x_test,y_test))
test_dataset = test_dataset.batch(1)  # evaluation also runs one sample at a time


# Loss: categorical cross-entropy on probabilities (the model ends in a
# softmax, hence from_logits=False); targets must be one-hot encoded.
loss_object = tf.keras.losses.CategoricalCrossentropy(from_logits=False)
# Plain SGD; `learning_rate` replaces the deprecated `lr` keyword.
optimizer = optimizers.SGD(learning_rate=0.01)
acc_meter = metrics.Accuracy()  # NOTE(review): created but never used below — candidate for removal

# Per-epoch running averages for loss and one-hot accuracy.
train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.CategoricalAccuracy(name='train_accuracy')

test_loss = tf.keras.metrics.Mean(name='test_loss')
test_accuracy = tf.keras.metrics.CategoricalAccuracy(name='test_accuracy')

# Best-so-far trackers across epochs.
best_test_loss = float('inf')
best_test_accuracy = 0.0

# NOTE(review): rebinding the class name to an instance makes a second
# instantiation impossible; kept as-is because the training loop below
# refers to the model by this name.
AlexNet = AlexNet()
epochs= 1000
for epoch in range(1, epochs+1):
    # Metrics accumulate across batches; clear them at the start of each epoch.
    train_loss.reset_states()
    train_accuracy.reset_states()
    test_loss.reset_states()
    test_accuracy.reset_states()

    # --- Training pass ---
    for images, labels in train_dataset:
        labels = tf.one_hot(labels, depth=10)  # loss/accuracy expect one-hot targets
        with tf.GradientTape() as tape:
            # training=True so BatchNormalization uses batch statistics and
            # Dropout is active (the original call omitted the flag).
            output = AlexNet(images, training=True)
            loss = loss_object(labels, output)
        # Gradient computation and the optimizer step do not need to be
        # recorded on the tape; keeping them outside the context avoids
        # tracing unnecessary ops (the original ran them inside).
        gradients = tape.gradient(loss, AlexNet.trainable_variables)
        optimizer.apply_gradients(zip(gradients, AlexNet.trainable_variables))
        train_loss(loss)
        train_accuracy(labels, output)

    # --- Evaluation pass ---
    for images, labels in test_dataset:
        output1 = AlexNet(images, training=False)  # inference mode for BN/Dropout
        labels = tf.one_hot(labels, depth=10)
        t_loss = loss_object(labels, output1)
        test_loss(t_loss)
        test_accuracy(labels, output1)

    template = 'Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}'
    print(template.format(epoch,
                          train_loss.result(),
                          train_accuracy.result() * 100,
                          test_loss.result(),
                          test_accuracy.result() * 100))
    # Track the best epoch by loss and by accuracy independently.
    if test_loss.result() < best_test_loss:
        best_test_loss = test_loss.result()
        print('best_test_loss:', best_test_loss)
    if test_accuracy.result() > best_test_accuracy:
        best_test_accuracy = test_accuracy.result()
        print('best_test_accuracy:', best_test_accuracy * 100)
        print('******************************best_test_accuracy*****************************', best_test_accuracy * 100)
print('******************************best_test_accuracy*****************************', best_test_accuracy * 100)

测试集准确率为65.21739196777344%

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值