Week P10: License Plate Recognition with PyTorch



My environment:

  • Language: Python 3.11.9
  • IDE: Jupyter Lab
  • Deep learning environment:
    • torch==2.3.1+cu121
    • torchvision==0.18.1+cu121

I. Loading the Data

from torchvision            import transforms
from torch.utils.data       import DataLoader
from torchvision            import datasets
import torchvision.models   as models
import torch.nn.functional  as F
import torch.nn             as nn
import torch,torchvision

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device

Output: (screenshot omitted)

1. Getting the class names

import os,PIL,random,pathlib
import matplotlib.pyplot as plt
# Chinese font support for matplotlib
plt.rcParams['font.sans-serif'] = ['SimHei']  # display Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False    # display minus signs correctly

data_dir = './015_licence_plate/'
data_dir = pathlib.Path(data_dir)

data_paths  = list(data_dir.glob('*'))
# File names look like "<id>_<plate text>.jpg"; path.name avoids the
# platform-dependent "/" separator used by str(path).split("/")
classeNames = [path.name.split("_")[1].split(".")[0] for path in data_paths]
print(classeNames)

Output: (screenshot omitted)

data_paths     = list(data_dir.glob('*'))
data_paths_str = [str(path) for path in data_paths]
data_paths_str

Output: (screenshot omitted)

2. Visualizing the data

plt.figure(figsize=(14,5))
plt.suptitle("数据示例(K同学啊)",fontsize=15)

for i in range(18):
    plt.subplot(3,6,i+1)
    # plt.xticks([])
    # plt.yticks([])
    # plt.grid(False)
    
    # show the image
    images = plt.imread(data_paths_str[i])
    plt.imshow(images)

plt.show()

Output: (screenshot omitted)

3. Digitizing the labels

import numpy as np

char_enum = ["京","沪","津","渝","冀","晋","蒙","辽","吉","黑","苏","浙","皖","闽","赣","鲁",\
              "豫","鄂","湘","粤","桂","琼","川","贵","云","藏","陕","甘","青","宁","新","军","使"]

number   = [str(i) for i in range(0, 10)]    # digits 0-9
alphabet = [chr(i) for i in range(65, 91)]   # letters A-Z

char_set       = char_enum + number + alphabet
char_set_len   = len(char_set)
label_name_len = len(classeNames[0])  # assumes every plate has the same length (7 characters)

# convert a plate string into its one-hot label matrix
def text2vec(text):
    vector = np.zeros([label_name_len, char_set_len])
    for i, c in enumerate(text):
        idx = char_set.index(c)
        vector[i][idx] = 1.0
    return vector

all_labels = [text2vec(i) for i in classeNames]
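
For decoding model outputs back into plate strings later, a small inverse helper can be handy. This is my own addition, not part of the original template:

# vec2text: invert text2vec by taking the argmax of every row
def vec2text(vector):
    indices = np.argmax(vector, axis=1)           # one char_set index per position
    return ''.join(char_set[i] for i in indices)

# round-trip check on the first label
assert vec2text(text2vec(classeNames[0])) == classeNames[0]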

4. Loading the data files

import os
import pandas as pd
from torchvision.io import read_image
from torch.utils.data import Dataset
import torch.utils.data as data
from PIL import Image

class MyDataset(data.Dataset):
    def __init__(self, all_labels, data_paths_str, transform):
        self.img_labels = all_labels      # one-hot label matrices, shape [7, 69]
        self.img_dir    = data_paths_str  # list of image file paths
        self.transform  = transform       # image transform pipeline

    def __len__(self):
        return len(self.img_labels)

    def __getitem__(self, index):
        # PIL is used here (rather than torchvision.io.read_image) so the
        # transforms.Resize/ToTensor pipeline can be applied directly
        image = Image.open(self.img_dir[index]).convert('RGB')
        label = self.img_labels[index]  # numpy one-hot matrix for this image

        if self.transform:
            image = self.transform(image)

        return image, label  # DataLoader's default collate turns label into a float64 tensor
       

# For more on transforms.Compose see: https://blog.youkuaiyun.com/qq_38251616/article/details/124878863
train_transforms = transforms.Compose([
    transforms.Resize([224, 224]),  # resize every input image to a uniform size
    transforms.ToTensor(),          # convert PIL Image / numpy.ndarray to a tensor scaled to [0,1]
    transforms.Normalize(           # standardize towards a normal (Gaussian) distribution to help convergence
        mean=[0.485, 0.456, 0.406],
        std =[0.229, 0.224, 0.225])  # mean/std were computed from a random sample of the dataset (the standard ImageNet statistics)
])

total_data = MyDataset(all_labels, data_paths_str, train_transforms)
total_data

5. Splitting the data

train_size = int(0.8 * len(total_data))
test_size  = len(total_data) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(total_data, [train_size, test_size])
train_size,test_size

Output: (screenshot omitted)
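
random_split draws a different split on every run; if a reproducible split is wanted, a seeded generator can be passed (an optional tweak, not in the original):

g = torch.Generator().manual_seed(42)
train_dataset, test_dataset = torch.utils.data.random_split(
    total_data, [train_size, test_size], generator=g)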

train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=16,
                                           shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset,
                                          batch_size=16,
                                          shuffle=True)

print("The number of images in a training set is: ", len(train_loader)*16)
print("The number of images in a test set is: ", len(test_loader)*16)
print("The number of batches per epoch is: ", len(train_loader))

Output: (screenshot omitted)

for X, y in test_loader:
    print("Shape of X [N, C, H, W]: ", X.shape)
    print("Shape of y: ", y.shape, y.dtype)
    break

Output: (screenshot omitted)

II. Building a Custom Model

class Network_bn(nn.Module):
    def __init__(self):
        super(Network_bn, self).__init__()
        """
        nn.Conv2d()函数:
        第一个参数(in_channels)是输入的channel数量
        第二个参数(out_channels)是输出的channel数量
        第三个参数(kernel_size)是卷积核大小
        第四个参数(stride)是步长,默认为1
        第五个参数(padding)是填充大小,默认为0
        """
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=12, kernel_size=5, stride=1, padding=0)
        self.bn1 = nn.BatchNorm2d(12)
        self.conv2 = nn.Conv2d(in_channels=12, out_channels=12, kernel_size=5, stride=1, padding=0)
        self.bn2 = nn.BatchNorm2d(12)
        self.pool = nn.MaxPool2d(2,2)
        self.conv4 = nn.Conv2d(in_channels=12, out_channels=24, kernel_size=5, stride=1, padding=0)
        self.bn4 = nn.BatchNorm2d(24)
        self.conv5 = nn.Conv2d(in_channels=24, out_channels=24, kernel_size=5, stride=1, padding=0)
        self.bn5 = nn.BatchNorm2d(24)
        self.fc1 = nn.Linear(24*50*50, label_name_len*char_set_len)
        self.reshape = Reshape([label_name_len,char_set_len])

    def forward(self, x):
        x = F.relu(self.bn1(self.conv1(x)))      
        x = F.relu(self.bn2(self.conv2(x)))     
        x = self.pool(x)                        
        x = F.relu(self.bn4(self.conv4(x)))     
        x = F.relu(self.bn5(self.conv5(x)))  
        x = self.pool(x)                        
        x = x.view(-1, 24*50*50)
        x = self.fc1(x)
        
        # final reshape to [batch, label_name_len, char_set_len]
        x = self.reshape(x)

        return x
    
# Reshape layer (referenced in Network_bn above; Python resolves the name at instantiation time)
class Reshape(nn.Module):
    def __init__(self, shape):
        super(Reshape, self).__init__()
        self.shape = shape

    def forward(self, x):
        return x.view(x.size(0), *self.shape)
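
The 24*50*50 flatten size of fc1 follows from the layer arithmetic: each unpadded 5×5 convolution trims 4 pixels from each spatial dimension and each 2×2 max-pool halves it. A quick sanity check (my own addition, not part of the original code):

# trace the spatial size of a 224x224 input through the feature extractor
size = 224
size -= 4        # conv1 -> 220
size -= 4        # conv2 -> 216
size //= 2       # pool  -> 108
size -= 4        # conv4 -> 104
size -= 4        # conv5 -> 100
size //= 2       # pool  -> 50
print(24 * size * size)  # 60000 = 24*50*50, the in_features of fc1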

device = "cuda" if torch.cuda.is_available() else "cpu"
print("Using {} device".format(device))

model = Network_bn().to(device)
model

Output: (screenshot omitted)

import torchsummary

''' print the network structure '''
torchsummary.summary(model, (3, 224, 224))

Output: (screenshot omitted)

III. Training the Model

1. Optimizer and loss function

optimizer  = torch.optim.Adam(model.parameters(), 
                              lr=1e-4, 
                              weight_decay=0.0001)

loss_model = nn.CrossEntropyLoss()
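One caveat worth noting: nn.CrossEntropyLoss treats dim 1 as the class dimension, so with [batch, 7, 69] inputs and one-hot targets it effectively normalizes over the 7 positions rather than the 69 characters. A per-character alternative (a sketch of my own, not the original template's loss) flattens the positions first:

def char_ce_loss(pred, target):
    # pred:   [batch, 7, 69] logits; target: [batch, 7, 69] one-hot labels
    pred   = pred.reshape(-1, char_set_len)    # [batch*7, 69]
    target = target.argmax(dim=2).reshape(-1)  # [batch*7] class indices
    return F.cross_entropy(pred, target)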

def test(model, test_loader, loss_model):
    size = len(test_loader.dataset)
    num_batches = len(test_loader)

    model.eval()
    test_loss, correct = 0, 0
    with torch.no_grad():
        for X, y in test_loader:
            X, y = X.to(device), y.to(device)
            pred = model(X)

            # count correctly predicted characters (positions), not whole plates
            correct   += (pred.argmax(2) == y.argmax(2)).type(torch.float).sum().item()
            test_loss += loss_model(pred, y).item()

    test_loss /= num_batches
    correct   /= (size * label_name_len)  # total characters = samples * 7

    print(f"Avg loss: {test_loss:>8f},Avg acc: {correct:>8f} \n")
    return correct, test_loss

def train(model, train_loader, loss_model, optimizer):
    model = model.to(device)
    model.train()
    size = len(train_loader.dataset)  # size of the training set
    num_batches = len(train_loader)   # number of batches (size/batch_size, rounded up)
    train_loss, train_acc = 0, 0      # running loss and correct-character count

    for i, (images, labels) in enumerate(train_loader, 0):  # 0 is the start index

        # Variable is deprecated since PyTorch 0.4; .to(device) is enough
        images = images.to(device)
        labels = labels.to(device)

        outputs = model(images)
        loss = loss_model(outputs, labels)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if i % 1000 == 0:
            print('[%5d] loss: %.3f' % (i, loss.item()))

        # accumulate acc and loss inside the loop (the original template left
        # these lines outside it, so only the last batch was counted)
        train_acc  += (outputs.argmax(2) == labels.argmax(2)).type(torch.float).sum().item()
        train_loss += loss.item()

    train_acc  /= (size * label_name_len)  # total characters = samples * 7
    train_loss /= num_batches

    return train_acc, train_loss



2. Running the training

test_acc_list  = []
test_loss_list = []
epochs = 30

for t in range(epochs):
    print(f"Epoch {t+1}\n-------------------------------")
    train_acc, train_loss = train(model, train_loader, loss_model, optimizer)
    test_acc, test_loss = test(model, test_loader, loss_model)
    test_acc_list.append(test_acc)
    test_loss_list.append(test_loss)
    print(f"Epoch:{t+1:2d}, Train_acc:{train_acc*100:.1f}%, Train_loss:{train_loss:.3f}, "
          f"Test_acc:{test_acc*100:.1f}%,Test_loss:{test_loss:.3f}")
print("Done!")

Output (first run, before the acc/loss bookkeeping was filled in; see the Summary for the completed run):

Epoch 1
-------------------------------
[    0] loss: 0.214
Avg loss: 0.087744 

Epoch 2
-------------------------------
[    0] loss: 0.066
Avg loss: 0.053029 

Epoch 3
-------------------------------
[    0] loss: 0.029
Avg loss: 0.048253 

Epoch 4
-------------------------------
[    0] loss: 0.023
Avg loss: 0.042447 

Epoch 5
-------------------------------
[    0] loss: 0.024
Avg loss: 0.042803 

Epoch 6
-------------------------------
[    0] loss: 0.015
Avg loss: 0.043501 

Epoch 7
-------------------------------
[    0] loss: 0.024
Avg loss: 0.042448 

Epoch 8
-------------------------------
[    0] loss: 0.020
Avg loss: 0.041267 

Epoch 9
-------------------------------
[    0] loss: 0.025
Avg loss: 0.039533 

Epoch 10
-------------------------------
[    0] loss: 0.018
Avg loss: 0.037440 

Epoch 11
-------------------------------
[    0] loss: 0.020
Avg loss: 0.038482 

Epoch 12
-------------------------------
[    0] loss: 0.021
Avg loss: 0.036985 

Epoch 13
-------------------------------
[    0] loss: 0.014
Avg loss: 0.035747 

Epoch 14
-------------------------------
[    0] loss: 0.014
Avg loss: 0.035735 

Epoch 15
-------------------------------
[    0] loss: 0.021
Avg loss: 0.033841 

Epoch 16
-------------------------------
[    0] loss: 0.015
Avg loss: 0.032838 

Epoch 17
-------------------------------
[    0] loss: 0.017
Avg loss: 0.032109 

Epoch 18
-------------------------------
[    0] loss: 0.021
Avg loss: 0.033465 

Epoch 19
-------------------------------
[    0] loss: 0.020
Avg loss: 0.032128 

Epoch 20
-------------------------------
[    0] loss: 0.013
Avg loss: 0.032026 

Epoch 21
-------------------------------
[    0] loss: 0.017
Avg loss: 0.031841 

Epoch 22
-------------------------------
[    0] loss: 0.018
Avg loss: 0.031387 

Epoch 23
-------------------------------
[    0] loss: 0.018
Avg loss: 0.032210 

Epoch 24
-------------------------------
[    0] loss: 0.024
Avg loss: 0.030751 

Epoch 25
-------------------------------
[    0] loss: 0.016
Avg loss: 0.029903 

Epoch 26
-------------------------------
[    0] loss: 0.013
Avg loss: 0.030018 

Epoch 27
-------------------------------
[    0] loss: 0.018
Avg loss: 0.030378 

Epoch 28
-------------------------------
[    0] loss: 0.014
Avg loss: 0.029110 

Epoch 29
-------------------------------
[    0] loss: 0.019
Avg loss: 0.029295 

Epoch 30
-------------------------------
[    0] loss: 0.020
Avg loss: 0.028399 

Done!

IV. Analyzing the Results

import numpy as np
import matplotlib.pyplot as plt

x = [i for i in range(1, epochs + 1)]

plt.plot(x, test_loss_list, label="Loss", alpha=0.8)

plt.xlabel("Epoch")
plt.ylabel("Loss")

plt.legend()    
plt.show()

Output: (loss-curve plot omitted)

V. Summary

1. Preparing the dataset

Because this dataset is not pre-organized into class folders, the labels have to be prepared by hand: each 7-character plate string is combined with the 69 possible characters (provinces + letters + digits) into a 2-D tensor, so every image's label becomes a matrix the network can be trained against.

Thinking: inside the 3-D label tensor, we use the 2-D slice for each sample to decide whether its plate is predicted correctly. The 7 stands for the 7 plate characters and the 69 for the province + letter + digit alphabet. Each of the 7 rows is one-hot, containing exactly one 1 that marks that position's character, so the predicted 2-D tensor has to match the label exactly for the plate to count as correct.
Accuracy is the number of correctly predicted characters divided by the total number of characters. Since y has shape [batch, 7, 69], each sample carries 7 independent labels with 69 possible classes each, and outputs has the same [batch, 7, 69] shape. outputs.argmax(dim=2) therefore yields a [batch, 7] tensor holding the most likely class for every position of every sample.

When computing accuracy, train_correct += (preds == labels).type(torch.float).sum().item() counts the characters the model predicted correctly: preds == labels produces a [batch, 7] boolean tensor in which True marks a correct position, and .sum() totals the correct positions over the whole batch. Turning this count into an overall accuracy requires the total number of characters in the training set, i.e. size * 7.
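
A tiny worked example of that character-level count, with shapes shrunk to batch=2, 3 positions, 4 classes for readability (made-up numbers, reusing the notebook's torch import):

preds  = torch.tensor([[0, 2, 1], [3, 3, 0]])    # outputs.argmax(2), shape [batch, positions]
labels = torch.tensor([[0, 2, 2], [3, 1, 0]])    # y.argmax(2)
correct = (preds == labels).float().sum().item() # 4 of the 6 positions match
print(correct / labels.numel())                  # 0.666... character-level accuracy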

One-hot encoding is a common numeric representation, especially for categorical data: each category is represented by a binary vector in which the single element for that category is 1 and all other elements are 0.

Properties

  • Mutually exclusive: each vector has exactly one position set to 1; all others are 0.
  • Sparse: with many categories the vectors become very sparse, consisting mostly of zeros.
  • Unordered: one-hot encoding carries no information about any ordering among the categories.

Applications

One-hot encoding is very common in machine learning and natural language processing, especially for label data. For example:

  • Machine learning: in classification tasks the target variable is a category label, and one-hot encoding conveniently turns such labels into a numeric form a model can process.
  • Natural language processing: one-hot vectors can represent words, characters, or other vocabulary items.

Example

Suppose a simple classification problem has three classes {A, B, C}. Their one-hot encodings are:

  • A: [1, 0, 0]
  • B: [0, 1, 0]
  • C: [0, 0, 1]
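
Applying the document's own text2vec to a sample string shows the same encoding in action (the plate text below is made up for illustration):

vec = text2vec("京A12345")   # shape (7, 69): one one-hot row per character
print(vec.shape)             # (7, 69)
print(vec[0].argmax())       # 0 -> index of "京" in char_set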

Advantages

  • Simple and intuitive: one-hot encoding is easy to understand and trivial to implement.
  • No spurious ordering: it avoids assigning the categories any numeric order, which is necessary for some models (e.g. linear models).

2. Results after completing the code

Epoch 1
-------------------------------
[    0] loss: 0.209
Avg loss: 0.102005,Avg acc: 0.166780 

Epoch: 1, Train_acc:9.8%, Train_loss:0.143, Test_acc:16.7%,Test_loss:0.102
Epoch 2
-------------------------------
[    0] loss: 0.083
Avg loss: 0.071170,Avg acc: 0.284722 

Epoch: 2, Train_acc:25.9%, Train_loss:0.069, Test_acc:28.5%,Test_loss:0.071
Epoch 3
-------------------------------
[    0] loss: 0.046
Avg loss: 0.053852,Avg acc: 0.363541 

Epoch: 3, Train_acc:40.2%, Train_loss:0.038, Test_acc:36.4%,Test_loss:0.054
Epoch 4
-------------------------------
[    0] loss: 0.028
Avg loss: 0.046592,Avg acc: 0.409976 

Epoch: 4, Train_acc:48.5%, Train_loss:0.024, Test_acc:41.0%,Test_loss:0.047
Epoch 5
-------------------------------
[    0] loss: 0.020
Avg loss: 0.045525,Avg acc: 0.435623 

Epoch: 5, Train_acc:53.3%, Train_loss:0.020, Test_acc:43.6%,Test_loss:0.046
Epoch 6
-------------------------------
[    0] loss: 0.018
Avg loss: 0.045588,Avg acc: 0.446122 

Epoch: 6, Train_acc:55.8%, Train_loss:0.019, Test_acc:44.6%,Test_loss:0.046
Epoch 7
-------------------------------
[    0] loss: 0.016
Avg loss: 0.045767,Avg acc: 0.451972 

Epoch: 7, Train_acc:58.0%, Train_loss:0.020, Test_acc:45.2%,Test_loss:0.046
Epoch 8
-------------------------------
[    0] loss: 0.018
Avg loss: 0.045151,Avg acc: 0.466754 

Epoch: 8, Train_acc:59.0%, Train_loss:0.019, Test_acc:46.7%,Test_loss:0.045
Epoch 9
-------------------------------
[    0] loss: 0.019
Avg loss: 0.042168,Avg acc: 0.470880 

Epoch: 9, Train_acc:60.1%, Train_loss:0.020, Test_acc:47.1%,Test_loss:0.042
Epoch 10
-------------------------------
[    0] loss: 0.015
Avg loss: 0.043899,Avg acc: 0.475947 

Epoch:10, Train_acc:61.0%, Train_loss:0.019, Test_acc:47.6%,Test_loss:0.044
Epoch 11
-------------------------------
[    0] loss: 0.018
Avg loss: 0.043849,Avg acc: 0.478976 

Epoch:11, Train_acc:62.0%, Train_loss:0.018, Test_acc:47.9%,Test_loss:0.044
Epoch 12
-------------------------------
[    0] loss: 0.015
Avg loss: 0.041141,Avg acc: 0.496527 

Epoch:12, Train_acc:63.1%, Train_loss:0.018, Test_acc:49.7%,Test_loss:0.041
Epoch 13
-------------------------------
[    0] loss: 0.016
Avg loss: 0.039633,Avg acc: 0.507443 

Epoch:13, Train_acc:63.7%, Train_loss:0.018, Test_acc:50.7%,Test_loss:0.040
Epoch 14
-------------------------------
[    0] loss: 0.015
Avg loss: 0.041965,Avg acc: 0.500810 

Epoch:14, Train_acc:64.7%, Train_loss:0.018, Test_acc:50.1%,Test_loss:0.042
Epoch 15
-------------------------------
[    0] loss: 0.017
Avg loss: 0.037552,Avg acc: 0.514025 

Epoch:15, Train_acc:65.7%, Train_loss:0.017, Test_acc:51.4%,Test_loss:0.038
Epoch 16
-------------------------------
[    0] loss: 0.017
Avg loss: 0.038561,Avg acc: 0.522173 

Epoch:16, Train_acc:66.2%, Train_loss:0.017, Test_acc:52.2%,Test_loss:0.039
Epoch 17
-------------------------------
[    0] loss: 0.015
Avg loss: 0.038025,Avg acc: 0.525359 

Epoch:17, Train_acc:67.6%, Train_loss:0.017, Test_acc:52.5%,Test_loss:0.038
Epoch 18
-------------------------------
[    0] loss: 0.012
Avg loss: 0.038425,Avg acc: 0.536380 

Epoch:18, Train_acc:68.0%, Train_loss:0.017, Test_acc:53.6%,Test_loss:0.038
Epoch 19
-------------------------------
[    0] loss: 0.014
Avg loss: 0.037057,Avg acc: 0.539358 

Epoch:19, Train_acc:68.5%, Train_loss:0.016, Test_acc:53.9%,Test_loss:0.037
Epoch 20
-------------------------------
[    0] loss: 0.013
Avg loss: 0.036633,Avg acc: 0.546200 

Epoch:20, Train_acc:69.0%, Train_loss:0.016, Test_acc:54.6%,Test_loss:0.037
Epoch 21
-------------------------------
[    0] loss: 0.014
Avg loss: 0.037100,Avg acc: 0.548080 

Epoch:21, Train_acc:70.1%, Train_loss:0.016, Test_acc:54.8%,Test_loss:0.037
Epoch 22
-------------------------------
[    0] loss: 0.015
Avg loss: 0.034921,Avg acc: 0.562183 

Epoch:22, Train_acc:70.1%, Train_loss:0.016, Test_acc:56.2%,Test_loss:0.035
Epoch 23
-------------------------------
[    0] loss: 0.012
Avg loss: 0.036810,Avg acc: 0.547924 

Epoch:23, Train_acc:71.2%, Train_loss:0.015, Test_acc:54.8%,Test_loss:0.037
Epoch 24
-------------------------------
[    0] loss: 0.014
Avg loss: 0.036908,Avg acc: 0.561609 

Epoch:24, Train_acc:70.6%, Train_loss:0.015, Test_acc:56.2%,Test_loss:0.037
Epoch 25
-------------------------------
[    0] loss: 0.016
Avg loss: 0.035637,Avg acc: 0.560773 

Epoch:25, Train_acc:71.6%, Train_loss:0.015, Test_acc:56.1%,Test_loss:0.036
Epoch 26
-------------------------------
[    0] loss: 0.016
Avg loss: 0.035210,Avg acc: 0.566049 

Epoch:26, Train_acc:72.1%, Train_loss:0.015, Test_acc:56.6%,Test_loss:0.035
Epoch 27
-------------------------------
[    0] loss: 0.014
Avg loss: 0.033531,Avg acc: 0.583547 

Epoch:27, Train_acc:72.6%, Train_loss:0.015, Test_acc:58.4%,Test_loss:0.034
Epoch 28
-------------------------------
[    0] loss: 0.017
Avg loss: 0.032652,Avg acc: 0.583024 

Epoch:28, Train_acc:72.5%, Train_loss:0.015, Test_acc:58.3%,Test_loss:0.033
Epoch 29
-------------------------------
[    0] loss: 0.014
Avg loss: 0.033740,Avg acc: 0.573413 

Epoch:29, Train_acc:72.9%, Train_loss:0.015, Test_acc:57.3%,Test_loss:0.034
Epoch 30
-------------------------------
[    0] loss: 0.013
Avg loss: 0.034106,Avg acc: 0.565944 

Epoch:30, Train_acc:73.3%, Train_loss:0.015, Test_acc:56.6%,Test_loss:0.034
Done!