Knowledge review:
- ResNet architecture walkthrough
- Thinking about where to place CBAM modules
- Training strategies for pretrained models
- Differentiated (layer-wise) learning rates (a small sketch follows this list)
- Three-stage fine-tuning
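The script below switches a single learning rate between stages rather than assigning per-layer rates directly. As a minimal sketch of what differentiated learning rates look like with optimizer parameter groups (assuming a model whose parameters are named "backbone.*", "cbam_*" and "classifier.*", like the VGG16_CBAM class further down, and the same `import torch.optim as optim` as in the script; the lr values are illustrative only):

# Sketch: per-group learning rates via torch.optim parameter groups
backbone_params = [p for n, p in model.named_parameters() if n.startswith("backbone")]
cbam_params = [p for n, p in model.named_parameters() if "cbam" in n]
classifier_params = [p for n, p in model.named_parameters() if n.startswith("classifier")]
optimizer = optim.Adam([
    {"params": backbone_params, "lr": 1e-5},    # pretrained backbone: smallest lr
    {"params": cbam_params, "lr": 1e-4},        # newly added attention modules
    {"params": classifier_params, "lr": 1e-3},  # freshly initialised head: largest lr
])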
Homework:
- Work through the ResNet-18 model structure until it is familiar
- Try applying the staged fine-tuning strategy to VGG16+CBAM (the inspection snippet below helps locate the block boundaries first)
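Before deciding where to insert CBAM, it is worth printing the layer indices of vgg16.features: the MaxPool2d layers mark the boundaries of the five convolutional blocks. A small inspection snippet (assuming torchvision is installed; pretrained=False just avoids downloading weights):

# List every layer of vgg16.features with its index; the MaxPool2d layers at
# indices 4, 9, 16, 23 and 30 close the five conv blocks.
from torchvision import models
for idx, layer in enumerate(models.vgg16(pretrained=False).features):
    print(idx, layer)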
VGG16 + CBAM (full training script)
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms, models
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import numpy as np
import time
# Matplotlib font setup (SimHei enables CJK glyph support)
plt.rcParams["font.family"] = ["SimHei"]
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly
# Channel attention module (unchanged from the earlier lesson's code)
class ChannelAttention(nn.Module):
def __init__(self, in_channels, ratio=16):
super().__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.max_pool = nn.AdaptiveMaxPool2d(1)
self.fc = nn.Sequential(
nn.Linear(in_channels, in_channels // ratio, bias=False),
nn.ReLU(),
nn.Linear(in_channels // ratio, in_channels, bias=False)
)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
b, c, h, w = x.shape
avg_out = self.fc(self.avg_pool(x).view(b, c))
max_out = self.fc(self.max_pool(x).view(b, c))
attention = self.sigmoid(avg_out + max_out).view(b, c, 1, 1)
return x * attention
# Spatial attention module
class SpatialAttention(nn.Module):
def __init__(self, kernel_size=7):
super().__init__()
self.conv = nn.Conv2d(2, 1, kernel_size, padding=kernel_size//2, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avg_out = torch.mean(x, dim=1, keepdim=True)
max_out, _ = torch.max(x, dim=1, keepdim=True)
pool_out = torch.cat([avg_out, max_out], dim=1)
attention = self.conv(pool_out)
return x * self.sigmoid(attention)
# CBAM module: channel attention followed by spatial attention
class CBAM(nn.Module):
def __init__(self, in_channels, ratio=16, kernel_size=7):
super().__init__()
self.channel_attn = ChannelAttention(in_channels, ratio)
self.spatial_attn = SpatialAttention(kernel_size)
def forward(self, x):
x = self.channel_attn(x)
x = self.spatial_attn(x)
return x
# Data preprocessing and augmentation
train_transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1),
transforms.RandomRotation(15),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])
test_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])
# Load CIFAR-10
train_dataset = datasets.CIFAR10(root='./data', train=True, download=True, transform=train_transform)
test_dataset = datasets.CIFAR10(root='./data', train=False, transform=test_transform)
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=64, shuffle=False)
# Custom VGG16 with a CBAM module inserted after each convolutional block
class VGG16_CBAM(nn.Module):
def __init__(self, num_classes=10, pretrained=True, cbam_ratio=16, cbam_kernel=7):
super().__init__()
        # Load the pretrained VGG16 feature extractor
        # (newer torchvision versions prefer the weights= argument over pretrained=)
        self.backbone = models.vgg16(pretrained=pretrained).features
        # Note: VGG16's first conv is already 3x3, stride 1, padding 1, so 32x32
        # CIFAR-10 inputs can be fed in directly; unlike ResNet, there is no
        # 7x7 / stride-2 stem that needs replacing.
        # Insert a CBAM module after each of VGG16's five conv blocks.
        # Layer indices inside vgg16.features:
        #   block1: 0-4   (2 convs, 64 ch,  + maxpool)
        #   block2: 5-9   (2 convs, 128 ch, + maxpool)
        #   block3: 10-16 (3 convs, 256 ch, + maxpool)
        #   block4: 17-23 (3 convs, 512 ch, + maxpool)
        #   block5: 24-30 (3 convs, 512 ch, + maxpool)
self.cbam_block1 = CBAM(in_channels=64, ratio=cbam_ratio, kernel_size=cbam_kernel)
self.cbam_block2 = CBAM(in_channels=128, ratio=cbam_ratio, kernel_size=cbam_kernel)
self.cbam_block3 = CBAM(in_channels=256, ratio=cbam_ratio, kernel_size=cbam_kernel)
self.cbam_block4 = CBAM(in_channels=512, ratio=cbam_ratio, kernel_size=cbam_kernel)
self.cbam_block5 = CBAM(in_channels=512, ratio=cbam_ratio, kernel_size=cbam_kernel)
        # Adaptive average pooling to 7x7 so the original VGG classifier input size can be reused
        # (with 32x32 inputs the feature map here is 1x1, which this pool simply replicates to 7x7)
self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
        # Replace the classifier head (VGG16's original 3 fully connected layers, with the last one resized to 10 classes)
self.classifier = nn.Sequential(
nn.Linear(512 * 7 * 7, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, num_classes),
)
    def forward(self, x):
        # Feature extraction: run each VGG block, then its CBAM module
        x = self.backbone[0:5](x)    # block1: 2 convs (64 ch) + maxpool
        x = self.cbam_block1(x)
        x = self.backbone[5:10](x)   # block2: 2 convs (128 ch) + maxpool
        x = self.cbam_block2(x)
        x = self.backbone[10:17](x)  # block3: 3 convs (256 ch) + maxpool
        x = self.cbam_block3(x)
        x = self.backbone[17:24](x)  # block4: 3 convs (512 ch) + maxpool
        x = self.cbam_block4(x)
        x = self.backbone[24:31](x)  # block5: 3 convs (512 ch) + maxpool
        x = self.cbam_block5(x)
        # Adaptive average pooling + classification
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x
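# Optional sanity check (illustrative, not required for training): a randomly initialised
# VGG16_CBAM should map a (2, 3, 32, 32) batch to logits of shape (2, 10). Uncomment to
# verify the block/CBAM wiring before launching a full run:
# _model = VGG16_CBAM(pretrained=False)
# print(_model(torch.randn(2, 3, 32, 32)).shape)  # expected: torch.Size([2, 10])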
# Helpers for staged fine-tuning
def set_trainable_layers(model, trainable_parts):
    print(f"\n---> Unfreezing and making trainable: {trainable_parts}")
    # Freeze everything first, then re-enable any parameter whose name contains
    # one of the given substrings (e.g. "cbam", "classifier", "backbone.17").
    for name, param in model.named_parameters():
        param.requires_grad = False
        for part in trainable_parts:
            if part in name:
                param.requires_grad = True
                break
def train_staged_finetuning(model, criterion, train_loader, test_loader, device, epochs):
optimizer = None
    # Initialise history lists for plotting
all_iter_losses, iter_indices = [], []
train_acc_history, test_acc_history = [], []
train_loss_history, test_loss_history = [], []
for epoch in range(1, epochs + 1):
epoch_start_time = time.time()
        # --- Dynamically adjust which layers are frozen and the learning rate ---
        if epoch == 1:
            print("\n" + "="*50 + "\n🚀 **Stage 1: train the CBAM modules and the classifier head**\n" + "="*50)
            set_trainable_layers(model, ["cbam", "classifier"])
            optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=1e-3)
        elif epoch == 6:
            print("\n" + "="*50 + "\n✈️ **Stage 2: also unfreeze the high-level conv blocks (block4, block5)**\n" + "="*50)
            # Conv layers of block4 (indices 17, 19, 21) and block5 (24, 26, 28) in vgg16.features
            set_trainable_layers(model, ["cbam", "classifier", "backbone.17", "backbone.19", "backbone.21",
                                         "backbone.24", "backbone.26", "backbone.28"])
            optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=1e-4)
        elif epoch == 21:
            print("\n" + "="*50 + "\n🛰️ **Stage 3: unfreeze all layers for global fine-tuning**\n" + "="*50)
            for param in model.parameters():
                param.requires_grad = True
            optimizer = optim.Adam(model.parameters(), lr=1e-5)
        # --- Training loop ---
model.train()
running_loss, correct, total = 0.0, 0, 0
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
loss.backward()
optimizer.step()
            # Record the per-iteration loss
iter_loss = loss.item()
all_iter_losses.append(iter_loss)
iter_indices.append((epoch - 1) * len(train_loader) + batch_idx + 1)
running_loss += iter_loss
_, predicted = output.max(1)
total += target.size(0)
correct += predicted.eq(target).sum().item()
            # Print progress every 100 batches
            if (batch_idx + 1) % 100 == 0:
                print(f'Epoch: {epoch}/{epochs} | Batch: {batch_idx+1}/{len(train_loader)} '
                      f'| Batch loss: {iter_loss:.4f} | Running avg loss: {running_loss/(batch_idx+1):.4f}')
epoch_train_loss = running_loss / len(train_loader)
epoch_train_acc = 100. * correct / total
train_loss_history.append(epoch_train_loss)
train_acc_history.append(epoch_train_acc)
        # --- Evaluation loop ---
model.eval()
test_loss, correct_test, total_test = 0, 0, 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += criterion(output, target).item()
_, predicted = output.max(1)
total_test += target.size(0)
correct_test += predicted.eq(target).sum().item()
epoch_test_loss = test_loss / len(test_loader)
epoch_test_acc = 100. * correct_test / total_test
test_loss_history.append(epoch_test_loss)
test_acc_history.append(epoch_test_acc)
        # Print the final results for this epoch
        print(f'Epoch {epoch}/{epochs} done | Time: {time.time() - epoch_start_time:.2f}s | Train acc: {epoch_train_acc:.2f}% | Test acc: {epoch_test_acc:.2f}%')
    # Plot the results once training has finished
    print("\nTraining complete! Plotting results...")
plot_iter_losses(all_iter_losses, iter_indices)
plot_epoch_metrics(train_acc_history, test_acc_history, train_loss_history, test_loss_history)
    # Return the final test accuracy
return epoch_test_acc
# Plotting functions
def plot_iter_losses(losses, indices):
plt.figure(figsize=(10, 4))
plt.plot(indices, losses, 'b-', alpha=0.7, label='Iteration Loss')
    plt.xlabel('Iteration (batch index)')
    plt.ylabel('Loss')
    plt.title('Training loss per iteration')
plt.legend()
plt.grid(True)
plt.tight_layout()
plt.show()
def plot_epoch_metrics(train_acc, test_acc, train_loss, test_loss):
epochs = range(1, len(train_acc) + 1)
plt.figure(figsize=(12, 4))
plt.subplot(1, 2, 1)
    plt.plot(epochs, train_acc, 'b-', label='Train accuracy')
    plt.plot(epochs, test_acc, 'r-', label='Test accuracy')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy (%)')
    plt.title('Train / test accuracy')
plt.legend(); plt.grid(True)
plt.subplot(1, 2, 2)
    plt.plot(epochs, train_loss, 'b-', label='Train loss')
    plt.plot(epochs, test_loss, 'r-', label='Test loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title('Train / test loss')
plt.legend(); plt.grid(True)
plt.tight_layout()
plt.show()
# Run training
if __name__ == "__main__":
    # Use GPU if available
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Using device: {device}")
    # Initialise the model and move it to the device
model = VGG16_CBAM().to(device)
criterion = nn.CrossEntropyLoss()
epochs = 50
print("开始使用带分阶段微调策略的VGG16+CBAM模型进行训练...")
final_accuracy = train_staged_finetuning(model, criterion, train_loader, test_loader, device, epochs)
print(f"训练完成!最终测试准确率: {final_accuracy:.2f}%")
# torch.save(model.state_dict(), 'vgg16_cbam_finetuned.pth')
# print("模型已保存为: vgg16_cbam_finetuned.pth")
Short on time today, so I'll stop here.