Vision Transformer (ViT) is an image classification model built on the Transformer architecture. Unlike traditional convolutional neural networks (CNNs), ViT splits an image into small fixed-size patches and feeds the resulting patch sequence into a Transformer encoder. For example, a 224×224 image cut into 16×16 patches yields (224/16)² = 196 patches. The following steps walk through implementing a Vision Transformer for image classification in PyTorch.
1. Install the required libraries
First, make sure the required libraries are installed:
pip install torch torchvision
Note: choose torch/torchvision builds that match your CUDA version.
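To confirm the installation and check whether PyTorch can see your GPU, you can run a quick check:

import torch
import torchvision

print(torch.__version__)
print(torchvision.__version__)
print(torch.cuda.is_available())  # True only if a CUDA build and a working driver are present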
2. Import libraries
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
3. Define the Vision Transformer model
The model comes in two parts: PatchEmbedding converts an image into a sequence of patch embeddings, and VisionTransformer stacks standard Transformer encoder layers on top of that sequence.
class PatchEmbedding(nn.Module):
    def __init__(self, img_size=224, patch_size=16, in_channels=3, embed_dim=768):
        super().__init__()
        self.img_size = img_size
        self.patch_size = patch_size
        self.n_patches = (img_size // patch_size) ** 2
        # A conv with kernel_size == stride == patch_size is equivalent to cutting
        # the image into patches and applying a shared linear projection to each.
        self.proj = nn.Conv2d(in_channels, embed_dim, kernel_size=patch_size, stride=patch_size)

    def forward(self, x):
        x = self.proj(x)       # (B, embed_dim, img_size // patch_size, img_size // patch_size)
        x = x.flatten(2)       # (B, embed_dim, n_patches)
        x = x.transpose(1, 2)  # (B, n_patches, embed_dim)
        return x
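As a quick sanity check (a small sketch, not part of the model itself), you can confirm the shapes PatchEmbedding produces:

# 224 / 16 = 14 patches per side, so 14 * 14 = 196 patches of dimension 768
patches = PatchEmbedding()(torch.randn(2, 3, 224, 224))
print(patches.shape)  # torch.Size([2, 196, 768])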
class VisionTransformer(nn.Module):
    def __init__(self, img_size=224, patch_size=16, in_channels=3, n_classes=1000,
                 embed_dim=768, depth=12, n_heads=12, mlp_ratio=4., dropout=0.1):
        super().__init__()
        self.patch_embed = PatchEmbedding(img_size, patch_size, in_channels, embed_dim)
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embed = nn.Parameter(torch.zeros(1, self.patch_embed.n_patches + 1, embed_dim))
        # Truncated-normal init, as in common ViT implementations
        nn.init.trunc_normal_(self.pos_embed, std=0.02)
        self.dropout = nn.Dropout(dropout)
        # ViT encoder blocks: pre-norm (norm_first=True) with GELU, as in the original paper;
        # batch_first=True makes each layer accept the (B, seq_len, embed_dim) tensors
        # produced by PatchEmbedding (without it, batch and sequence dims would be swapped).
        self.blocks = nn.ModuleList([
            nn.TransformerEncoderLayer(d_model=embed_dim, nhead=n_heads,
                                       dim_feedforward=int(embed_dim * mlp_ratio),
                                       dropout=dropout, activation='gelu',
                                       batch_first=True, norm_first=True)
            for _ in range(depth)
        ])
        self.norm = nn.LayerNorm(embed_dim)
        self.head = nn.Linear(embed_dim, n_classes)

    def forward(self, x):
        B = x.shape[0]
        x = self.patch_embed(x)                        # (B, n_patches, embed_dim)
        cls_tokens = self.cls_token.expand(B, -1, -1)  # (B, 1, embed_dim)
        x = torch.cat((cls_tokens, x), dim=1)          # prepend the [CLS] token
        x = x + self.pos_embed                         # add learnable position embeddings
        x = self.dropout(x)
        for block in self.blocks:
            x = block(x)
        x = self.norm(x)
        cls_token_final = x[:, 0]                      # classify from the [CLS] token
        x = self.head(cls_token_final)
        return x
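Before wiring up training, it is worth smoke-testing the full model on random input (a sketch; the parameter count assumes the default ViT-Base-like configuration above):

model = VisionTransformer(n_classes=10)
logits = model(torch.randn(2, 3, 224, 224))
print(logits.shape)  # torch.Size([2, 10]) -- one logit vector per image
print(f'{sum(p.numel() for p in model.parameters()) / 1e6:.1f}M parameters')  # roughly 86M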
4. Data preprocessing and loading
transform = transforms.Compose([
    transforms.Resize((224, 224)),  # CIFAR-10 images are 32x32; resize to the ViT input size
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

train_dataset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
test_dataset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)

train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=64, shuffle=False)
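Pulling a single batch confirms the loaders produce the shapes the model expects:

images, labels = next(iter(train_loader))
print(images.shape)  # torch.Size([64, 3, 224, 224])
print(labels.shape)  # torch.Size([64])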
5. Train the model
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

model = VisionTransformer(img_size=224, patch_size=16, in_channels=3, n_classes=10,
                          embed_dim=768, depth=12, n_heads=12, mlp_ratio=4., dropout=0.1).to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

for epoch in range(10):  # train for 10 epochs
    model.train()
    running_loss = 0.0
    for i, (inputs, labels) in enumerate(train_loader):
        inputs, labels = inputs.to(device), labels.to(device)

        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        if i % 100 == 99:  # print the loss every 100 batches
            print(f'Epoch [{epoch + 1}/10], Step [{i + 1}/{len(train_loader)}], Loss: {running_loss / 100:.4f}')
            running_loss = 0.0

print('Finished Training')
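A caveat on this recipe: a 12-layer ViT trained from scratch on CIFAR-10 with plain Adam at lr=0.001 typically converges slowly and to modest accuracy, since ViTs lack the inductive biases of CNNs and are data-hungry. ViT training recipes commonly use AdamW with weight decay plus a learning-rate schedule; a minimal sketch (the lr, weight_decay, and T_max values here are illustrative, not tuned):

optimizer = optim.AdamW(model.parameters(), lr=3e-4, weight_decay=0.05)
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=10)  # anneal over 10 epochs

With this setup, call scheduler.step() once at the end of each epoch in the training loop above.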
6. Evaluate the model
model.eval()
correct = 0
total = 0
with torch.no_grad():
    for inputs, labels in test_loader:
        inputs, labels = inputs.to(device), labels.to(device)
        outputs = model(inputs)
        _, predicted = torch.max(outputs, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

print(f'Accuracy of the model on the test images: {100 * correct / total:.2f}%')
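If you want a finer-grained view than overall accuracy, a small extension of the same evaluation loop reports per-class accuracy (a sketch reusing test_loader, test_dataset, and device from above):

class_correct = [0] * 10
class_total = [0] * 10
with torch.no_grad():
    for inputs, labels in test_loader:
        inputs, labels = inputs.to(device), labels.to(device)
        predicted = model(inputs).argmax(dim=1)
        for label, pred in zip(labels, predicted):
            class_total[label.item()] += 1
            class_correct[label.item()] += int(label.item() == pred.item())

for c, name in enumerate(test_dataset.classes):
    print(f'{name}: {100 * class_correct[c] / max(class_total[c], 1):.2f}%')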
7. Save the model
torch.save(model.state_dict(), 'vit_cifar10.pth')
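To resume training later, it is common to save a full checkpoint including the optimizer state rather than only the weights (a sketch; the checkpoint filename is illustrative):

torch.save({
    'model_state_dict': model.state_dict(),
    'optimizer_state_dict': optimizer.state_dict(),
    'epoch': 10,  # last completed epoch from the loop above
}, 'vit_cifar10_checkpoint.pth')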
8. Load the model
model = VisionTransformer(img_size=224, patch_size=16, in_channels=3, n_classes=10,
                          embed_dim=768, depth=12, n_heads=12, mlp_ratio=4., dropout=0.1).to(device)
model.load_state_dict(torch.load('vit_cifar10.pth', map_location=device))
model.eval()  # switch to eval mode before running inference
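Finally, note that training this model from scratch rarely matches fine-tuning a pretrained ViT on CIFAR-10. If your torchvision ships ViT weights (the weights API below assumes torchvision >= 0.13), a sketch of that route:

from torchvision.models import vit_b_16, ViT_B_16_Weights

pretrained = vit_b_16(weights=ViT_B_16_Weights.IMAGENET1K_V1)
pretrained.heads.head = nn.Linear(pretrained.heads.head.in_features, 10)  # swap in a 10-class head
pretrained = pretrained.to(device)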
The steps above show how to implement a Vision Transformer for image classification in PyTorch. In practice, you can adjust hyperparameters such as `embed_dim`, `depth`, and `n_heads` to suit different tasks and datasets.