PyTorch in Practice (2): Image Classification on MNIST with a CNN

Train a simple convolutional neural network (CNN) to classify handwritten digits from the MNIST dataset.

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms

# First define a simple ConvNet-based neural network
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, 5, 1)   # 1 input channel -> 20 feature maps, 5x5 kernel, stride 1
        self.conv2 = nn.Conv2d(20, 50, 5, 1)  # 20 -> 50 feature maps, 5x5 kernel, stride 1
        self.fc1 = nn.Linear(4*4*50, 500)     # flattened 4*4*50 = 800 features -> 500
        self.fc2 = nn.Linear(500, 10)         # 500 -> 10 class scores

    def forward(self, x):
        # x: 1 * 28 * 28 (channels * height * width)
        x = F.relu(self.conv1(x))  # -> 20 * 24 * 24
        x = F.max_pool2d(x, 2, 2)  # -> 20 * 12 * 12
        x = F.relu(self.conv2(x))  # -> 50 * 8 * 8
        x = F.max_pool2d(x, 2, 2)  # -> 50 * 4 * 4
        x = x.view(-1, 4*4*50)     # flatten: 50 * 4 * 4 = 800 features per image
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
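
As a quick sanity check of the shape annotations above, we can push a random dummy batch through an untrained Net. This is purely illustrative and not part of the training pipeline.

dummy = torch.randn(1, 1, 28, 28)  # one fake image: batch * channels * height * width
print(Net()(dummy).shape)          # expected: torch.Size([1, 10]) -- log-probabilities over 10 digits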


# Load the training set with only ToTensor in order to compute the dataset's
# mean and standard deviation; these statistics are used below to normalize the data.
mnist_data = datasets.MNIST("./data/mnist_data", train=True, download=True,
                            transform=transforms.Compose([transforms.ToTensor()]))

data = [d[0].data.cpu().numpy() for d in mnist_data]
mean, std = np.mean(data), np.std(data)
print(mean, std)  # approximately 0.1307 and 0.3081 for MNIST

# Training function: train the model for one epoch
def train(model, device, train_dataloader, optimizer, epoch):
    model.train()
    for idx, (data, target) in enumerate(train_dataloader):
        data, target = data.to(device), target.to(device)
        pred = model(data)
        loss = F.nll_loss(pred, target)
        
        # Backpropagate and take one SGD optimization step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if idx % 100 == 0:
            print("Train epoch: {}, iteration: {}, Loss: {}".format(epoch, idx, loss.item()))


# Test function: evaluate average loss and accuracy on the test set
def test(model, device, test_dataloader):
    model.eval()
    total_loss = 0.
    correct = 0.
    with torch.no_grad():
        for idx, (data, target) in enumerate(test_dataloader):
            data, target = data.to(device), target.to(device)
            output = model(data)
            total_loss += F.nll_loss(output, target, reduction="sum").item()
            pred = output.argmax(dim=1)
            correct += pred.eq(target.view_as(pred)).sum().item()
    
    total_loss /= len(test_dataloader.dataset)
    acc = correct/len(test_dataloader.dataset) * 100.
    print("Test loss: {}, Accuracy: {}".format(total_loss, acc))


device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
batch_size = 32
# Training set loader (normalized with the mean/std computed above)
train_dataloader = torch.utils.data.DataLoader(
    datasets.MNIST("./data/mnist_data", train=True, download=True,
                   transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((mean,), (std,))])),
    batch_size=batch_size, shuffle=True, num_workers=1, pin_memory=True)
# Test set loader
test_dataloader = torch.utils.data.DataLoader(
    datasets.MNIST("./data/mnist_data", train=False, download=True,
                   transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((mean,), (std,))])),
    batch_size=batch_size, shuffle=False, num_workers=1, pin_memory=True)

lr = 0.01
momentum = 0.5
model = Net().to(device)
optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum)

num_epochs = 2
for epoch in range(num_epochs):
    train(model, device, train_dataloader, optimizer, epoch)
    test(model, device, test_dataloader)

# Save the trained weights (make sure the ./model directory exists first)
torch.save(model.state_dict(), "./model/mnist_cnn.pt")
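
To reuse the trained network later, rebuild the same Net and load the saved state dict back in. The snippet below is a minimal sketch: it assumes the file saved above exists and reuses the existing test_dataloader only to demonstrate a prediction.

# Minimal reload-and-predict sketch (illustrative)
reloaded = Net().to(device)
reloaded.load_state_dict(torch.load("./model/mnist_cnn.pt", map_location=device))
reloaded.eval()
with torch.no_grad():
    images, labels = next(iter(test_dataloader))
    preds = reloaded(images.to(device)).argmax(dim=1)
    print("predicted:", preds[:10].tolist())
    print("labels:   ", labels[:10].tolist())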