PyTorch Learning Notes (1): Implementing a Classic Convolutional Network on MNIST

Getting started with PyTorch

These notes record the basic functions and operations used while getting started with PyTorch, and reproduce a classification result on the MNIST dataset.

Importing packages and functions
import torch
from torch import nn, optim
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision import datasets

# Configuration
torch.manual_seed(1)    # fix the random seed for reproducibility
batch_size = 128        # mini-batch size
learning_rate = 1e-2    # learning rate for SGD
num_epoches = 10        # number of training epochs
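
As a quick illustration of what the seed does (a throwaway sketch, not part of the pipeline), re-seeding reproduces the same random numbers:

torch.manual_seed(1)
a = torch.rand(2)
torch.manual_seed(1)
b = torch.rand(2)
print(torch.equal(a, b))  # True: the same seed yields the same sequence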

train_dataset = datasets.MNIST(
    root='./data',
    train=True,
    transform=transforms.ToTensor(),  # converts a [0, 255] PIL Image to a [0, 1] float tensor
    download=True
)
test_dataset = datasets.MNIST(
    root='./data',
    train=False,  # use the held-out test split, not the training data
    transform=transforms.ToTensor()
)

train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)  # shuffle=True randomizes the sample order each epoch
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)   # no shuffling needed for evaluation
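
An optional sanity check (a sketch): ToTensor yields float tensors in [0, 1], and each batch from the loader is an (images, labels) pair with the shapes below.

images, labels = next(iter(train_loader))
print(images.shape)   # torch.Size([128, 1, 28, 28])
print(labels.shape)   # torch.Size([128])
print(images.min().item(), images.max().item())  # roughly 0.0 and 1.0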

Building the CNN model

We define the CNN as a class. The network consists of an input layer, two convolutional blocks, two fully connected layers, and an output layer. Each convolutional block is convolution (Conv2d) -> activation (ReLU) -> pooling (MaxPool2d), and the fully connected part is built from Linear layers.
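
To check the feature-map sizes annotated in the comments below, the standard output-size formula for a convolution or pooling layer is floor((n + 2p - k) / s) + 1. A small helper (hypothetical, for illustration only) confirms that the tensor entering the fully connected part is 5*5*16 = 400:

def out_size(n, k, s=1, p=0):  # side length of the feature map after a conv/pool layer
    return (n + 2 * p - k) // s + 1

print(out_size(28, 3, s=1, p=1))  # first conv:  28 -> 28
print(out_size(28, 2, s=2))       # first pool:  28 -> 14
print(out_size(14, 5, s=1, p=0))  # second conv: 14 -> 10
print(out_size(10, 2, s=2))       # second pool: 10 -> 5, so 5*5*16 = 400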

# Define the network
class Cnn(nn.Module):
    def __init__(self, in_dim, n_class):  # input images are 28*28*1
        super(Cnn, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(in_dim, 6, 3, stride=1, padding=1),  # 28*28*6
            nn.ReLU(True),
            nn.MaxPool2d(2, 2),                            # 14*14*6
            nn.Conv2d(6, 16, 5, stride=1, padding=0),      # 10*10*16
            nn.ReLU(True),
            nn.MaxPool2d(2, 2)                             # 5*5*16
        )

        self.fc = nn.Sequential(
            nn.Linear(400, 120),  # 400 = 5*5*16
            nn.Linear(120, 84),
            nn.Linear(84, n_class)
        )

    def forward(self, x):
        out = self.conv(x)
        out = out.view(out.size(0), 400)  # flatten to (batch_size, 400)
        out = self.fc(out)
        return out

model = Cnn(1, 10)  # 1 input channel, 10 digit classes
print(model)
   
Printing the model gives:
Cnn(
  (conv): Sequential(
    (0): Conv2d(1, 6, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (1): ReLU(inplace=True)
    (2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (3): Conv2d(6, 16, kernel_size=(5, 5), stride=(1, 1))
    (4): ReLU(inplace=True)
    (5): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (fc): Sequential(
    (0): Linear(in_features=400, out_features=120, bias=True)
    (1): Linear(in_features=120, out_features=84, bias=True)
    (2): Linear(in_features=84, out_features=10, bias=True)
  )
)
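
Before training, a quick forward pass with a dummy batch (a sketch, using the model just built) confirms the expected shapes:

dummy = torch.randn(1, 1, 28, 28)  # one fake 28x28 grayscale image
print(model(dummy).shape)          # torch.Size([1, 10]): one logit per class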
Defining the loss and optimizer
# Define the loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=learning_rate)
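
Note that nn.CrossEntropyLoss combines LogSoftmax and NLLLoss, so the model outputs raw logits and needs no softmax layer; it expects integer class labels as targets. A tiny sketch with made-up tensors:

logits = torch.randn(4, 10)           # a fake batch: 4 samples, 10 classes
targets = torch.tensor([3, 0, 9, 1])  # fake integer class labels
print(criterion(logits, targets))     # a single scalar loss tensor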

# Start training
for epoch in range(num_epoches):
    running_loss = 0.0
    running_acc = 0.0
    for i, data in enumerate(train_loader, 1):
        img, label = data  # one mini-batch of (images, labels) from the training set

        # forward pass
        out = model(img)
        loss = criterion(out, label)
        running_loss += loss.item() * label.size(0)  # .item() replaces the pre-0.4 loss.data[0]

        _, pred = torch.max(out, 1)
        num_correct = (pred == label).sum()  # number of correct predictions in this batch
        running_acc += num_correct.item()

        # backward pass
        optimizer.zero_grad()  # clear gradients so previous batches do not accumulate
        loss.backward()        # backpropagate to compute gradients
        optimizer.step()       # update the weights and biases using the gradients

    print("Train {} epoch, Loss:{:.6f} ,Acc:{:.6f}".format(epoch + 1, running_loss / len(train_dataset), running_acc / len(train_dataset)))

Training output
Train 1 epoch, Loss:2.285605 ,Acc:0.239000
Train 2 epoch, Loss:1.361690 ,Acc:0.638717
Train 3 epoch, Loss:0.411499 ,Acc:0.878000
Train 4 epoch, Loss:0.293580 ,Acc:0.912200
Train 5 epoch, Loss:0.232458 ,Acc:0.930067
Train 6 epoch, Loss:0.188559 ,Acc:0.942883
Train 7 epoch, Loss:0.158860 ,Acc:0.951783
Train 8 epoch, Loss:0.138971 ,Acc:0.958467
Train 9 epoch, Loss:0.125900 ,Acc:0.962000
Train 10 epoch, Loss:0.115519 ,Acc:0.965467
Testing the model
model.eval()  # switch to evaluation mode

eval_loss = 0
eval_acc = 0
with torch.no_grad():  # gradients are not needed for evaluation
    for data in test_loader:
        img, label = data

        out = model(img)
        loss = criterion(out, label)
        eval_loss += loss.item() * label.size(0)  # accumulate per-sample loss, as in training
        _, pred = torch.max(out, 1)
        num_correct = (pred == label).sum()
        eval_acc += num_correct.item()
print("Test Loss:{:.6f}, Acc:{:.6f}".format(eval_loss / len(test_dataset), eval_acc / len(test_dataset)))