Handwritten Digit Recognition Project

 

Prepare the data

import numpy as np

import torch

# Import the MNIST dataset bundled with torchvision

from torchvision.datasets import mnist

# Import the preprocessing (transforms) module

import torchvision.transforms as transforms

from torch.utils.data import DataLoader

# Import nn, the functional API, the optimizers, and TensorBoard logging

from torch import nn

import torch.nn.functional as F

import torch.optim as optim

from torch.utils.tensorboard import SummaryWriter

 

print("==========================================================================")

 

# Define some hyperparameters

train_batch_size = 64

test_batch_size = 128

learning_rate = 0.01

num_epochs = 20

 

print("============================================================================")

 

# Define the preprocessing pipeline: ToTensor maps pixel values to [0, 1],
# then Normalize([0.5], [0.5]) maps them to [-1, 1]

transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.5], [0.5])])

# Download the data and apply the preprocessing

train_dataset = mnist.MNIST(root='../data/', train=True, transform=transform, download=True)

test_dataset = mnist.MNIST(root='../data/', train=False, transform=transform)

# Wrap the datasets in iterable data loaders

train_loader = DataLoader(train_dataset, batch_size=train_batch_size, shuffle=True)

test_loader = DataLoader(test_dataset, batch_size=test_batch_size, shuffle=True)

 

print("===============================================================================")

examples = enumerate(train_loader)

batch_idx, (example_data, example_target) = next(examples)

 

print(example_data.shape)  # torch.Size([64, 1, 28, 28]): a batch of 64 single-channel 28x28 images

 

# Visualize a few source images

import matplotlib.pyplot as plt

 

examples = enumerate(test_loader)

batch_idx, (example_data, example_target) = next(examples)

 

fig = plt.figure()

for i in range(6):

    plt.subplot(2, 3, i+1)

    plt.tight_layout()

    plt.imshow(example_data[i][0], cmap='gray',interpolation='none')

    plt.title('Ground Truth: {}'.format(example_target[i]))

    plt.xticks([])

    plt.yticks([])

plt.show()

# Build the model

class Net(nn.Module):

    def __init__(self,in_dim,n_hidden_1,n_hidden_2,out_dim):

        super(Net,self).__init__()

        self.flatten = nn.Flatten()

        self.layer1 = nn.Sequential(nn.Linear(in_dim,n_hidden_1),nn.BatchNorm1d(n_hidden_1))

        self.layer2 = nn.Sequential(nn.Linear(n_hidden_1,n_hidden_2),nn.BatchNorm1d(n_hidden_2))

        self.out = nn.Sequential(nn.Linear(n_hidden_2,out_dim))

 

 

    def forward(self,x):

        x=self.flatten(x)

        x=F.relu(self.layer1(x))

        x=F.relu(self.layer2(x))

        # Return raw logits; nn.CrossEntropyLoss (used below) applies log_softmax internally,
        # so no softmax is needed here
        x = self.out(x)

        return x
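
As a quick sanity check (not part of the original flow), the short sketch below pushes a dummy batch through the network and confirms the output shape; the tensor sizes simply mirror the MNIST input (1x28x28) and the layer sizes used further down, and the variable names are only illustrative.

# Sanity-check sketch (illustrative): feed a fake batch through the network
check_model = Net(28 * 28, 300, 100, 10)  # same layer sizes as the model instantiated below
check_model.eval()                        # use running stats in the BatchNorm layers
dummy = torch.randn(2, 1, 28, 28)         # two fake grayscale 28x28 "images"
with torch.no_grad():
    print(check_model(dummy).shape)       # expected: torch.Size([2, 10])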

 

print("=================================================================================")

lr =0.01

momentum=0.9

# Instantiate the model

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

model = Net(28*28,300,100,10)

model.to(device)
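
# Optionally, print(model) displays the layer structure (Flatten, the two hidden blocks, and the output layer)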

 

# Define the loss function and the optimizer

criterion = nn.CrossEntropyLoss()

optimizer = optim.SGD(model.parameters(),lr=lr,momentum=momentum)

 

# Train the model

losses = []

acces = []

eval_losses=[]

eval_acces = []

writer = SummaryWriter(log_dir='logs',comment='train-loss')
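
# The curve logged below can be viewed with TensorBoard, e.g. by running: tensorboard --logdir=logs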

 

for epoch in range(num_epochs):

    train_loss=0

    train_acc=0

    model.train()

    # Decay the learning rate every 5 epochs

    if epoch % 5 == 0:

        optimizer.param_groups[0]['lr'] *= 0.9

        print("Learning rate: {:.6f}".format(optimizer.param_groups[0]["lr"]))

    for img, label in train_loader:

        img=img.to(device)

        label=label.to(device)

        # Forward pass

        out = model(img)

        loss = criterion(out,label)

        # Backward pass

        optimizer.zero_grad()

        loss.backward()

        optimizer.step()

        # Accumulate the training loss
        # (the per-epoch average is logged to TensorBoard once per epoch, below)

        train_loss += loss.item()

        # Compute the classification accuracy

        _,pred = out.max(1)

        num_correct = (pred==label).sum().item()

        acc=num_correct / img.shape[0]

        train_acc += acc

    # Log the average training loss for this epoch to TensorBoard
    writer.add_scalar("Train", train_loss / len(train_loader), epoch)

    losses.append(train_loss / len(train_loader))

    acces.append(train_acc / len(train_loader))

    # Evaluate on the test set

    eval_loss=0

    eval_acc=0

    # model.eval() switches the model to evaluation mode

    model.eval()

    # No gradients are needed during evaluation
    with torch.no_grad():

        for img, label in test_loader:

            img = img.to(device)

            label = label.to(device)

            # The model's nn.Flatten layer handles reshaping, so no view() is needed here

            out = model(img)

            loss = criterion(out, label)

            # Accumulate the loss

            eval_loss += loss.item()

            # Accumulate the accuracy

            _, pred = out.max(1)

            num_correct = (pred == label).sum().item()

            acc = num_correct / img.shape[0]

            eval_acc += acc

    eval_losses.append(eval_loss/len(test_loader))

    eval_acces.append(eval_acc / len(test_loader))

    print("epoch:{},Train Loss:{:.4f},Train Acc:{:.4f},Test Loss:{:.4f},Test Acc:{:.4f}".format(epoch,train_loss / len(train_loader),train_acc / len(train_loader),eval_loss / len(test_loader),eval_acc / len(test_loader)))
