Multi-layer fully-connected networks on the MNIST handwritten-digit dataset with PyTorch (plus ResNet34)

# -*- coding: utf-8 -*-

import os
os.environ['CUDA_DEVICE_ORDER']="PCI_BUS_ID"
os.environ['CUDA_VISIBLE_DEVICES']='0'
import torch
from torch import nn
from torch import optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

# A simple three-layer fully-connected network
class simpleNet(nn.Module):
    def __init__(self, in_dim, n_hidden_1, n_hidden_2, out_dim):
        super(simpleNet, self).__init__()
        self.layer1 = nn.Linear(in_dim, n_hidden_1)
        self.layer2 = nn.Linear(n_hidden_1, n_hidden_2)
        self.layer3 = nn.Linear(n_hidden_2, out_dim)

    def forward(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        return x
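
Without activation functions, the three stacked Linear layers collapse into a single affine map, so simpleNet is no more expressive than one Linear layer. A minimal check of this (the dimensions here are made up for the demonstration):

net = simpleNet(4, 8, 8, 2)
with torch.no_grad():
    # Fold W3(W2(W1 x + b1) + b2) + b3 into a single W x + b
    W1, b1 = net.layer1.weight, net.layer1.bias
    W2, b2 = net.layer2.weight, net.layer2.bias
    W3, b3 = net.layer3.weight, net.layer3.bias
    W = W3 @ W2 @ W1
    b = W3 @ (W2 @ b1 + b2) + b3
    x = torch.randn(5, 4)
    print(torch.allclose(net(x), x @ W.T + b, atol=1e-5))  # True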

# Add activation functions
class Activation_Net(nn.Module):
    def __init__(self, in_dim, n_hidden_1, n_hidden_2, out_dim):
        super(Activation_Net,self).__init__()
        # nn.Sequential groups layers together, e.g. nn.Linear() and nn.ReLU() are combined here as self.layer1
        self.layer1 = nn.Sequential(nn.Linear(in_dim, n_hidden_1), nn.ReLU(True))
        self.layer2 = nn.Sequential(nn.Linear(n_hidden_1, n_hidden_2), nn.ReLU(True))
        # Note: a ReLU on the output layer is unusual (it clips negative logits before
        # CrossEntropyLoss); it is kept here to match the original code
        self.layer3 = nn.Sequential(nn.Linear(n_hidden_2, out_dim), nn.ReLU(True))

    def forward(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        return x


# Add batch normalization
class Batch_Net(nn.Module):
    def __init__(self, in_dim, n_hidden_1, n_hidden_2, out_dim):
        super(Batch_Net, self).__init__()
        # BatchNorm1d sits between each Linear layer and its activation; the output layer keeps raw logits
        self.layer1 = nn.Sequential(nn.Linear(in_dim, n_hidden_1), nn.BatchNorm1d(n_hidden_1), nn.ReLU(True))
        self.layer2 = nn.Sequential(nn.Linear(n_hidden_1, n_hidden_2), nn.BatchNorm1d(n_hidden_2), nn.ReLU(True))
        self.layer3 = nn.Sequential(nn.Linear(n_hidden_2, out_dim))

    def forward(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        return x
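
As a quick sanity check of the three models above (a minimal sketch; the layer sizes mirror the ones used for training below):

for Net in (simpleNet, Activation_Net, Batch_Net):
    m = Net(28 * 28, 300, 100, 10)
    m.train()
    x = torch.randn(32, 28 * 28)     # a dummy batch of flattened 28x28 images
    print(Net.__name__, m(x).shape)  # torch.Size([32, 10])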


batch_size = 32
learning_rate = 1e-2
num_epoches = 30


# Compose chains all preprocessing operations together
data_ft = transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.5], [0.5])])
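
ToTensor scales pixel values into [0, 1], and Normalize([0.5], [0.5]) then computes (x - 0.5) / 0.5, mapping them into [-1, 1]. A quick illustrative check:

t = transforms.Normalize([0.5], [0.5])
img = torch.tensor([[[0.0, 0.5, 1.0]]])  # a fake 1x1x3 "image" already in [0, 1]
print(t(img))                            # tensor([[[-1., 0., 1.]]])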

# load data
train_dataset = datasets.MNIST(root='./data', train=True, transform=data_ft, download=True)
test_dataset = datasets.MNIST(root='./data', train=False, transform=data_ft)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True)

# Instantiate the network and define the loss and optimizer
model = Batch_Net(28 * 28, 300, 100, 10)
if torch.cuda.is_available():
    model = model.cuda()

criterion = nn.CrossEntropyLoss()  # expects raw logits, so the model's output has no softmax
optimizer = optim.SGD(model.parameters(), lr=learning_rate)

for epoch in range(num_epoches):
    running_loss = 0.0
    for img, label in train_loader:
        img = img.view(img.size(0), -1)  # flatten 28x28 images to 784-dim vectors
        if torch.cuda.is_available():
            img = img.cuda()
            label = label.cuda()
        out = model(img)
        loss = criterion(out, label)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    print("epoch {}, loss is {:.6f}".format(epoch + 1, running_loss / len(train_loader)))


model.eval()
eval_loss = 0.0
eval_acc = 0.0
with torch.no_grad():
    for img, label in test_loader:
        img = img.view(img.size(0), -1)
        if torch.cuda.is_available():
            img = img.cuda()
            label = label.cuda()
        out = model(img)
        loss = criterion(out, label)
        eval_loss += loss.item() * label.size(0)
        _, pred = torch.max(out, 1)
        eval_acc += (pred == label).sum().item()
print('Test Loss: {:.6f}'.format(eval_loss / len(test_dataset)))
print('Test Acc: {:.6f}'.format(eval_acc / len(test_dataset)))




The experimental results are:

Below is the ResNet34 implementation:

# -*- coding: utf-8 -*-
import torch
from torch import nn
from torch.nn import functional as F


class ResidualBlock(nn.Module):
    def __init__(self, in_channel, out_channel, stride=1, short_cut=None):
        super(ResidualBlock, self).__init__()
        self.left = nn.Sequential(
            nn.Conv2d(in_channel, out_channel, 3, stride, 1, bias=False),
            nn.BatchNorm2d(out_channel),
            # inplace=True writes the output directly over the input, saving memory / GPU memory
            nn.ReLU(inplace=True),
            # the second conv always uses stride 1; any downsampling happens in the first conv,
            # otherwise the left branch and the shortcut would produce mismatched shapes
            nn.Conv2d(out_channel, out_channel, 3, 1, 1, bias=False),
            nn.BatchNorm2d(out_channel))
        self.right = short_cut

    def forward(self, x):
        out = self.left(x)
        residual = x if self.right is None else self.right(x)
        out += residual
        return F.relu(out)
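
A quick shape check for the block (a minimal sketch; the 1x1 shortcut here mirrors the projection built in _make_layer below):

shortcut = nn.Sequential(
    nn.Conv2d(64, 128, 1, 2, bias=False),
    nn.BatchNorm2d(128))
block = ResidualBlock(64, 128, stride=2, short_cut=shortcut)
x = torch.randn(1, 64, 56, 56)
print(block(x).shape)  # torch.Size([1, 128, 28, 28])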



class ResNet(nn.Module):
    def __init__(self, num_class=1000):
        super(ResNet, self).__init__()
        # stem: 7x7 conv + max pool, as in the original ResNet
        self.pre = nn.Sequential(
            nn.Conv2d(3, 64, 7, 2, 3, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(3, 2, 1)
        )
        # the 3/4/6/3 block counts are what make this ResNet34
        self.layer1 = self._make_layer(64, 128, 3)
        self.layer2 = self._make_layer(128, 256, 4, stride=2)
        self.layer3 = self._make_layer(256, 512, 6, stride=2)
        self.layer4 = self._make_layer(512, 512, 3, stride=2)
        self.fc = nn.Linear(512, num_class)

    def _make_layer(self, in_channel, out_channel, block_num, stride=1):
        # the first block of each layer changes the channel count (and possibly downsamples),
        # so its shortcut needs a 1x1 projection; the remaining blocks use an identity shortcut
        short_cut = nn.Sequential(
            nn.Conv2d(in_channel, out_channel, 1, stride, bias=False),
            nn.BatchNorm2d(out_channel)
        )
        layers = []
        layers.append(ResidualBlock(in_channel, out_channel, stride, short_cut))
        for _ in range(1, block_num):
            layers.append(ResidualBlock(out_channel, out_channel))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.pre(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = F.avg_pool2d(x, 7)  # global average pool for 224x224 inputs
        x = x.view(x.size(0), -1)
        return self.fc(x)
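
A usage sketch (illustrative only): this network expects 3-channel, 224x224 input, so using it on MNIST would require adapting the first convolution to 1 channel and resizing the images.

model = ResNet(num_class=1000)
x = torch.randn(2, 3, 224, 224)  # a dummy batch of ImageNet-sized images
print(model(x).shape)            # torch.Size([2, 1000])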



 
