import numpy as np
import torch
from torch import nn
from torch.autograd import Variable
import torch.nn.functional as F
Defining the residual block
def conv3x3(in_channel, out_channel, stride=1):
    # 3x3 convolution with padding=1, so the spatial size only changes via stride
    return nn.Conv2d(in_channel, out_channel, 3, stride=stride, padding=1, bias=False)
class residual_block(nn.Module):
    def __init__(self, in_channel, out_channel, same_shape=True):
        super(residual_block, self).__init__()
        self.same_shape = same_shape
        stride = 1 if self.same_shape else 2  # halve the spatial size when the shape changes
        self.conv1 = conv3x3(in_channel, out_channel, stride=stride)
        self.bn1 = nn.BatchNorm2d(out_channel)
        self.conv2 = conv3x3(out_channel, out_channel)
        self.bn2 = nn.BatchNorm2d(out_channel)
        if not self.same_shape:
            # 1x1 convolution that reshapes the shortcut to match the main branch
            self.conv3 = nn.Conv2d(in_channel, out_channel, 1, stride=stride)

    def forward(self, x):
        out = self.conv1(x)
        out = F.relu(self.bn1(out), True)
        out = self.conv2(out)
        out = F.relu(self.bn2(out), True)
        if not self.same_shape:
            x = self.conv3(x)
        return F.relu(x + out, True)
The residual connection shows up in the final x + out. When the block's input and output have the same shape, out and x can be added directly; when they differ, the 1x1 convolution conv3 first adjusts the shortcut's channel count and spatial size so that the addition is valid.
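As a quick sanity check (a standalone sketch, not part of the original code; the 32-to-64 channel change is a hypothetical example), a 1x1 convolution with stride 2 reshapes the shortcut to match a downsampled main branch:

# Hypothetical standalone check: a 1x1 conv with stride 2 maps the shortcut
# from (1, 32, 96, 96) to (1, 64, 48, 48), matching a main branch that both
# doubles the channels and halves the spatial size.
shortcut = nn.Conv2d(32, 64, 1, stride=2)
x = torch.zeros(1, 32, 96, 96)
print(shortcut(x).shape)  # torch.Size([1, 64, 48, 48])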
Testing the residual block
(1) When the input and output shapes are the same
test_net = residual_block(32, 32)
test_x = Variable(torch.zeros(1, 32, 96, 96))
print('input: {}'.format(test_x.shape))
test_y = test_net(test_x)
print('output: {}'.format(test_y.shape))
Output:
input: torch.Size([1, 32, 96, 96])
output: torch.Size([1, 32, 96, 96])
(2) When the input and output shapes differ
test_net = residual_block(32, 32, False)
test_x = Variable(torch.zeros(1, 32, 96, 96))
print('input: {}'.format(test_x.shape))
test_y = test_net(test_x)
print('output: {}'.format(test_y.shape))
Output:
input: torch.Size([1, 32, 96, 96])
output: torch.Size([1, 32, 48, 48])
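The halving from 96 to 48 follows from the standard convolution output-size formula; a small check (a hypothetical helper, not from the original post):

# Hypothetical helper: the usual output-size formula for a convolution.
def conv_out_size(size, kernel, stride, padding):
    return (size + 2 * padding - kernel) // stride + 1

# First conv of the block: 3x3, stride 2, padding 1 -> 96 becomes 48.
print(conv_out_size(96, kernel=3, stride=2, padding=1))  # 48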
Building the ResNet model
class resnet(nn.Module):
    def __init__(self, in_channel, num_classes, verbose=False):
        super(resnet, self).__init__()
        self.verbose = verbose
        # Stem: 7x7 conv with stride 2 and no padding, so 96 -> 45
        self.block1 = nn.Conv2d(in_channel, 64, 7, 2)
        self.block2 = nn.Sequential(
            nn.MaxPool2d(3, 2),
            residual_block(64, 64),
            residual_block(64, 64)
        )
        self.block3 = nn.Sequential(
            residual_block(64, 128, False),
            residual_block(128, 128)
        )
        self.block4 = nn.Sequential(
            residual_block(128, 256, False),
            residual_block(256, 256)
        )
        self.block5 = nn.Sequential(
            residual_block(256, 512, False),
            residual_block(512, 512),
            nn.AvgPool2d(3)
        )
        self.classifier = nn.Linear(512, num_classes)

    def forward(self, x):
        x = self.block1(x)
        if self.verbose:
            print('block 1 output: {}'.format(x.shape))
        x = self.block2(x)
        if self.verbose:
            print('block 2 output: {}'.format(x.shape))
        x = self.block3(x)
        if self.verbose:
            print('block 3 output: {}'.format(x.shape))
        x = self.block4(x)
        if self.verbose:
            print('block 4 output: {}'.format(x.shape))
        x = self.block5(x)
        if self.verbose:
            print('block 5 output: {}'.format(x.shape))
        x = x.view(x.shape[0], -1)  # flatten to (batch, 512)
        x = self.classifier(x)
        return x
test_net = resnet(3, 10, True)
test_x = Variable(torch.zeros(1, 3, 96, 96))
test_y = test_net(test_x)
print('output: {}'.format(test_y.shape))
Output:
block 1 output: torch.Size([1, 64, 45, 45])
block 2 output: torch.Size([1, 64, 22, 22])
block 3 output: torch.Size([1, 128, 11, 11])
block 4 output: torch.Size([1, 256, 6, 6])
block 5 output: torch.Size([1, 512, 1, 1])
output: torch.Size([1, 10])
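The spatial sizes in this trace can be verified with the same output-size formula (reusing the hypothetical conv_out_size helper sketched earlier; pooling layers follow the same rule):

# Reusing the hypothetical conv_out_size helper from above:
print(conv_out_size(96, kernel=7, stride=2, padding=0))  # block 1 stem conv: 45
print(conv_out_size(45, kernel=3, stride=2, padding=0))  # max pool in block 2: 22
print(conv_out_size(22, kernel=3, stride=2, padding=1))  # first conv of block 3: 11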
This post showed how to implement a ResNet model in PyTorch, focusing on the design of the residual block: when the input and output shapes match, the two are added directly; otherwise a 1x1 convolution adjusts the shortcut so the addition works. It also walked through testing the block and assembling the full ResNet.
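To put the model to work, here is a minimal training-step sketch. The setup is assumed, not from the original post: a random stand-in batch instead of a real dataset, cross-entropy loss, and SGD.

# Assumed setup: random stand-in data, cross-entropy loss, SGD optimizer.
model = resnet(3, 10)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)

images = torch.randn(8, 3, 96, 96)   # fake batch of 8 RGB 96x96 images
labels = torch.randint(0, 10, (8,))  # fake class labels in [0, 10)

optimizer.zero_grad()
logits = model(images)               # shape: (8, 10)
loss = criterion(logits, labels)
loss.backward()
optimizer.step()
print('loss: {:.4f}'.format(loss.item()))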