J4 - Combining ResNet and DenseNet



Environment

  • OS: Linux
  • Language: Python 3.8.10
  • Deep learning framework: PyTorch 2.0.0+cu118
  • GPU: RTX 2080 Ti

Model Design

The original DenseNet architecture is shown below:
[DenseNet architecture diagram]
The original ResNet architecture is shown below:
[ResNet architecture diagram]
Comparing the two, ResNet's bottleneck (identity) block passes through three Conv layers, while DenseNet's dense layer uses only two. So the DenseNet layer is rewritten in the ResNet style, and the resulting network is tested.

import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict

# Residual block with BN-ReLU-Conv (pre-activation) ordering
class ResidualBlock(nn.Sequential):
    def __init__(self, kernel_size, input_size, hidden_size, drop_rate):
        super().__init__()
        
        # 1x1 conv reduces the channel count to hidden_size
        self.add_module('norm1', nn.BatchNorm2d(input_size))
        self.add_module('relu1', nn.ReLU(inplace=True))
        self.add_module('conv1', nn.Conv2d(input_size, hidden_size, kernel_size=1, bias=False))
        
        # kxk conv keeps the spatial size (stride 1, padding='same')
        self.add_module('norm2', nn.BatchNorm2d(hidden_size))
        self.add_module('relu2', nn.ReLU(inplace=True))
        self.add_module('conv2', nn.Conv2d(hidden_size, hidden_size, kernel_size=kernel_size, padding='same', bias=False))
        
        # 1x1 conv expands back to input_size channels
        self.add_module('norm3', nn.BatchNorm2d(hidden_size))
        self.add_module('relu3', nn.ReLU(inplace=True))
        self.add_module('conv3', nn.Conv2d(hidden_size, input_size, kernel_size=1, bias=False))
        
        self.drop_rate = drop_rate
        
    def forward(self, x):
        features = super().forward(x)
        if self.drop_rate > 0:
            features = F.dropout(features, p=self.drop_rate, training=self.training)
        
        # DenseNet-style: concatenate the input with the branch output instead of adding them
        return torch.concat([x, features], 1)
class DenseBlock(nn.Sequential):
    def __init__(self, num_layers, input_size, drop_rate):
        super().__init__()
        for i in range(num_layers):
            layer = ResidualBlock(3, input_size, int(input_size / 4), drop_rate)
            input_size *= 2  # each layer concatenates its output with its input, so the channel count doubles
            self.add_module('denselayer%d' % (i + 1,), layer)
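
Note that, unlike a true ResNet block, the output of conv3 is concatenated with the block input rather than added to it, so every ResidualBlock doubles the channel count and a DenseBlock with n layers multiplies it by 2^n. A minimal shape check (the sizes below are arbitrary, chosen only to illustrate this):

block = DenseBlock(num_layers=2, input_size=64, drop_rate=0)
x = torch.randn(1, 64, 56, 56)
print(block(x).shape)  # torch.Size([1, 256, 56, 56]), i.e. 64 * 2**2 channels
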
# The transition layer is unchanged from the original DenseNet
class Transition(nn.Sequential):
    def __init__(self, input_size, output_size):
        super().__init__()
        self.add_module('norm', nn.BatchNorm2d(input_size))
        self.add_module('relu', nn.ReLU())
        self.add_module('conv', nn.Conv2d(input_size, output_size, kernel_size=1, stride=1, bias=False))
        self.add_module('pool', nn.AvgPool2d(2, stride=2))
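
The transition reduces the channel count (via the 1x1 conv; half of the input with the default compression_rate=0.5) and halves the spatial resolution (via the 2x2 average pool). A quick check with illustrative sizes:

t = Transition(256, 128)
y = t(torch.randn(1, 256, 56, 56))
print(y.shape)  # torch.Size([1, 128, 28, 28])
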
# Build the customized DenseNet
class DenseNet(nn.Module):
    # Keep the model small so it is quick to test
    # (growth_rate and bn_size are kept from the original DenseNet signature but unused in this variant)
    def __init__(self, growth_rate=32, block_config=(2, 4, 3, 2),
                 init_size=64, bn_size=4, compression_rate=0.5, drop_rate=0, num_classes=1000):
        super().__init__()
        
        self.features = nn.Sequential(OrderedDict([
            ("conv0", nn.Conv2d(3, init_size, kernel_size=7, stride=2, padding=3, bias=False)),
            ('norm0', nn.BatchNorm2d(init_size)),
            ('relu0', nn.ReLU()),
            ('pool0', nn.MaxPool2d(3, stride=2, padding=1))
        ]))
        
        num_features = init_size
        for i, num_layers in enumerate(block_config):
            block = DenseBlock(num_layers, num_features, drop_rate)
            self.features.add_module('denseblock%d' % (i + 1), block)
            num_features = num_features*(2**num_layers)
            if i != len(block_config) - 1:
                transition = Transition(num_features, int(num_features*compression_rate))
                self.features.add_module('transition%d' % (i + 1), transition)
                num_features = int(num_features * compression_rate)
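        # With the defaults (init_size=64, block_config=(2,4,3,2), compression_rate=0.5),
        # num_features evolves as 64 -> 256 -> 128 -> 2048 -> 1024 -> 8192 -> 4096 -> 16384,
        # so the classifier below receives 16384 input features.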
                
        self.features.add_module('norm5', nn.BatchNorm2d(num_features))
        self.features.add_module('relu5', nn.ReLU())
        
        self.classifier = nn.Linear(num_features, num_classes)
        
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.bias, 0)
                nn.init.constant_(m.weight, 1)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)
                
    def forward(self, x):
        features = self.features(x)
        # global average pool (with a 224x224 input the final feature map is 7x7), then flatten
        out = F.avg_pool2d(features, 7, stride=1).view(features.size(0), -1)
        out = self.classifier(out)
        return out
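
As a quick sanity check, the assembled model can be run on a dummy batch. This is only a sketch; the reduced block_config and num_classes below are arbitrary values chosen to keep the test light, not settings from the experiment:

# Smoke test: instantiate a small variant and push one dummy batch through it
model = DenseNet(block_config=(1, 1, 1, 1), num_classes=10)
x = torch.randn(2, 3, 224, 224)  # stem (stride-2 conv + pool) and three transitions reduce 224x224 to 7x7
out = model(x)
print(out.shape)  # expected: torch.Size([2, 10])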