Week J2: ResNet50V2 — Hands-On Implementation and Analysis

My environment

  • Language: Python 3.11.9
  • Development tool: Jupyter Lab
  • Deep learning environment:
    • torch==2.3.1+cu121
    • torchvision==0.18.1+cu121

I. This Week's Content and Personal Takeaways

1. Differences between v1 and v2

ResNet50V2 is an improved version of ResNet proposed by Kaiming He et al.; the main changes concern the residual structure and the placement of the activation function. In the original ResNet, each residual branch performs the convolution first, then BN and the activation, and applies a ReLU after the addition. In ResNetV2, BN and the activation come before the convolution (pre-activation), and the ReLU that used to follow the addition is moved inside the residual branch.

The core difference between ResNet v1 and v2 lies in the internal structure of the residual block (see the code sketch after this list):
[figure: side-by-side comparison of the v1 and v2 residual blocks]
1. Pre-activation design:
v1: uses the "Conv -> BN -> ReLU" ordering
v2: uses the "BN -> ReLU -> Conv" ordering (pre-activation)
2. Signal path:
v1: the identity branch is added to the main branch, and a final ReLU follows the addition
v2: the identity branch is added to the main branch directly, with no ReLU after the addition
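
To make the two orderings concrete, here is a minimal two-convolution (basic-block) sketch of both styles. It is illustrative only; the bottleneck blocks actually used by ResNet50V2 appear in the full code in section II:

import torch
import torch.nn as nn

class BlockV1(nn.Module):
    """v1: Conv -> BN -> ReLU inside the branch, ReLU after the addition."""
    def __init__(self, channels):
        super().__init__()
        self.conv1 = nn.Conv2d(channels, channels, 3, padding=1)
        self.bn1 = nn.BatchNorm2d(channels)
        self.conv2 = nn.Conv2d(channels, channels, 3, padding=1)
        self.bn2 = nn.BatchNorm2d(channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))   # no activation before the addition
        return self.relu(out + x)         # final ReLU after the addition

class BlockV2(nn.Module):
    """v2: BN -> ReLU -> Conv (pre-activation), identity added with no final ReLU."""
    def __init__(self, channels):
        super().__init__()
        self.bn1 = nn.BatchNorm2d(channels)
        self.conv1 = nn.Conv2d(channels, channels, 3, padding=1)
        self.bn2 = nn.BatchNorm2d(channels)
        self.conv2 = nn.Conv2d(channels, channels, 3, padding=1)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.conv1(self.relu(self.bn1(x)))   # pre-activation
        out = self.conv2(self.relu(self.bn2(out)))
        return out + x                              # no ReLU after the addition

x = torch.randn(1, 64, 56, 56)
print(BlockV1(64)(x).shape, BlockV2(64)(x).shape)  # both: torch.Size([1, 64, 56, 56])

Because the v2 addition is a pure identity path with no nonlinearity after it, gradients can flow through the shortcut unimpeded, which is the motivation for the pre-activation design.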

2. Main structure of v2

class ResNet50V2(nn.Module):
    def __init__(self, num_classes=1000):
        super(ResNet50V2, self).__init__()
        
        # 1. Stem
        self.conv1 = nn.Sequential(
            nn.ZeroPad2d(3),
            nn.Conv2d(3, 64, kernel_size=7, stride=2),
            nn.ZeroPad2d(1),
            nn.MaxPool2d(kernel_size=3, stride=2)
        )
        
        # 2. Stacked residual blocks
        self.conv2_x = self._make_layer(64, 64, blocks=3)    # outputs 256 channels
        self.conv3_x = self._make_layer(256, 128, blocks=4)  # outputs 512 channels
        self.conv4_x = self._make_layer(512, 256, blocks=6)  # outputs 1024 channels
        self.conv5_x = self._make_layer(1024, 512, blocks=3) # outputs 2048 channels
        
        # 3. Head (final BN/ReLU, pooling, classifier)
        self.post_bn = nn.BatchNorm2d(2048)
        self.post_relu = nn.ReLU(inplace=True)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(2048, num_classes)
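
The `_make_layer` helper referenced above is omitted from this overview. A minimal sketch, consistent with the `_make_stack2` method in the full code below (`ResBlock` is defined there as well; each bottleneck block expands its output to filters * 4 channels):

    def _make_layer(self, in_channels, filters, blocks, stride1=2):
        # The first block may downsample (stride1) and changes the channel count;
        # the remaining blocks consume the 4x-expanded channels of their predecessor.
        layers = [ResBlock(in_channels, filters, stride=stride1)]
        for _ in range(1, blocks):
            layers.append(ResBlock(filters * 4, filters))
        return nn.Sequential(*layers)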

II. Core Code and Run Results

Model code:

import torch
import torch.nn as nn

class ResNet50V2(nn.Module):
    def __init__(self, 
                 include_top=True,      # include the top fully connected layer?
                 preact=True,           # use pre-activation?
                 use_bias=True,         # use bias in the stem convolution?
                 input_shape=(3, 224, 224),  # input shape (C, H, W)
                 pooling=None,          # pooling mode when include_top=False
                 num_classes=1000,      # number of classes
                 classifier_activation='softmax'): # classifier activation
        super(ResNet50V2, self).__init__()
        
        self.include_top = include_top
        self.preact = preact
        self.pooling = pooling
        
        # Stem
        self.conv1_pad = nn.ZeroPad2d(3)
        self.conv1 = nn.Conv2d(input_shape[0], 64, kernel_size=7, 
                              stride=2, bias=use_bias)
        
        if not preact:
            self.conv1_bn = nn.BatchNorm2d(64)
            self.conv1_relu = nn.ReLU(inplace=True)
            
        self.pool1_pad = nn.ZeroPad2d(1)
        self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2)
        
        # Stacked residual blocks
        self.conv2 = self._make_stack2(64, 64, 3)
        self.conv3 = self._make_stack2(256, 128, 4)
        self.conv4 = self._make_stack2(512, 256, 6)
        self.conv5 = self._make_stack2(1024, 512, 3, stride1=1)
        
        if preact:
            self.post_bn = nn.BatchNorm2d(2048)
            self.post_relu = nn.ReLU(inplace=True)
            
        if include_top:
            self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
            self.fc = nn.Linear(2048, num_classes)
            if classifier_activation == 'softmax':
                self.activation = nn.Softmax(dim=1)
                
    def _make_stack2(self, in_channels, filters, blocks, stride1=2):
        # The first block downsamples and changes the channel count;
        # the rest keep the 4x-expanded channel count.
        layers = []
        layers.append(ResBlock(in_channels, filters, stride=stride1))
        for _ in range(1, blocks):
            layers.append(ResBlock(filters * 4, filters))
        return nn.Sequential(*layers)
        
    def forward(self, x):
        # Stem
        x = self.conv1_pad(x)
        x = self.conv1(x)
        
        if not self.preact:
            x = self.conv1_bn(x)
            x = self.conv1_relu(x)
            
        x = self.pool1_pad(x)
        x = self.pool1(x)
        
        # Residual stacks
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = self.conv5(x)
        
        if self.preact:
            x = self.post_bn(x)
            x = self.post_relu(x)
            
        if self.include_top:
            x = self.avg_pool(x)
            x = torch.flatten(x, 1)
            x = self.fc(x)
            if hasattr(self, 'activation'):
                x = self.activation(x)
        else:
            if self.pooling == 'avg':
                x = nn.functional.adaptive_avg_pool2d(x, (1, 1))
                x = torch.flatten(x, 1)
            elif self.pooling == 'max':
                x = nn.functional.adaptive_max_pool2d(x, (1, 1))
                x = torch.flatten(x, 1)
                
        return x

class ResBlock(nn.Module):
    def __init__(self, in_channels, filters, stride=1):
        super(ResBlock, self).__init__()
        
        self.bn1 = nn.BatchNorm2d(in_channels)
        self.conv1 = nn.Conv2d(in_channels, filters, kernel_size=1, stride=stride)
        
        self.bn2 = nn.BatchNorm2d(filters)
        self.conv2 = nn.Conv2d(filters, filters, kernel_size=3, stride=1, padding=1)
        
        self.bn3 = nn.BatchNorm2d(filters)
        self.conv3 = nn.Conv2d(filters, filters * 4, kernel_size=1)
        
        self.relu = nn.ReLU(inplace=True)
        
        # Shortcut connection: 1x1 conv when the shape changes, identity otherwise
        if stride != 1 or in_channels != filters * 4:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, filters * 4, kernel_size=1, stride=stride)
            )
        else:
            self.shortcut = nn.Identity()
            
    def forward(self, x):
        identity = self.shortcut(x)
        
        out = self.bn1(x)
        out = self.relu(out)
        out = self.conv1(out)
        
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv2(out)
        
        out = self.bn3(out)
        out = self.relu(out)
        out = self.conv3(out)
        
        out += identity
        return out

# Build the model
model = ResNet50V2(
    include_top=True,
    preact=True,
    input_shape=(3, 224, 224),
    num_classes=1000
)

model

Output:

ResNet50V2(
  (conv1_pad): ZeroPad2d((3, 3, 3, 3))
  (conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2))
  (pool1_pad): ZeroPad2d((1, 1, 1, 1))
  (pool1): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
  (conv2): Sequential(
    (0): ResBlock(
      (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv1): Conv2d(64, 64, kernel_size=(1, 1), stride=(2, 2))
      (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (bn3): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1))
      (relu): ReLU(inplace=True)
      (shortcut): Sequential(
        (0): Conv2d(64, 256, kernel_size=(1, 1), stride=(2, 2))
      )
    )
    (1): ResBlock(
      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1))
      (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (bn3): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1))
      (relu): ReLU(inplace=True)
      (shortcut): Identity()
    )
    (2): ResBlock(
      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1))
      (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (bn3): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1))
      (relu): ReLU(inplace=True)
      (shortcut): Identity()
    )
  )
  (conv3): Sequential(
    (0): ResBlock(
      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv1): Conv2d(256, 128, kernel_size=(1, 1), stride=(2, 2))
      (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (bn3): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1))
      (relu): ReLU(inplace=True)
      (shortcut): Sequential(
        (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2))
      )
    )
    (1): ResBlock(
      (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1))
      (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (bn3): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1))
      (relu): ReLU(inplace=True)
      (shortcut): Identity()
    )
    (2): ResBlock(
      (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1))
      (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (bn3): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1))
      (relu): ReLU(inplace=True)
      (shortcut): Identity()
    )
    (3): ResBlock(
      (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1))
      (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (bn3): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1))
      (relu): ReLU(inplace=True)
      (shortcut): Identity()
    )
  )
  (conv4): Sequential(
    (0): ResBlock(
      (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv1): Conv2d(512, 256, kernel_size=(1, 1), stride=(2, 2))
      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1))
      (relu): ReLU(inplace=True)
      (shortcut): Sequential(
        (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(2, 2))
      )
    )
    (1): ResBlock(
      (bn1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1))
      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1))
      (relu): ReLU(inplace=True)
      (shortcut): Identity()
    )
    (2): ResBlock(
      (bn1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1))
      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1))
      (relu): ReLU(inplace=True)
      (shortcut): Identity()
    )
    (3): ResBlock(
      (bn1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1))
      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1))
      (relu): ReLU(inplace=True)
      (shortcut): Identity()
    )
    (4): ResBlock(
      (bn1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1))
      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1))
      (relu): ReLU(inplace=True)
      (shortcut): Identity()
    )
    (5): ResBlock(
      (bn1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1))
      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1))
      (relu): ReLU(inplace=True)
      (shortcut): Identity()
    )
  )
  (conv5): Sequential(
    (0): ResBlock(
      (bn1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv1): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1))
      (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1))
      (relu): ReLU(inplace=True)
      (shortcut): Sequential(
        (0): Conv2d(1024, 2048, kernel_size=(1, 1), stride=(1, 1))
      )
    )
    (1): ResBlock(
      (bn1): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1))
      (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1))
      (relu): ReLU(inplace=True)
      (shortcut): Identity()
    )
    (2): ResBlock(
      (bn1): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1))
      (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1))
      (relu): ReLU(inplace=True)
      (shortcut): Identity()
    )
  )
  (post_bn): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (post_relu): ReLU(inplace=True)
  (avg_pool): AdaptiveAvgPool2d(output_size=(1, 1))
  (fc): Linear(in_features=2048, out_features=1000, bias=True)
  (activation): Softmax(dim=1)
)
from torchsummary import summary
import torch
from torch import nn

# Move the model to the GPU if one is available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)

# Inspect the model structure with torchsummary
summary(model, (3, 224, 224))

Output:

----------------------------------------------------------------
        Layer (type)               Output Shape         Param #
================================================================
         ZeroPad2d-1          [-1, 3, 230, 230]               0
            Conv2d-2         [-1, 64, 112, 112]           9,472
         ZeroPad2d-3         [-1, 64, 114, 114]               0
         MaxPool2d-4           [-1, 64, 56, 56]               0
            Conv2d-5          [-1, 256, 28, 28]          16,640
       BatchNorm2d-6           [-1, 64, 56, 56]             128
              ReLU-7           [-1, 64, 56, 56]               0
            Conv2d-8           [-1, 64, 28, 28]           4,160
       BatchNorm2d-9           [-1, 64, 28, 28]             128
             ReLU-10           [-1, 64, 28, 28]               0
           Conv2d-11           [-1, 64, 28, 28]          36,928
      BatchNorm2d-12           [-1, 64, 28, 28]             128
             ReLU-13           [-1, 64, 28, 28]               0
           Conv2d-14          [-1, 256, 28, 28]          16,640
         ResBlock-15          [-1, 256, 28, 28]               0
         Identity-16          [-1, 256, 28, 28]               0
      BatchNorm2d-17          [-1, 256, 28, 28]             512
             ReLU-18          [-1, 256, 28, 28]               0
           Conv2d-19           [-1, 64, 28, 28]          16,448
      BatchNorm2d-20           [-1, 64, 28, 28]             128
             ReLU-21           [-1, 64, 28, 28]               0
           Conv2d-22           [-1, 64, 28, 28]          36,928
      BatchNorm2d-23           [-1, 64, 28, 28]             128
             ReLU-24           [-1, 64, 28, 28]               0
           Conv2d-25          [-1, 256, 28, 28]          16,640
         ResBlock-26          [-1, 256, 28, 28]               0
         Identity-27          [-1, 256, 28, 28]               0
      BatchNorm2d-28          [-1, 256, 28, 28]             512
             ReLU-29          [-1, 256, 28, 28]               0
           Conv2d-30           [-1, 64, 28, 28]          16,448
      BatchNorm2d-31           [-1, 64, 28, 28]             128
             ReLU-32           [-1, 64, 28, 28]               0
           Conv2d-33           [-1, 64, 28, 28]          36,928
      BatchNorm2d-34           [-1, 64, 28, 28]             128
             ReLU-35           [-1, 64, 28, 28]               0
           Conv2d-36          [-1, 256, 28, 28]          16,640
         ResBlock-37          [-1, 256, 28, 28]               0
           Conv2d-38          [-1, 512, 14, 14]         131,584
      BatchNorm2d-39          [-1, 256, 28, 28]             512
             ReLU-40          [-1, 256, 28, 28]               0
           Conv2d-41          [-1, 128, 14, 14]          32,896
      BatchNorm2d-42          [-1, 128, 14, 14]             256
             ReLU-43          [-1, 128, 14, 14]               0
           Conv2d-44          [-1, 128, 14, 14]         147,584
      BatchNorm2d-45          [-1, 128, 14, 14]             256
             ReLU-46          [-1, 128, 14, 14]               0
           Conv2d-47          [-1, 512, 14, 14]          66,048
         ResBlock-48          [-1, 512, 14, 14]               0
         Identity-49          [-1, 512, 14, 14]               0
      BatchNorm2d-50          [-1, 512, 14, 14]           1,024
             ReLU-51          [-1, 512, 14, 14]               0
           Conv2d-52          [-1, 128, 14, 14]          65,664
      BatchNorm2d-53          [-1, 128, 14, 14]             256
             ReLU-54          [-1, 128, 14, 14]               0
           Conv2d-55          [-1, 128, 14, 14]         147,584
      BatchNorm2d-56          [-1, 128, 14, 14]             256
             ReLU-57          [-1, 128, 14, 14]               0
           Conv2d-58          [-1, 512, 14, 14]          66,048
         ResBlock-59          [-1, 512, 14, 14]               0
         Identity-60          [-1, 512, 14, 14]               0
      BatchNorm2d-61          [-1, 512, 14, 14]           1,024
             ReLU-62          [-1, 512, 14, 14]               0
           Conv2d-63          [-1, 128, 14, 14]          65,664
      BatchNorm2d-64          [-1, 128, 14, 14]             256
             ReLU-65          [-1, 128, 14, 14]               0
           Conv2d-66          [-1, 128, 14, 14]         147,584
      BatchNorm2d-67          [-1, 128, 14, 14]             256
             ReLU-68          [-1, 128, 14, 14]               0
           Conv2d-69          [-1, 512, 14, 14]          66,048
         ResBlock-70          [-1, 512, 14, 14]               0
         Identity-71          [-1, 512, 14, 14]               0
      BatchNorm2d-72          [-1, 512, 14, 14]           1,024
             ReLU-73          [-1, 512, 14, 14]               0
           Conv2d-74          [-1, 128, 14, 14]          65,664
      BatchNorm2d-75          [-1, 128, 14, 14]             256
             ReLU-76          [-1, 128, 14, 14]               0
           Conv2d-77          [-1, 128, 14, 14]         147,584
      BatchNorm2d-78          [-1, 128, 14, 14]             256
             ReLU-79          [-1, 128, 14, 14]               0
           Conv2d-80          [-1, 512, 14, 14]          66,048
         ResBlock-81          [-1, 512, 14, 14]               0
           Conv2d-82           [-1, 1024, 7, 7]         525,312
      BatchNorm2d-83          [-1, 512, 14, 14]           1,024
             ReLU-84          [-1, 512, 14, 14]               0
           Conv2d-85            [-1, 256, 7, 7]         131,328
      BatchNorm2d-86            [-1, 256, 7, 7]             512
             ReLU-87            [-1, 256, 7, 7]               0
           Conv2d-88            [-1, 256, 7, 7]         590,080
      BatchNorm2d-89            [-1, 256, 7, 7]             512
             ReLU-90            [-1, 256, 7, 7]               0
           Conv2d-91           [-1, 1024, 7, 7]         263,168
         ResBlock-92           [-1, 1024, 7, 7]               0
         Identity-93           [-1, 1024, 7, 7]               0
      BatchNorm2d-94           [-1, 1024, 7, 7]           2,048
             ReLU-95           [-1, 1024, 7, 7]               0
           Conv2d-96            [-1, 256, 7, 7]         262,400
      BatchNorm2d-97            [-1, 256, 7, 7]             512
             ReLU-98            [-1, 256, 7, 7]               0
           Conv2d-99            [-1, 256, 7, 7]         590,080
     BatchNorm2d-100            [-1, 256, 7, 7]             512
            ReLU-101            [-1, 256, 7, 7]               0
          Conv2d-102           [-1, 1024, 7, 7]         263,168
        ResBlock-103           [-1, 1024, 7, 7]               0
        Identity-104           [-1, 1024, 7, 7]               0
     BatchNorm2d-105           [-1, 1024, 7, 7]           2,048
            ReLU-106           [-1, 1024, 7, 7]               0
          Conv2d-107            [-1, 256, 7, 7]         262,400
     BatchNorm2d-108            [-1, 256, 7, 7]             512
            ReLU-109            [-1, 256, 7, 7]               0
          Conv2d-110            [-1, 256, 7, 7]         590,080
     BatchNorm2d-111            [-1, 256, 7, 7]             512
            ReLU-112            [-1, 256, 7, 7]               0
          Conv2d-113           [-1, 1024, 7, 7]         263,168
        ResBlock-114           [-1, 1024, 7, 7]               0
        Identity-115           [-1, 1024, 7, 7]               0
     BatchNorm2d-116           [-1, 1024, 7, 7]           2,048
            ReLU-117           [-1, 1024, 7, 7]               0
          Conv2d-118            [-1, 256, 7, 7]         262,400
     BatchNorm2d-119            [-1, 256, 7, 7]             512
            ReLU-120            [-1, 256, 7, 7]               0
          Conv2d-121            [-1, 256, 7, 7]         590,080
     BatchNorm2d-122            [-1, 256, 7, 7]             512
            ReLU-123            [-1, 256, 7, 7]               0
          Conv2d-124           [-1, 1024, 7, 7]         263,168
        ResBlock-125           [-1, 1024, 7, 7]               0
        Identity-126           [-1, 1024, 7, 7]               0
     BatchNorm2d-127           [-1, 1024, 7, 7]           2,048
            ReLU-128           [-1, 1024, 7, 7]               0
          Conv2d-129            [-1, 256, 7, 7]         262,400
     BatchNorm2d-130            [-1, 256, 7, 7]             512
            ReLU-131            [-1, 256, 7, 7]               0
          Conv2d-132            [-1, 256, 7, 7]         590,080
     BatchNorm2d-133            [-1, 256, 7, 7]             512
            ReLU-134            [-1, 256, 7, 7]               0
          Conv2d-135           [-1, 1024, 7, 7]         263,168
        ResBlock-136           [-1, 1024, 7, 7]               0
        Identity-137           [-1, 1024, 7, 7]               0
     BatchNorm2d-138           [-1, 1024, 7, 7]           2,048
            ReLU-139           [-1, 1024, 7, 7]               0
          Conv2d-140            [-1, 256, 7, 7]         262,400
     BatchNorm2d-141            [-1, 256, 7, 7]             512
            ReLU-142            [-1, 256, 7, 7]               0
          Conv2d-143            [-1, 256, 7, 7]         590,080
     BatchNorm2d-144            [-1, 256, 7, 7]             512
            ReLU-145            [-1, 256, 7, 7]               0
          Conv2d-146           [-1, 1024, 7, 7]         263,168
        ResBlock-147           [-1, 1024, 7, 7]               0
          Conv2d-148           [-1, 2048, 7, 7]       2,099,200
     BatchNorm2d-149           [-1, 1024, 7, 7]           2,048
            ReLU-150           [-1, 1024, 7, 7]               0
          Conv2d-151            [-1, 512, 7, 7]         524,800
     BatchNorm2d-152            [-1, 512, 7, 7]           1,024
            ReLU-153            [-1, 512, 7, 7]               0
          Conv2d-154            [-1, 512, 7, 7]       2,359,808
     BatchNorm2d-155            [-1, 512, 7, 7]           1,024
            ReLU-156            [-1, 512, 7, 7]               0
          Conv2d-157           [-1, 2048, 7, 7]       1,050,624
        ResBlock-158           [-1, 2048, 7, 7]               0
        Identity-159           [-1, 2048, 7, 7]               0
     BatchNorm2d-160           [-1, 2048, 7, 7]           4,096
            ReLU-161           [-1, 2048, 7, 7]               0
          Conv2d-162            [-1, 512, 7, 7]       1,049,088
     BatchNorm2d-163            [-1, 512, 7, 7]           1,024
            ReLU-164            [-1, 512, 7, 7]               0
          Conv2d-165            [-1, 512, 7, 7]       2,359,808
     BatchNorm2d-166            [-1, 512, 7, 7]           1,024
            ReLU-167            [-1, 512, 7, 7]               0
          Conv2d-168           [-1, 2048, 7, 7]       1,050,624
        ResBlock-169           [-1, 2048, 7, 7]               0
        Identity-170           [-1, 2048, 7, 7]               0
     BatchNorm2d-171           [-1, 2048, 7, 7]           4,096
            ReLU-172           [-1, 2048, 7, 7]               0
          Conv2d-173            [-1, 512, 7, 7]       1,049,088
     BatchNorm2d-174            [-1, 512, 7, 7]           1,024
            ReLU-175            [-1, 512, 7, 7]               0
          Conv2d-176            [-1, 512, 7, 7]       2,359,808
     BatchNorm2d-177            [-1, 512, 7, 7]           1,024
            ReLU-178            [-1, 512, 7, 7]               0
          Conv2d-179           [-1, 2048, 7, 7]       1,050,624
        ResBlock-180           [-1, 2048, 7, 7]               0
     BatchNorm2d-181           [-1, 2048, 7, 7]           4,096
            ReLU-182           [-1, 2048, 7, 7]               0
AdaptiveAvgPool2d-183           [-1, 2048, 1, 1]               0
          Linear-184                 [-1, 1000]       2,049,000
         Softmax-185                 [-1, 1000]               0
================================================================
Total params: 25,575,912
Trainable params: 25,575,912
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 0.57
Forward/backward pass size (MB): 97.93
Params size (MB): 97.56
Estimated Total Size (MB): 196.07
----------------------------------------------------------------
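
As a quick sanity check (not part of the original run), a dummy forward pass confirms the output shape; it reuses the model and device created above:

# Feed a random batch through the randomly initialized model.
x = torch.randn(2, 3, 224, 224, device=device)
model.eval()
with torch.no_grad():
    y = model(x)
print(y.shape)       # torch.Size([2, 1000])
print(y.sum(dim=1))  # each row sums to ~1 because of the Softmax head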

Training results:

[figures: training results (screenshots omitted)]
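
The training script itself is not reproduced in this post. As a minimal sketch of a typical loop, assuming a placeholder train_loader and hypothetical hyperparameters: since nn.CrossEntropyLoss applies log-softmax internally, the model is rebuilt here with classifier_activation=None so it emits raw logits rather than Softmax probabilities:

import torch
import torch.nn as nn

# Rebuild without the Softmax head (classifier_activation=None skips it),
# because CrossEntropyLoss expects raw logits.
model = ResNet50V2(include_top=True, preact=True,
                   input_shape=(3, 224, 224), num_classes=1000,
                   classifier_activation=None).to(device)

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)  # hypothetical learning rate

for epoch in range(10):                  # hypothetical epoch count
    model.train()
    for images, labels in train_loader:  # train_loader: placeholder DataLoader
        images, labels = images.to(device), labels.to(device)
        optimizer.zero_grad()
        loss = criterion(model(images), labels)
        loss.backward()
        optimizer.step()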
