SENet (Squeeze-and-Excitation), CBAM (Convolutional Block Attention Module), and CA (Coordinate Attention)

SENet (Squeeze-and-Excitation)

A great deal of prior work has improved network performance along the spatial dimension, so a natural question is whether performance can be improved from another angle, for example by modeling the relationships between feature channels. Squeeze-and-Excitation Networks (SENet for short) are built on exactly this idea, and are named after their two key operations, Squeeze and Excitation. The motivation is to explicitly model the interdependencies between feature channels. Instead of introducing a new spatial dimension for fusing channel information, SENet adopts a "feature recalibration" strategy: the importance of each feature channel is learned automatically, and these learned importances are then used to promote useful features and suppress features that are of little use to the task at hand.

In short, it comes down to three steps (a minimal sketch follows the list):

  1. Squeeze: global average pooling maps [n,c,h,w] -> [n,c,1,1], producing a per-channel descriptor.
  2. Excitation: pass the descriptor through an FC layer and ReLU to get [n,c/r,1,1], then through another FC layer and Sigmoid back to [n,c,1,1], yielding the weight tensor w.
  3. Reweight: multiply the input by w channel-wise.
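
A minimal functional sketch of these three steps using plain tensor ops; the FC weights fc1/fc2 below are random placeholders purely for illustration (the actual module from the reference implementation follows):

import torch
import torch.nn.functional as F

n, c, h, w, r = 2, 16, 3, 3, 4            # r is the reduction ratio
x = torch.randn(n, c, h, w)
fc1 = torch.randn(c // r, c)               # first FC layer weights (placeholder)
fc2 = torch.randn(c, c // r)               # second FC layer weights (placeholder)

s = x.mean(dim=(2, 3))                                 # Squeeze: [n,c,h,w] -> [n,c]
w_att = torch.sigmoid(F.relu(s @ fc1.t()) @ fc2.t())   # Excitation: [n,c] -> [n,c/r] -> [n,c]
out = x * w_att.view(n, c, 1, 1)                       # Reweight: broadcast over h and w
print(out.shape)                                       # torch.Size([2, 16, 3, 3])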

The SENet code is as follows:

Code excerpted from imgclsmob/pytorch/pytorchcv/models/common.py at 68335927ba27f2356093b985bada0bc3989836b1 · osmr/imgclsmob · GitHub

import torch.nn as nn
import torch

def conv1x1(in_channels,
            out_channels,
            stride=1,
            groups=1,
            bias=False):
    return nn.Conv2d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        stride=stride,
        groups=groups,
        bias=bias)

class SEBlock(nn.Module):

    def __init__(self,
                 channels,
                 reduction=16):
        super(SEBlock, self).__init__()
        mid_channels = channels // reduction

        self.pool = nn.AdaptiveAvgPool2d(output_size=1)
        self.conv1 = conv1x1(
            in_channels=channels,
            out_channels=mid_channels,
            bias=True)
        self.activ = nn.ReLU(inplace=True)
        self.conv2 = conv1x1(
            in_channels=mid_channels,
            out_channels=channels,
            bias=True)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        w = self.pool(x)
        print('after pool shape {}'.format(w.shape))
        w = self.conv1(w)
        print('after conv1 shape {}'.format(w.shape))
        w = self.activ(w)
        print('after activ shape {}'.format(w.shape))
        w = self.conv2(w)
        print('after conv2 shape {}'.format(w.shape))
        w = self.sigmoid(w)
        print('after sigmoid shape {}'.format(w.shape))
        x = x * w
        return x

if __name__ == '__main__':
    bs,c,h,w=2,16,3,3
    x = torch.randn(bs,c,h,w)
    se = SEBlock(channels=c)
    print(se(x).shape)

'''
Output:
after pool shape torch.Size([2, 16, 1, 1])
after conv1 shape torch.Size([2, 1, 1, 1])
after activ shape torch.Size([2, 1, 1, 1])
after conv2 shape torch.Size([2, 16, 1, 1])
after sigmoid shape torch.Size([2, 16, 1, 1])
torch.Size([2, 16, 3, 3])
'''
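
As a usage note, SE blocks are usually inserted inside a backbone's residual units, reweighting the residual branch before the skip addition. A minimal sketch under that assumption, reusing the SEBlock defined above (SEResidualUnit is an illustrative name, not from the repo):

class SEResidualUnit(nn.Module):
    """Illustrative only: conv -> BN -> ReLU -> SE reweighting -> skip connection."""

    def __init__(self, channels):
        super(SEResidualUnit, self).__init__()
        self.conv = nn.Conv2d(channels, channels, kernel_size=3, padding=1, bias=False)
        self.bn = nn.BatchNorm2d(channels)
        self.se = SEBlock(channels=channels)

    def forward(self, x):
        identity = x
        out = torch.relu(self.bn(self.conv(x)))
        out = self.se(out)   # reweight channels before the residual add
        return torch.relu(out + identity)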

Convolutional Block Attention Module (CBAM)

This module was published at ECCV 2018. Its main idea is to apply attention along both the spatial and channel dimensions of the features; it can be seen as an enhanced version of SENet.

In the paper's figure, the first stage, the Channel Attention Module (called ChannelGate in the code), is essentially SENet's pooling performed twice, once with average pooling and once with max pooling. The second stage, the Spatial Attention Module, first uses ChannelPool to compress [n,c,h,w] to [n,2,h,w] by taking the channel-wise mean and max (the ChannelPool part of the code), then applies a convolution with in_channels=2 and out_channels=1 to produce [n,1,h,w], and finally applies a sigmoid followed by a reweighting of the input. ChannelGate as a whole follows SENet's pooling -> FC -> ReLU -> FC pattern, while ChannelPool simply performs the two kinds of pooling; see the code below for details:

import torch
import math
import torch.nn as nn
import torch.nn.functional as F
 
class BasicConv(nn.Module):
    def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True, bn=True, bias=False):
        super(BasicConv, self).__init__()
        self.out_channels = out_planes
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)
        self.bn = nn.BatchNorm2d(out_planes,eps=1e-5, momentum=0.01, affine=True) if bn else None
        self.relu = nn.ReLU() if relu else None
 
    def forward(self, x):
        x = self.conv(x)
        if self.bn is not None:
            x = self.bn(x)
        if self.relu is not None:
            x = self.relu(x)
        return x
 
class Flatten(nn.Module):
    def forward(self, x):
        return x.view(x.size(0), -1)
 
class ChannelGate(nn.Module):
    def __init__(self, gate_channels, reduction_ratio=16, pool_types=['avg', 'max']):
        super(ChannelGate, self).__init__()
        self.gate_channels = gate_channels
        self.mlp = nn.Sequential(
            Flatten(),
            nn.Linear(gate_channels, gate_channels // reduction_ratio),
            nn.ReLU(),
            nn.Linear(gate_channels // reduction_ratio, gate_channels)
            )
        self.pool_types = pool_types
    def forward(self, x):
        channel_att_sum = None
        for pool_type in self.pool_types:
            if pool_type=='avg':
                avg_pool = F.avg_pool2d( x, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3)))
                channel_att_raw = self.mlp( avg_pool )
            elif pool_type=='max':
                max_pool = F.max_pool2d( x, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3)))
                channel_att_raw = self.mlp( max_pool )
            elif pool_type=='lp':
                lp_pool = F.lp_pool2d( x, 2, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3)))
                channel_att_raw = self.mlp( lp_pool )
            elif pool_type=='lse':
                # LSE pool only
                lse_pool = logsumexp_2d(x)
                channel_att_raw = self.mlp( lse_pool )
 
            if channel_att_sum is None:
                channel_att_sum = channel_att_raw
            else:
                channel_att_sum = channel_att_sum + channel_att_raw
 
        scale = torch.sigmoid( channel_att_sum ).unsqueeze(2).unsqueeze(3).expand_as(x)
        return x * scale
 
def logsumexp_2d(tensor):
    tensor_flatten = tensor.view(tensor.size(0), tensor.size(1), -1)
    s, _ = torch.max(tensor_flatten, dim=2, keepdim=True)
    outputs = s + (tensor_flatten - s).exp().sum(dim=2, keepdim=True).log()
    return outputs
 
class ChannelPool(nn.Module):
    def forward(self, x):
        # torch.max(x,1)[0] takes the max along dim=1; max returns a namedtuple (values, indices), so [0] keeps only the values
        # The whole function concatenates the channel-wise max and channel-wise mean
        return torch.cat( (torch.max(x,1)[0].unsqueeze(1), torch.mean(x,1).unsqueeze(1)), dim=1 )
 
class SpatialGate(nn.Module):
    def __init__(self):
        super(SpatialGate, self).__init__()
        kernel_size = 7
        self.compress = ChannelPool()
        self.spatial = BasicConv(2, 1, kernel_size, stride=1, padding=(kernel_size-1) // 2, relu=False)
    def forward(self, x):
        x_compress = self.compress(x)
        print('after x_compress shape {}'.format(x_compress.shape))
        x_out = self.spatial(x_compress)
        print('after x_out shape {}'.format(x_out.shape))
        scale = torch.sigmoid(x_out) # broadcasting
        return x * scale
 
class CBAM(nn.Module):
    def __init__(self, gate_channels, reduction_ratio=16, pool_types=['avg', 'max'], no_spatial=False):
        super(CBAM, self).__init__()
        self.ChannelGate = ChannelGate(gate_channels, reduction_ratio, pool_types)
        self.no_spatial=no_spatial
        if not no_spatial:
            self.SpatialGate = SpatialGate()
    def forward(self, x):
        x_out = self.ChannelGate(x)
        print('after channelgate shape {}'.format(x_out.shape))
        if not self.no_spatial:
            x_out = self.SpatialGate(x_out)
        return x_out
 
if __name__ == '__main__':
    bs,c,h,w=2,16,3,3
    x = torch.randn(bs,c,h,w)
    cbam = CBAM(gate_channels=c)
    res = cbam(x)
    print(res.shape)

"""
after channelgate shape torch.Size([2, 16, 3, 3])
after x_compress shape torch.Size([2, 2, 3, 3])
after x_out shape torch.Size([2, 1, 3, 3])
torch.Size([2, 16, 3, 3])
"""

Coordinate Attention for Efficient Mobile Network Design(简称Coordinate Attention)

This network was published at CVPR 2021. Its core idea is to split the reweighting into the h and w dimensions, use only average pooling, and reweight only once at the end, as illustrated by the figure in the paper:

Code excerpted from CoordAttention/coordatt.py at main · houqb/CoordAttention · GitHub

import torch
import torch.nn as nn
import math
import torch.nn.functional as F

class h_sigmoid(nn.Module):
    def __init__(self, inplace=True):
        super(h_sigmoid, self).__init__()
        self.relu = nn.ReLU6(inplace=inplace)

    def forward(self, x):
        # hard sigmoid: a piecewise-linear approximation of sigmoid
        return self.relu(x + 3) / 6

class h_swish(nn.Module):
    def __init__(self, inplace=True):
        super(h_swish, self).__init__()
        self.sigmoid = h_sigmoid(inplace=inplace)

    def forward(self, x):
        # hard swish: x * h_sigmoid(x)
        return x * self.sigmoid(x)

class CoordAtt(nn.Module):
    def __init__(self, inp, oup, reduction=32):
        super(CoordAtt, self).__init__()
        self.pool_h = nn.AdaptiveAvgPool2d((None, 1))
        self.pool_w = nn.AdaptiveAvgPool2d((1, None))

        mip = max(8, inp // reduction)

        self.conv1 = nn.Conv2d(inp, mip, kernel_size=1, stride=1, padding=0)
        self.bn1 = nn.BatchNorm2d(mip)
        self.act = h_swish()
        
        self.conv_h = nn.Conv2d(mip, oup, kernel_size=1, stride=1, padding=0)
        self.conv_w = nn.Conv2d(mip, oup, kernel_size=1, stride=1, padding=0)
        

    def forward(self, x):
        identity = x

        n, c, h, w = x.size()
        x_h = self.pool_h(x)                      # [n,c,h,w] -> [n,c,h,1]
        x_w = self.pool_w(x).permute(0, 1, 3, 2)  # [n,c,1,w] -> [n,c,w,1]

        y = torch.cat([x_h, x_w], dim=2)          # concatenate along the pooled axis: [n,c,h+w,1]
        y = self.conv1(y)
        y = self.bn1(y)
        y = self.act(y)

        x_h, x_w = torch.split(y, [h, w], dim=2)  # split back into the h and w parts
        x_w = x_w.permute(0, 1, 3, 2)             # [n,mip,w,1] -> [n,mip,1,w]

        a_h = self.conv_h(x_h).sigmoid()          # attention along height: [n,oup,h,1]
        a_w = self.conv_w(x_w).sigmoid()          # attention along width:  [n,oup,1,w]

        out = identity * a_w * a_h                # broadcast reweighting in both directions

        return out
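
For symmetry with the SE and CBAM examples above, a small usage check (my own sketch; inp and oup are set equal so the attention maps can broadcast back onto the input):

if __name__ == '__main__':
    bs, c, h, w = 2, 16, 3, 3
    x = torch.randn(bs, c, h, w)
    ca = CoordAtt(inp=c, oup=c)
    print(ca(x).shape)  # torch.Size([2, 16, 3, 3])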

Differences between the CBAM, SE, and CA attention mechanisms

1. CBAM (Convolutional Block Attention Module)

CBAM is a lightweight attention module that takes both the channel dimension and the spatial dimension into account. Through two independent submodules, Channel Attention and Spatial Attention, it progressively strengthens the important regions of the feature map.

- Characteristics:
  - It first computes channel attention by processing the results of global average pooling and global max pooling to obtain an importance weight for each channel.
  - It then uses these weights to rescale the input feature map, and further applies spatial attention to highlight the important spatial positions.
- Typical models/scenarios:
  - Mainly used with convolutional neural networks (CNNs), especially for image classification, object detection, and semantic segmentation, improving performance without significantly increasing computational overhead.

A simplified sketch follows; note that unlike the full implementation above, the channel branch here uses only average pooling (the class is renamed SimpleCBAM to avoid clashing with the CBAM class defined earlier):

class SimpleCBAM(nn.Module):
    def __init__(self, channels, reduction_ratio=16):
        super(SimpleCBAM, self).__init__()
        # Channel attention module (average pooling only)
        self.channel_attention = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(channels, channels // reduction_ratio, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(channels // reduction_ratio, channels, kernel_size=1),
            nn.Sigmoid()
        )
        # Spatial attention module
        self.spatial_attention = nn.Sequential(
            nn.Conv2d(2, 1, kernel_size=7, padding=3),
            nn.Sigmoid()
        )

    def forward(self, x):
        channel_att_map = self.channel_attention(x).expand_as(x)
        out = x * channel_att_map
        max_out, _ = torch.max(out, dim=1, keepdim=True)
        avg_out = torch.mean(out, dim=1, keepdim=True)
        spatial_input = torch.cat([max_out, avg_out], dim=1)
        spatial_att_map = self.spatial_attention(spatial_input).expand_as(out)
        return out * spatial_att_map

2. SE (Squeeze-and-Excitation Networks)

SENet introduces the Squeeze-and-Excitation method, which focuses on improving how a CNN models the relationships between channels. Its core idea is to learn a recalibration weight for each channel, dynamically emphasizing useful information and suppressing the less useful parts.

- Characteristics:
  - Uses global context (global average pooling) to squeeze the spatial extent of each feature map into a single value.
  - Fully connected layers then map this descriptor to per-channel excitation coefficients, which are multiplied back onto the original feature map for adaptive recalibration.
- Typical models/scenarios:
  - Widely used across visual recognition tasks, e.g. as extended versions of architectures such as ResNet and MobileNet; especially suitable for efficient inference in resource-constrained environments.

class SELayer(nn.Module):
    def __init__(self, channel, reduction=16):
        super(SELayer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel, bias=False),
            nn.Sigmoid()
        )

    def forward(self, x):
        b, c, _, _ = x.size()
        y = self.avg_pool(x).view(b, c)
        y = self.fc(y).view(b, c, 1, 1)
        return x * y.expand_as(x)

3. CA (Coordinate Attention)

CA factorizes the 2D feature map into a sequence of 1D vectors along the horizontal direction and another along the vertical direction, and applies attention to each separately, aiming to capture finer-grained positional dependencies.

- Characteristics:
  - Unlike the other two mechanisms, which attend to a single dimension or a simple combination, CA acts along both axes of the H×W feature map at the same time.
  - This design preserves local structural information better while reducing the number of redundant parameters.
- Typical models/scenarios:
  - Particularly well suited to high-resolution image analysis workflows, such as super-resolution reconstruction and medical image diagnosis, where pixel-level changes often need to be controlled precisely. For the implementation, see the CoordAtt class from the official repository above.

Summary comparison table

| Property             | CBAM                                   | SE                           | CA                                          |
|----------------------|----------------------------------------|------------------------------|---------------------------------------------|
| Attended dimensions  | Dual: channel & spatial                | Channel only                 | Coordinates (separate H and W directions)   |
| Parameter overhead   | Low                                    | Very low                     | Moderate                                    |
| Computational cost   | Slight increase                        | Almost unchanged             | Slight increase (designed for mobile nets)  |
| Typical uses         | Image classification, object detection | A wide range of visual tasks | Tasks needing precise localization          |