    )

    def forward(self, x):
        b, c, _, _ = x.size()
        y = self.avg_pool(x).view(b, c)      # squeeze: global average pooling to (b, c)
        y = self.fc(y).view(b, c, 1, 1)      # excitation: FC layers produce per-channel weights
        return x * y.expand_as(x)            # scale: reweight the input channels
2. CA module (Coordinate Attention)
Paper: https://arxiv.org/abs/2103.02907
Reference: CA (Coordinate Attention) attention mechanism - 知乎 (zhihu.com)
CVPR 2021 | Plug-and-play! CA: a new attention mechanism that boosts classification/detection/segmentation - Amusi(CVer)的博客-优快云博客

CA applies global average pooling along the width and height directions separately, producing one feature map for each direction. The two feature maps are concatenated and fed into a shared convolution that reduces the channel dimension to C/r; batch normalization and an activation function are then applied to obtain the intermediate feature map.
import torch
from torch import nn

class CA_Block(nn.Module):
    def __init__(self, channel, h, w, reduction=16):
        super(CA_Block, self).__init__()
        self.h = h
        self.w = w
        # pool along one spatial direction at a time
        self.avg_pool_x = nn.AdaptiveAvgPool2d((h, 1))   # (b, c, h, 1)
        self.avg_pool_y = nn.AdaptiveAvgPool2d((1, w))   # (b, c, 1, w)
        self.conv_1x1 = nn.Conv2d(in_channels=channel, out_channels=channel//reduction, kernel_size=1, stride=1, bias=False)
        self.relu = nn.ReLU()
        self.bn = nn.BatchNorm2d(channel//reduction)
        self.F_h = nn.Conv2d(in_channels=channel//reduction, out_channels=channel, kernel_size=1, stride=1, bias=False)
        self.F_w = nn.Conv2d(in_channels=channel//reduction, out_channels=channel, kernel_size=1, stride=1, bias=False)
        self.sigmoid_h = nn.Sigmoid()
        self.sigmoid_w = nn.Sigmoid()

    def forward(self, x):
        x_h = self.avg_pool_x(x).permute(0, 1, 3, 2)     # (b, c, 1, h)
        x_w = self.avg_pool_y(x)                          # (b, c, 1, w)
        # concatenate along the spatial axis, then shared 1x1 conv + BN + ReLU
        x_cat_conv_relu = self.relu(self.bn(self.conv_1x1(torch.cat((x_h, x_w), 3))))
        x_cat_conv_split_h, x_cat_conv_split_w = x_cat_conv_relu.split([self.h, self.w], 3)
        # separate 1x1 convs and sigmoids produce the attention maps for each direction
        s_h = self.sigmoid_h(self.F_h(x_cat_conv_split_h.permute(0, 1, 3, 2)))
        s_w = self.sigmoid_w(self.F_w(x_cat_conv_split_w))
        out = x * s_h.expand_as(x) * s_w.expand_as(x)
        return out

if __name__ == '__main__':
    x = torch.randn(1, 16, 128, 64)  # b, c, h, w
    ca_model = CA_Block(channel=16, h=128, w=64)
    y = ca_model(x)
    print(y.shape)  # torch.Size([1, 16, 128, 64])
3. ECA module
Paper: ECA-Net: Efficient Channel Attention for Deep Convolutional Neural Networks (researchgate.net)
Reference: Attention mechanisms (SE, Coordinate Attention, CBAM, ECA, SimAM) and plug-and-play module round-up - 吴大炮的博客-优快云博客

ECA first applies global average pooling, then uses a 1D convolution across the channel axis for feature extraction, achieving local cross-channel interaction without channel dimensionality reduction.
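The original post gives no code for ECA, so here is a minimal sketch, not from the original post: the class name ECA_Block is mine, and it assumes the adaptive kernel-size rule from the ECA paper with the default gamma=2, b=1.

import math
import torch
from torch import nn

class ECA_Block(nn.Module):
    def __init__(self, channel, gamma=2, b=1):
        super(ECA_Block, self).__init__()
        # kernel size chosen adaptively from the channel count (paper's default gamma/b)
        t = int(abs((math.log(channel, 2) + b) / gamma))
        k = t if t % 2 else t + 1
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.conv = nn.Conv1d(1, 1, kernel_size=k, padding=k // 2, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        y = self.avg_pool(x)                               # (b, c, 1, 1)
        y = self.conv(y.squeeze(-1).transpose(-1, -2))     # 1D conv across the channel axis
        y = y.transpose(-1, -2).unsqueeze(-1)              # back to (b, c, 1, 1)
        return x * self.sigmoid(y).expand_as(x)

Unlike SE, there is no fully connected bottleneck; the only learnable parameters are the k weights of the 1D convolution.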
4. CBAM module
Paper: [1807.06521] CBAM: Convolutional Block Attention Module (arxiv.org)
Reference: Notes on "CBAM: Convolutional Block Attention Module" - 落樱弥城的博客-优快云博客


CBAM consists of a channel attention module and a spatial attention module. The channel attention has the same structure as SE, except that a max-pooling branch is added alongside the average pooling; the two branches share one MLP, and their outputs are summed and passed through a sigmoid.
The spatial attention compresses the input feature map along the channel dimension with average pooling and max pooling, then applies a convolution; a sketch of how the two sub-modules are chained follows the code below.
class ChannelAttention(nn.Module):
    def __init__(self, in_planes, ratio=16):
        super(ChannelAttention, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)
        # shared MLP implemented with 1x1 convolutions
        self.fc1 = nn.Conv2d(in_planes, in_planes // ratio, 1, bias=False)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Conv2d(in_planes // ratio, in_planes, 1, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        avg_out = self.fc2(self.relu1(self.fc1(self.avg_pool(x))))
        max_out = self.fc2(self.relu1(self.fc1(self.max_pool(x))))
        out = avg_out + max_out          # sum the two branches, then gate with sigmoid
        return self.sigmoid(out)

class SpatialAttention(nn.Module):
    def __init__(self, kernel_size=7):
        super(SpatialAttention, self).__init__()
        assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
        padding = 3 if kernel_size == 7 else 1
        self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        avg_out = torch.mean(x, dim=1, keepdim=True)      # channel-wise mean: (b, 1, h, w)
        max_out, _ = torch.max(x, dim=1, keepdim=True)    # channel-wise max:  (b, 1, h, w)
        x = torch.cat([avg_out, max_out], dim=1)
        x = self.conv1(x)
        return self.sigmoid(x)
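The snippet above stops at the two sub-modules. Below is a minimal sketch, not from the original post, of how they are typically chained (channel attention first, then spatial attention, as in the CBAM paper); the wrapper class name CBAM and the test shapes are illustrative, and it reuses the torch/nn imports and the two classes defined above.

class CBAM(nn.Module):
    def __init__(self, in_planes, ratio=16, kernel_size=7):
        super(CBAM, self).__init__()
        self.ca = ChannelAttention(in_planes, ratio)
        self.sa = SpatialAttention(kernel_size)

    def forward(self, x):
        x = x * self.ca(x)   # reweight channels first
        x = x * self.sa(x)   # then reweight spatial locations
        return x

if __name__ == '__main__':
    x = torch.randn(1, 16, 32, 32)
    print(CBAM(16)(x).shape)   # torch.Size([1, 16, 32, 32])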
5. GAM module
Paper: https://paperswithcode.com/paper/global-attention-mechanism-retain-information
The GAM attention mechanism consists of two sub-modules, a channel attention module (CAM) and a spatial attention module (SAM). The channel attention learns a weight for each channel and reweights the channels accordingly; the spatial attention focuses on where the target is located in the image and selectively emphasizes features at each spatial position through spatial weighting.

The channel attention sub-module first permutes the three dimensions of the input, then uses an MLP to amplify the cross-dimension channel-spatial dependencies (Figure 6 in the paper). The spatial attention sub-module uses two convolutional layers to fuse spatial information (Figure 7 in the paper), which makes the channels attend more to spatial information.

import torch
from torch import nn

class GAM_Attention(nn.Module):
    def __init__(self, in_channels, out_channels, rate=4):
        super(GAM_Attention, self).__init__()
        # channel attention: an MLP applied to the permuted (b, h*w, c) tensor
        self.channel_attention = nn.Sequential(
            nn.Linear(in_channels, int(in_channels / rate)),
            nn.ReLU(inplace=True),
            nn.Linear(int(in_channels / rate), in_channels)
        )
        # spatial attention: two 7x7 convolutions fuse spatial information
        self.spatial_attention = nn.Sequential(
            nn.Conv2d(in_channels, int(in_channels / rate), kernel_size=7, padding=3),
            nn.BatchNorm2d(int(in_channels / rate)),
            nn.ReLU(inplace=True),
            nn.Conv2d(int(in_channels / rate), out_channels, kernel_size=7, padding=3),
            nn.BatchNorm2d(out_channels)
        )

    def forward(self, x):
        b, c, h, w = x.shape
        x_permute = x.permute(0, 2, 3, 1).view(b, -1, c)              # (b, h*w, c)
        x_channel_att = self.channel_attention(x_permute).view(b, h, w, c).permute(0, 3, 1, 2).sigmoid()
        x = x * x_channel_att                                          # channel-refined features
        x_spatial_att = self.spatial_attention(x).sigmoid()
        return x * x_spatial_att
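A quick shape check in the same style as the CA example above; the input size is illustrative and not from the original post.

if __name__ == '__main__':
    x = torch.randn(1, 16, 32, 32)  # b, c, h, w
    gam = GAM_Attention(in_channels=16, out_channels=16)
    print(gam(x).shape)  # torch.Size([1, 16, 32, 32])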