import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.autograd
class ODConv(nn.Sequential):
    """Omni-Dimensional dynamic convolution block: ODConv2d -> norm -> SiLU.

    Drop-in conv block (YOLOv5-style Conv replacement) that stacks a dynamic
    convolution, a normalization layer, and a SiLU activation.

    Args:
        in_planes: number of input channels.
        out_planes: number of output channels.
        kernel_size: square kernel size (default 3).
        stride: convolution stride (default 1).
        groups: convolution groups (default 1).
        norm_layer: normalization module constructor applied to ``out_planes``
            channels (default ``nn.BatchNorm2d``).
        reduction: channel-reduction ratio forwarded to ODConv2d's attention
            branch (default 0.0625, i.e. 1/16).
        kernel_num: number of candidate kernels forwarded to ODConv2d
            (default 1).
    """

    def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1,
                 norm_layer=nn.BatchNorm2d, reduction=0.0625, kernel_num=1):
        # "same" padding for odd kernel sizes at stride 1.
        padding = (kernel_size - 1) // 2
        super().__init__(
            # ODConv2d is the dynamic-convolution layer defined elsewhere in
            # this file/project (omni-dimensional dynamic convolution).
            ODConv2d(in_planes, out_planes, kernel_size, stride, padding,
                     groups=groups, reduction=reduction, kernel_num=kernel_num),
            norm_layer(out_planes),
            nn.SiLU(),
        )
class Attention(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size,
groups=1,
reduction=0.0625,
kernel_num=4,
min_channel=16):
super(Attention, self).__init__()
attention_channel = max(int(in_planes * reduction), min_channel)
self.kernel_size = kernel_size
self.kernel_num = kernel_num
self.temperature = 1.0
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Conv2d(in_planes, attention_channel, 1, bias=False)
self.bn = nn.BatchNorm2d(attention_channel)
self.relu = nn.ReLU(inplace=True)
self.channel_fc = nn.Conv2d(attention_channel, in_planes, 1, bias=True)
self.func_channel = self.get_channel_attention
if in_planes == groups and in_planes == out_planes: # depth-wise convolution
self.func_filter = self.skip
else:
self.filter_fc = nn.Conv2d(attention_channel, out_planes, 1, bias=True)
self.func_filter = self.get_filter_attention
if kernel_size == 1: # point-wise convolution
self.func_spatial = self.skip
else:
self.spatial_fc = nn.Conv2d(attention_channel, kernel_size * kernel_size, 1, bias=True)
self.func_spatial = self.get_spatial_attention
if kernel_num == 1:
self.func_kernel = self.skip
else:
self.kernel_fc = nn.Conv2d(attention_channel, kernel_num, 1, bias=True)
self.func_kernel = self.get_kernel_attention
self.bn_1 = nn.LayerNorm(
# yolov5 (ODConv improvement)
# Latest recommended article published 2024-12-27 22:39:54