class adjust_net(nn.Module):
    """Two-branch CNN that maps a 2-channel input to a pair of pooled feature vectors.

    Two parallel encoders (one with average pooling, one with max pooling)
    each downsample the input by 8x and emit ``out_channels * 2`` feature maps.
    Their outputs are concatenated, fused by a strided 1-layer conv, globally
    average-pooled to 1x1, and split channel-wise into two halves.

    Args:
        out_channels: half the channel width of the fused feature map; each
            returned tensor has ``out_channels`` channels.
        middle_channels: base width of the hidden conv layers.
    """

    def __init__(self, out_channels=64, middle_channels=32):
        super(adjust_net, self).__init__()

        # Branch 1: average-pooled encoder, spatial size reduced 8x.
        self.model = nn.Sequential(
            nn.Conv2d(2, middle_channels, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.AvgPool2d(2),
            nn.Conv2d(middle_channels, middle_channels * 2, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.AvgPool2d(2),
            nn.Conv2d(middle_channels * 2, middle_channels * 4, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.AvgPool2d(2),
            nn.Conv2d(middle_channels * 4, out_channels * 2, 1, padding=0)
        )

        # Branch 2: identical layout but max-pooled, capturing peak responses.
        self.model2 = nn.Sequential(
            nn.Conv2d(2, middle_channels, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2),
            nn.Conv2d(middle_channels, middle_channels * 2, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2),
            nn.Conv2d(middle_channels * 2, middle_channels * 4, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2),
            nn.Conv2d(middle_channels * 4, out_channels * 2, 1, padding=0)
        )

        # Fusion: 4*out_channels -> 2*out_channels, stride 2 halves spatial size.
        self.conv = nn.Sequential(
            nn.Conv2d(out_channels * 4, out_channels * 2, kernel_size=5, stride=2, padding=2, bias=False),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        """Run both branches on ``x`` and return the two halves of the fused features.

        Args:
            x: input tensor of shape ``(B, 2, H, W)``; H and W are assumed
               divisible by 8 for the three pooling stages — TODO confirm.

        Returns:
            Tuple ``(out1, out2)`` of tensors, each ``(B, out_channels, 1, 1)``.
        """
        out = self.model(x)
        out2 = self.model2(x)
        # torch.cat is the canonical spelling (torch.concat is an alias).
        out = torch.cat((out, out2), dim=1)
        out = self.conv(out)
        out = F.adaptive_avg_pool2d(out, (1, 1))
        # Split the channel dimension into two equal halves.
        out1 = out[:, :out.shape[1] // 2]
        out2 = out[:, out.shape[1] // 2:]
        return out1, out2
11202326
最新推荐文章于 2025-12-20 22:12:43 发布
229

被折叠的 条评论
为什么被折叠?



