UNet3D Segmentation Model in PyTorch

This post presents a 3D implementation of the UNet architecture in PyTorch, including the downsampling and upsampling modules and the final convolution layer. The network is aimed at 3D medical image segmentation; the construction of each module is shown in detail, using batch normalization and upsampling.


import torch
from torch import nn


class pub(nn.Module):

    def __init__(self, in_channels, out_channels, batch_norm=True):
        super(pub, self).__init__()
        # Intermediate channel count: half of the larger of the two channel numbers
        inter_channels = in_channels // 2 if in_channels > out_channels else out_channels // 2
        layers = [
                    nn.Conv3d(in_channels, inter_channels, 3, stride=1, padding=0),
                    nn.ReLU(True),
                    nn.Conv3d(inter_channels, out_channels, 3, stride=1, padding=0),
                    nn.ReLU(True)
                 ]
        if batch_norm:
            # Insert a BatchNorm3d after each convolution, before its ReLU
            layers.insert(1, nn.BatchNorm3d(inter_channels))
            layers.insert(len(layers) - 1, nn.BatchNorm3d(out_channels))
        self.pub = nn.Sequential(*layers)

    def forward(self, x):
        return self.pub(x)
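As a quick sanity check, here is a minimal sketch with assumed toy sizes, reusing the `pub` class above: the two unpadded 3x3x3 convolutions shrink every spatial dimension by 4.

```python
import torch

block = pub(in_channels=1, out_channels=64)
x = torch.randn(1, 1, 32, 32, 32)   # (N, C, D, H, W)
print(block(x).shape)               # torch.Size([1, 64, 28, 28, 28])
```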


class unet3dDown(nn.Module):

    def __init__(self, in_channels, out_channels, batch_norm=True):
        super(unet3dDown, self).__init__()
        self.pub = pub(in_channels, out_channels, batch_norm)
        self.pool = nn.MaxPool3d(2, stride=2)

    def forward(self, x):
        # Downsample first, then apply the double convolution
        x = self.pool(x)
        x = self.pub(x)
        return x
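The down block halves the resolution before convolving, so with an assumed toy size a 40^3 input comes out at 16^3:

```python
import torch

down = unet3dDown(64, 128)
x = torch.randn(1, 64, 40, 40, 40)
print(down(x).shape)   # pooling halves 40 -> 20, the convolutions shrink 20 -> 16
                       # torch.Size([1, 128, 16, 16, 16])
```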


class unet3dUp(nn.Module):
    def __init__(self, in_channels, out_channels, batch_norm=True, sample=True):
        super(unet3dUp, self).__init__()
        # After upsampling, the decoder feature still has in_channels channels and is
        # concatenated with a skip connection carrying in_channels // 2 channels
        self.pub = pub(in_channels // 2 + in_channels, out_channels, batch_norm)
        if sample:
            self.sample = nn.Upsample(scale_factor=2, mode='nearest')
        else:
            self.sample = nn.ConvTranspose3d(in_channels, in_channels, 2, stride=2)

    def forward(self, x, x1):
        x = self.sample(x)
        # Center-crop the skip connection x1 to the spatial size of the upsampled x
        c1 = (x1.size(2) - x.size(2)) // 2
        c2 = (x1.size(3) - x.size(3)) // 2
        c3 = (x1.size(4) - x.size(4)) // 2
        x1 = x1[:, :, c1:c1 + x.size(2), c2:c2 + x.size(3), c3:c3 + x.size(4)]
        # Concatenate along the channel dimension, then apply the double convolution
        x = torch.cat((x, x1), dim=1)
        x = self.pub(x)
        return x
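A small, hedged example of the crop-and-concatenate step with assumed sizes: the deepest decoder feature is upsampled from 4^3 to 8^3, the 16^3 skip connection is center-cropped to 8^3, and the concatenated 768-channel tensor is reduced back to 256 channels by the double convolution.

```python
import torch

up = unet3dUp(in_channels=512, out_channels=256)
x_low = torch.randn(1, 512, 4, 4, 4)       # deepest decoder input
x_skip = torch.randn(1, 256, 16, 16, 16)   # matching encoder skip connection
print(up(x_low, x_skip).shape)             # torch.Size([1, 256, 4, 4, 4])
```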


class unet3d(nn.Module):
    def __init__(self, init_channels=1, class_nums=1, batch_norm=True, sample=True):
        super(unet3d, self).__init__()
        # Encoder: 64 -> 128 -> 256 -> 512 channels
        self.down1 = pub(init_channels, 64, batch_norm)
        self.down2 = unet3dDown(64, 128, batch_norm)
        self.down3 = unet3dDown(128, 256, batch_norm)
        self.down4 = unet3dDown(256, 512, batch_norm)
        # Decoder with skip connections from the encoder
        self.up3 = unet3dUp(512, 256, batch_norm, sample)
        self.up2 = unet3dUp(256, 128, batch_norm, sample)
        self.up1 = unet3dUp(128, 64, batch_norm, sample)
        # 1x1x1 convolution mapping to the number of classes
        self.con_last = nn.Conv3d(64, class_nums, 1)
        self.sigmoid = nn.Sigmoid()
        self._initialize_weights()

    def forward(self, x):
        x1 = self.down1(x)
        x2 = self.down2(x1)
        x3 = self.down3(x2)
        x4 = self.down4(x3)
        x = self.up3(x4, x3)
        x = self.up2(x, x2)
        x = self.up1(x, x1)
        x = self.con_last(x)
        return self.sigmoid(x)

    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                nn.init.kaiming_uniform_(m.weight)
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm3d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
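A minimal smoke test for the full model (assumed values: batch size 1, single-channel input, a 92^3 patch, which satisfies the size arithmetic of this unpadded network; every spatial dimension shrinks by 88):

```python
import torch

model = unet3d(init_channels=1, class_nums=1)
x = torch.randn(1, 1, 92, 92, 92)   # (N, C, D, H, W)
with torch.no_grad():
    y = model(x)
print(y.shape)                      # torch.Size([1, 1, 4, 4, 4])
```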
### How to implement an nnUNet model for CT image segmentation with PyTorch

#### Build an ROI data structure and process the images

To segment CT images effectively, one can start from the algorithmic side: build an ROI data structure from the extracted contour points of the target, then convert the ROI into a mask image. The mask is then filled and combined with the original image through a masking operation, dividing the image into separate regions [^1]. A minimal sketch of this masking step is given at the end of this section.

#### Design the network architecture

For the concrete network design, the purely convolutional MedNeXt model is a useful reference. It retains the inductive biases of ConvNets, which makes it better suited to sparse medical datasets, and it allows scaling the width (more channels) and the receptive field (larger kernels) of both the standard and the up/down-sampling layers, in addition to scaling the depth. This multi-dimensional flexibility helps improve model performance [^2].

#### Use a U-Net for the segmentation task

U-Net's distinctive design, an encoder that captures global feature information and a decoder that restores spatial resolution while keeping the semantics consistent, makes a similar U-shaped architecture a natural choice for identifying organs or lesions in CT images. This approach has been shown to work in many medical image analysis scenarios and generalizes well [^3]. The following code is a self-contained 2D sketch of that structure; the block definitions are minimal placeholders rather than a reference implementation.

```python
import torch
import torch.nn as nn


class DoubleConv(nn.Module):
    """Two 3x3 convolutions with ReLU; padding keeps the spatial size."""

    def __init__(self, in_ch, out_ch):
        super().__init__()
        self.block = nn.Sequential(
            nn.Conv2d(in_ch, out_ch, 3, padding=1), nn.ReLU(inplace=True),
            nn.Conv2d(out_ch, out_ch, 3, padding=1), nn.ReLU(inplace=True))

    def forward(self, x):
        return self.block(x)


class UNet(nn.Module):
    def __init__(self, n_channels=1, n_classes=2):
        super().__init__()
        # Encoder path: feature maps are stored before each pooling step
        self.encoder = nn.ModuleList([DoubleConv(n_channels, 64),
                                      DoubleConv(64, 128),
                                      DoubleConv(128, 256)])
        self.pool = nn.MaxPool2d(2)
        self.bottom = DoubleConv(256, 512)
        # Decoder path: upsample, concatenate the skip connection, convolve
        self.ups = nn.ModuleList([nn.ConvTranspose2d(512, 256, 2, stride=2),
                                  nn.ConvTranspose2d(256, 128, 2, stride=2),
                                  nn.ConvTranspose2d(128, 64, 2, stride=2)])
        self.decoder = nn.ModuleList([DoubleConv(512, 256),
                                      DoubleConv(256, 128),
                                      DoubleConv(128, 64)])
        # Output layer: 1x1 convolution to the number of classes
        self.out_conv = nn.Conv2d(64, n_classes, 1)

    def forward(self, x):
        features = []
        for down in self.encoder:
            x = down(x)
            features.append(x)            # saved for the skip connection
            x = self.pool(x)
        x = self.bottom(x)
        for up, conv, feature in zip(self.ups, self.decoder, reversed(features)):
            x = torch.cat((up(x), feature), dim=1)
            x = conv(x)
        logits = self.out_conv(x)
        return logits


def main():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = UNet().to(device)
    input_tensor = torch.randn((batch_size, channels, height, width), device=device)
    output = model(input_tensor)
    print(output.shape)                   # (batch_size, n_classes, height, width)


if __name__ == "__main__":
    batch_size = 1
    channels = 1
    height = 256
    width = 256
    main()
```
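Finally, the masking step mentioned in the ROI paragraph above, as a minimal sketch. Assumptions: the contour points of a single CT slice are given as (x, y) pixel coordinates, OpenCV and NumPy are available, and the helper name `apply_roi_mask` is illustrative rather than taken from the cited source.

```python
import numpy as np
import cv2


def apply_roi_mask(ct_slice: np.ndarray, contour_pts: np.ndarray) -> np.ndarray:
    """Fill the contour into a binary mask and keep only the region of interest."""
    mask = np.zeros(ct_slice.shape[:2], dtype=np.uint8)
    cv2.fillPoly(mask, [contour_pts.astype(np.int32)], 1)   # rasterize and fill the ROI
    return ct_slice * mask                                   # zero out pixels outside the ROI
```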