Dilated U-Net Keras版本实现

本文介绍一种改进的U-Net模型,用于MRI头颈部肿瘤的分割。该模型引入了空洞卷积和普通卷积的双通路结构,提高了特征提取能力。通过Keras实现并在肿瘤分割任务上取得良好效果。

摘要生成于 C知道 ,由 DeepSeek-R1 满血版支持, 前往体验 >

最近读了一篇会议文章,提出在编码路径的每个阶段中,使用除了两个一般卷积之外还有一条空洞卷积路径的双通路结构,文章中称为Modified U-Net,用于分割MRI中头颈部位的肿瘤,文章名称为:Segmentation of Head and Neck Tumours Using Modified U-net。网络结构为:

网络结构

下面给出该网络Keras版本的复现:

#dilated UNet
import tensorflow.python.keras.backend as K
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.layers import Input, ZeroPadding2D, concatenate, add
from tensorflow.python.keras.layers.core import Dropout, Activation
from tensorflow.python.keras.layers.convolutional import UpSampling2D, Conv2D, Conv2DTranspose
from tensorflow.python.keras.layers.pooling import AveragePooling2D, MaxPooling2D
from tensorflow.python.keras.layers.normalization import BatchNormalization
from tensorflow.python.keras.optimizers import nadam, adam, SGD
import losses


def doubleConv2D(input, outdim, is_dilated = False, is_batchnorm=False, name = ''):
    """Two stacked 3x3 convolutions, each optionally batch-normalized, then ReLU.

    With ``is_dilated`` both convolutions use a (3, 3) dilation rate (plain
    (1, 1) otherwise); 'same' padding keeps the spatial size, glorot-normal
    initialisation throughout. Layer names follow the ``<name>_<i>`` /
    ``<name>_<i>_bn`` / ``<name>_<i>_act`` convention.
    """
    rate = (3, 3) if is_dilated else (1, 1)
    x = input
    for suffix in ('_1', '_2'):
        x = Conv2D(outdim, (3, 3), strides=(1, 1), dilation_rate=rate,
                   kernel_initializer='glorot_normal', padding="same",
                   name=name + suffix)(x)
        if is_batchnorm:
            x = BatchNormalization(name=name + suffix + '_bn')(x)
        x = Activation('relu', name=name + suffix + '_act')(x)
    return x

def dualConv2D(input, outdim, is_batchnorm=False, name = ''):
    """Parallel convolution block: an upper dilated-convolution path and a
    lower plain 3x3 path, concatenated along the channel axis.

    Bug fix: ``is_batchnorm`` was previously hard-coded to ``False`` for both
    sub-paths, silently ignoring the caller's argument; it is now forwarded.
    (Backward-compatible: every caller in this file passes the default False.)
    """
    conv_x = doubleConv2D(input, outdim, is_batchnorm=is_batchnorm, name=name + '_conv')
    dilated_x = doubleConv2D(input, outdim, is_dilated=True, is_batchnorm=is_batchnorm, name=name + '_dilated')
    # Dilated path first, matching the original concatenation order.
    concat_x = concatenate([dilated_x, conv_x], name=name + '_concat')
    return concat_x

def dilatedUNet(opt, input_size, classes, lossfxn):
    """Build and compile the dilated ("Modified") U-Net.

    Args:
        opt: Keras optimizer instance, passed straight to ``model.compile``.
        input_size: input tensor shape tuple, e.g. (H, W, channels).
        classes: number of output classes; final layer is a 1x1 softmax conv.
        lossfxn: loss function handed to ``model.compile``.

    Returns:
        The compiled ``Model`` (metric: the project's ``losses.dsc``).
    """

    img_input = Input(shape=input_size, name='input')
    # Encoder: four dual-path (plain + dilated) stages, each followed by 2x2 max-pooling.
    # Each dualConv2D concatenates two paths, so its output has 2*outdim channels.
    dualblock1 = dualConv2D(img_input, 32, is_batchnorm=False, name = 'encoder_stage_1')
    pool1 = MaxPooling2D(pool_size=(2, 2))(dualblock1)

    dualblock2 = dualConv2D(pool1, 64, is_batchnorm=False, name='encoder_stage_2')
    pool2 = MaxPooling2D(pool_size=(2, 2))(dualblock2)

    dualblock3 = dualConv2D(pool2, 128, is_batchnorm=False, name='encoder_stage_3')
    pool3 = MaxPooling2D(pool_size=(2, 2))(dualblock3)

    dualblock4 = dualConv2D(pool3, 256, is_batchnorm=False, name='encoder_stage_4')
    pool4 = MaxPooling2D(pool_size=(2, 2))(dualblock4)

    # Bridge: single-path double conv.
    # NOTE(review): 1024 filters here vs. the 512-filter transpose below — possibly
    # intentional per the paper, but 512 would match a symmetric U-Net; confirm.
    conv5 = doubleConv2D(pool4, 1024, is_batchnorm=False, name = 'encoder_stage_5')

    # Decoder: transpose-conv upsampling, skip-concatenate with the matching
    # encoder dual block, then a plain double conv.
    up1 = concatenate(
        [Conv2DTranspose(512, (3, 3), strides=(2, 2), padding='same', activation='relu', kernel_initializer='glorot_normal')(conv5),
         dualblock4], name='up1')
    upconv1 = doubleConv2D(up1, 512, is_batchnorm=False, name = 'decoder_stage_1')

    up2 = concatenate(
        [Conv2DTranspose(256, (3, 3), strides=(2, 2), padding='same', activation='relu',
                         kernel_initializer='glorot_normal')(upconv1),
         dualblock3], name='up2')

    upconv2 = doubleConv2D(up2, 256, is_batchnorm=False, name='decoder_stage_2')

    up3 = concatenate(
        [Conv2DTranspose(128, (3, 3), strides=(2, 2), padding='same', activation='relu',
                         kernel_initializer='glorot_normal')(upconv2),
         dualblock2], name='up3')

    upconv3 = doubleConv2D(up3, 128, is_batchnorm=False, name='decoder_stage_3')

    up4 = concatenate(
        [Conv2DTranspose(64, (3, 3), strides=(2, 2), padding='same', activation='relu',
                         kernel_initializer='glorot_normal')(upconv3),
         dualblock1], name='up4')

    # NOTE(review): 128 filters breaks the 512/256/128/64 decoder progression —
    # looks like it should be 64; verify against the paper before changing.
    upconv4 = doubleConv2D(up4, 128, is_batchnorm=False, name='decoder_stage_4')

    # Per-pixel class probabilities via 1x1 convolution + softmax.
    out = Conv2D(classes, (1, 1), activation='softmax', name='final')(upconv4)

    model = Model(inputs=img_input, outputs= out )

    # losses.dsc: project-local Dice coefficient metric (defined in the
    # imported ``losses`` module, not visible here).
    model.compile(optimizer=opt, loss=lossfxn, metrics=[losses.dsc])

    return model

实际表现尚待检验!

评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值