I recently read a conference paper, Segmentation of Head and Neck Tumours Using Modified U-net, which segments head and neck tumours in MRI. In each stage of the encoding path it uses a dual-path block: alongside the usual two plain convolutions there is an additional dilated-convolution path. The paper calls the resulting network a Modified U-Net; the architecture is shown in the paper's figure.
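As a quick aside on why the dilated path helps (my own illustration, not from the paper): a 3x3 kernel with dilation rate d samples points spaced d apart, covering a (2d+1) x (2d+1) neighbourhood with only 9 weights per channel, so each encoder stage sees both fine local detail and wider context:

import tensorflow as tf

# A 3x3 kernel with dilation_rate=3 covers a 7x7 neighbourhood (2*3 + 1 = 7)
# while keeping the parameter count of a plain 3x3 convolution.
x = tf.keras.Input(shape=(64, 64, 1))
y = tf.keras.layers.Conv2D(8, (3, 3), dilation_rate=(3, 3), padding='same')(x)
print(y.shape)  # (None, 64, 64, 8) -- 'same' padding preserves the spatial size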
Below is a Keras reproduction of the network:
# dilated UNet
from tensorflow.keras.models import Model
from tensorflow.keras.layers import (Input, concatenate, Activation, Conv2D,
                                     Conv2DTranspose, MaxPooling2D,
                                     BatchNormalization)
import losses  # local module; only needs to expose the Dice metric `dsc`
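# Note: `losses` is a local module not included in this post. A minimal sketch
# of the Dice coefficient metric it is assumed to provide (my assumption, not
# the paper's code):
#
#   import tensorflow.keras.backend as K
#   def dsc(y_true, y_pred, smooth=1.0):
#       intersection = K.sum(y_true * y_pred)
#       return (2. * intersection + smooth) / (K.sum(y_true) + K.sum(y_pred) + smooth)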
def doubleConv2D(input, outdim, is_dilated=False, is_batchnorm=False, name=''):
    """Two stacked 3x3 convolutions; both use dilation rate 3 when is_dilated is set."""
    dilation_rate = (3, 3) if is_dilated else (1, 1)
    x = Conv2D(outdim, (3, 3), strides=(1, 1), dilation_rate=dilation_rate,
               kernel_initializer='glorot_normal', padding='same', name=name + '_1')(input)
    if is_batchnorm:
        x = BatchNormalization(name=name + '_1_bn')(x)
    x = Activation('relu', name=name + '_1_act')(x)
    x = Conv2D(outdim, (3, 3), strides=(1, 1), dilation_rate=dilation_rate,
               kernel_initializer='glorot_normal', padding='same', name=name + '_2')(x)
    if is_batchnorm:
        x = BatchNormalization(name=name + '_2_bn')(x)
    x = Activation('relu', name=name + '_2_act')(x)
    return x
def dualConv2D(input, outdim, is_batchnorm=False, name=''):
    """Parallel convolution block: the upper path is a dilated double convolution,
    the lower path a plain 3x3 double convolution; their outputs are concatenated,
    so the block emits 2 * outdim channels."""
    conv_x = doubleConv2D(input, outdim, is_batchnorm=is_batchnorm, name=name + '_conv')
    dilated_x = doubleConv2D(input, outdim, is_dilated=True, is_batchnorm=is_batchnorm,
                             name=name + '_dilated')
    concat_x = concatenate([dilated_x, conv_x], name=name + '_concat')
    return concat_x
def dilatedUNet(opt, input_size, classes, lossfxn):
    img_input = Input(shape=input_size, name='input')

    # Encoder: four dual-path stages, each followed by 2x2 max pooling
    dualblock1 = dualConv2D(img_input, 32, is_batchnorm=False, name='encoder_stage_1')
    pool1 = MaxPooling2D(pool_size=(2, 2))(dualblock1)
    dualblock2 = dualConv2D(pool1, 64, is_batchnorm=False, name='encoder_stage_2')
    pool2 = MaxPooling2D(pool_size=(2, 2))(dualblock2)
    dualblock3 = dualConv2D(pool2, 128, is_batchnorm=False, name='encoder_stage_3')
    pool3 = MaxPooling2D(pool_size=(2, 2))(dualblock3)
    dualblock4 = dualConv2D(pool3, 256, is_batchnorm=False, name='encoder_stage_4')
    pool4 = MaxPooling2D(pool_size=(2, 2))(dualblock4)

    # Bottleneck: a plain double convolution, no dilated path
    conv5 = doubleConv2D(pool4, 1024, is_batchnorm=False, name='encoder_stage_5')

    # Decoder: transposed-convolution upsampling, skip connection from the
    # matching encoder dual block, then a plain double convolution
    up1 = concatenate(
        [Conv2DTranspose(512, (3, 3), strides=(2, 2), padding='same', activation='relu',
                         kernel_initializer='glorot_normal')(conv5),
         dualblock4], name='up1')
    upconv1 = doubleConv2D(up1, 512, is_batchnorm=False, name='decoder_stage_1')
    up2 = concatenate(
        [Conv2DTranspose(256, (3, 3), strides=(2, 2), padding='same', activation='relu',
                         kernel_initializer='glorot_normal')(upconv1),
         dualblock3], name='up2')
    upconv2 = doubleConv2D(up2, 256, is_batchnorm=False, name='decoder_stage_2')
    up3 = concatenate(
        [Conv2DTranspose(128, (3, 3), strides=(2, 2), padding='same', activation='relu',
                         kernel_initializer='glorot_normal')(upconv2),
         dualblock2], name='up3')
    upconv3 = doubleConv2D(up3, 128, is_batchnorm=False, name='decoder_stage_3')
    up4 = concatenate(
        [Conv2DTranspose(64, (3, 3), strides=(2, 2), padding='same', activation='relu',
                         kernel_initializer='glorot_normal')(upconv3),
         dualblock1], name='up4')
    upconv4 = doubleConv2D(up4, 128, is_batchnorm=False, name='decoder_stage_4')

    # 1x1 convolution + softmax gives per-pixel class probabilities
    out = Conv2D(classes, (1, 1), activation='softmax', name='final')(upconv4)

    model = Model(inputs=img_input, outputs=out)
    model.compile(optimizer=opt, loss=lossfxn, metrics=[losses.dsc])
    return model
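A minimal usage sketch (the input size, optimizer, and loss below are my own placeholder choices, not settings from the paper):

from tensorflow.keras.optimizers import Adam

model = dilatedUNet(Adam(learning_rate=1e-4), input_size=(256, 256, 1),
                    classes=2, lossfxn='categorical_crossentropy')
model.summary()

Any input size works as long as both spatial dimensions are divisible by 16, since the encoder pools four times.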
Its actual performance remains to be tested!