Notes from the DataWhale CV practice on semantic segmentation: understanding the task and the baseline

This post covers how to build a baseline for the competition's segmentation task with ResNet-backbone models: reading and decoding RLE-encoded masks, data augmentation, evaluation with the Dice coefficient, a custom dataset class and train/validation split, training an FCN-ResNet model with a combined loss, and finally post-processing the predictions into a submission CSV.


The competition:

URL: https://tianchi.aliyun.com/competition/entrance/531872/information

 

The masks are provided as run-length encoded (RLE) strings; the code for encoding and decoding them is as follows:

import numpy as np
import pandas as pd
import cv2

# Encode a binary mask image into RLE format
def rle_encode(im):
    '''
    im: numpy array, 1 - mask, 0 - background
    Returns the run-length encoding as a space-separated string
    '''
    pixels = im.flatten(order = 'F')
    pixels = np.concatenate([[0], pixels, [0]])
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
    runs[1::2] -= runs[::2]
    return ' '.join(str(x) for x in runs)

# Decode an RLE string back into a mask image
def rle_decode(mask_rle, shape=(512, 512)):
    '''
    mask_rle: run-length encoding as a space-separated string (start length)
    shape: (height, width) of array to return
    Returns numpy array, 1 - mask, 0 - background
    '''
    s = mask_rle.split()
    starts, lengths = [np.asarray(x, dtype=int) for x in (s[0:][::2], s[1:][::2])]
    starts -= 1
    ends = starts + lengths
    img = np.zeros(shape[0]*shape[1], dtype=np.uint8)
    for lo, hi in zip(starts, ends):
        img[lo:hi] = 1
    return img.reshape(shape, order='F')
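
As a quick sanity check (my own toy example, not from the original post), a tiny mask can be pushed through both functions to confirm they invert each other:

# A 4x4 binary mask with a 2x2 foreground block in the top-left corner
toy = np.zeros((4, 4), dtype=np.uint8)
toy[0:2, 0:2] = 1

rle = rle_encode(toy)
print(rle)                                            # '1 2 5 2' (column-major runs)
print((rle_decode(rle, shape=(4, 4)) == toy).all())   # True: decoding inverts encoding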

Reading a sample:

train_mask = pd.read_csv('train_mask.csv', sep='\t', names=['name', 'mask'])

# Read the first image and decode the corresponding RLE string into a mask matrix
img = cv2.imread('train/'+ train_mask['name'].iloc[0])
mask = rle_decode(train_mask['mask'].iloc[0])

print(rle_encode(mask) == train_mask['mask'].iloc[0])
# prints True

According to the evaluation criteria, the Dice coefficient is used to measure how close the predicted masks are to the ground truth.
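
For reference (this helper is my own addition, not part of the baseline), the Dice coefficient of two binary masks can be computed in a few lines of NumPy:

def dice_coef(pred, target, eps=1e-8):
    '''Dice coefficient between two binary masks (numpy arrays of 0/1).'''
    pred   = pred.astype(bool)
    target = target.astype(bool)
    intersection = np.logical_and(pred, target).sum()
    return 2.0 * intersection / (pred.sum() + target.sum() + eps)

print(dice_coef(mask, mask))  # identical masks give a score of (almost exactly) 1.0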


Data augmentation (with albumentations):

import albumentations as A

IMAGE_SIZE = 256

trfm = A.Compose([
    A.Resize(IMAGE_SIZE, IMAGE_SIZE),
    A.HorizontalFlip(p=0.5),
    A.VerticalFlip(p=0.5),
    A.RandomRotate90(),
])
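
Applied to an image/mask pair, the composed transform returns a dict whose 'image' and 'mask' entries have been transformed consistently; a quick check using the img and mask loaded earlier:

augments = trfm(image=img, mask=mask)
print(augments['image'].shape, augments['mask'].shape)  # (256, 256, 3) (256, 256)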


 

Building the dataset class:
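
Before the class itself, note that the blocks below rely on a few imports and constants (D, T, DEVICE, BATCH_SIZE, tqdm_notebook) that the original post does not show; a minimal sketch of what they could look like (the BATCH_SIZE value is my assumption):

import time

import torch
import torch.nn as nn
import torch.utils.data as D
import torchvision
from torchvision import transforms as T
from tqdm.notebook import tqdm as tqdm_notebook

DEVICE     = 'cuda' if torch.cuda.is_available() else 'cpu'
BATCH_SIZE = 8  # assumed value; not shown in the original post
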

class TianChiDataset(D.Dataset):
    def __init__(self, paths, rles, transform, test_mode=False):
        self.paths     = paths
        self.rles      = rles
        self.transform = transform
        self.test_mode = test_mode
        
        self.len       = len(paths)
        self.as_tensor = T.Compose([
            T.ToPILImage(),
            T.Resize(IMAGE_SIZE),
            T.ToTensor(),
            T.Normalize([0.625, 0.448, 0.488],
                        [0.131, 0.177, 0.101]),
        ])
        
    def __getitem__(self, index):
        img = cv2.imread(self.paths[index])
        if not self.test_mode:
            mask     = rle_decode(self.rles[index])
            augments = self.transform(image=img, mask=mask)
            return self.as_tensor(augments['image']), augments['mask'][None]
        else:
            return self.as_tensor(img), ''
        
    def __len__(self):
        return self.len


train_mask = pd.read_csv('dataset/train_mask.csv', sep='\t', names=['name', 'mask'])
train_mask['name'] = train_mask['name'].apply(lambda x: 'dataset/train/' + x)

img  = cv2.imread(train_mask['name'].iloc[0])
mask = rle_decode(train_mask['mask'].iloc[0])

print(rle_encode(mask) == train_mask['mask'].iloc[0])



dataset = TianChiDataset(
    train_mask['name'].values,
    train_mask['mask'].fillna('').values,
    trfm, False
)
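
Indexing the dataset gives an image tensor and a single-channel mask, which is a quick way to confirm the shapes before training:

image, mask_t = dataset[0]
print(image.shape, mask_t.shape)  # torch.Size([3, 256, 256]) (1, 256, 256)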

Splitting into training and validation sets:

valid_idx, train_idx = [], []

for i in range(len(dataset)):
    if i % 7 == 0:
        valid_idx.append(i)
    elif i % 7 == 1:
        train_idx.append(i)
# Note: only indices with i % 7 == 1 are kept for training, so this baseline
# trains on roughly 1/7 of the data and validates on another 1/7.

train_ds = D.Subset(dataset, train_idx)
valid_ds = D.Subset(dataset, valid_idx)

loader  = D.DataLoader(
    train_ds, batch_size=BATCH_SIZE, shuffle=True,  num_workers=0
)

vloader = D.DataLoader(
    valid_ds, batch_size=BATCH_SIZE, shuffle=False, num_workers=0
)
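
Pulling one batch from the loader confirms the tensors the model will see (shapes shown for the settings above):

image, target = next(iter(loader))
print(image.shape, target.shape)  # torch.Size([BATCH_SIZE, 3, 256, 256]) / ([BATCH_SIZE, 1, 256, 256])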

Choosing the model (I mainly use ResNet-series backbones here: resnet50 and resnet101):

def get_model():
    # FCN with a ResNet-101 backbone and pretrained weights; the final classifier
    # layer is replaced so the model outputs a single mask channel
    model = torchvision.models.segmentation.fcn_resnet101(pretrained=True)
    model.classifier[4] = nn.Conv2d(512, 1, kernel_size=(1, 1), stride=(1, 1))

    return model

@torch.no_grad()
def validation(model, loader, loss_fn):
    losses = []
    model.eval()
    for image, target in loader:
        image, target = image.to(DEVICE), target.float().to(DEVICE)
        output        = model(image)['out']
        loss          = loss_fn(output, target)
        losses.append(loss.item())
        
    return np.array(losses).mean()
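
The post mentions resnet50 as an alternative backbone; since the FCN head is the same (its final conv also takes 512 channels), switching only requires changing the constructor. A sketch, with get_model_resnet50 being my own helper name:

def get_model_resnet50():
    # Same idea as get_model(), but with the lighter ResNet-50 backbone
    model = torchvision.models.segmentation.fcn_resnet50(pretrained=True)
    model.classifier[4] = nn.Conv2d(512, 1, kernel_size=(1, 1), stride=(1, 1))
    return model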

Choosing the loss function:

model = get_model()
model.to(DEVICE)

optimizer = torch.optim.AdamW(model.parameters(), lr=1e-5, weight_decay=1e-6, amsgrad=True)

class SoftDiceLoss(nn.Module):
    def __init__(self, smooth=1., dims=(-2, -1)):
        super(SoftDiceLoss, self).__init__()
        self.smooth = smooth
        self.dims   = dims
        
    def forward(self, x, y):
        tp = (x * y).sum(self.dims)
        fp = (x * (1 - y)).sum(self.dims)
        fn = ((1 - x) * y).sum(self.dims)
        
        dc = (2 * tp + self.smooth) / (2 * tp + fp + fn + self.smooth)
        dc = dc.mean()
        return 1 - dc



bce_fn  = nn.BCEWithLogitsLoss()
dice_fn = SoftDiceLoss()

def loss_fn(y_pred, y_true):
    bce  = bce_fn(y_pred, y_true)
    dice = dice_fn(y_pred.sigmoid(), y_true)
    return 0.8 * bce + 0.2 * dice
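
A quick sanity check of the combined loss on random tensors shaped like the model output (batch of 2 single-channel 256x256 maps); the values are meaningless, this only confirms the pieces fit together:

logits  = torch.randn(2, 1, 256, 256)                  # fake model outputs (raw logits)
targets = (torch.rand(2, 1, 256, 256) > 0.5).float()   # fake binary masks
print(loss_fn(logits, targets))                        # a single positive scalar tensor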

Training the model:

header = r'''
        Train | Valid
Epoch |  Loss |  Loss | Time, m
'''

raw_line = '{:6d}' + '\u2502{:7.3f}' * 2 + '\u2502{:6.2f}'
print(header)



EPOCHES   = 15
best_loss = 10
for epoch in range(1, EPOCHES+1):
    losses     = []
    start_time = time.time()
    model.train()
    for image, target in tqdm_notebook(loader):
        image, target = image.to(DEVICE), target.float().to(DEVICE)
        optimizer.zero_grad()
        output = model(image)['out']
        loss   = loss_fn(output, target)
        loss.backward()
        optimizer.step()
        losses.append(loss.item())
        
    vloss = validation(model, vloader, loss_fn)
    print(raw_line.format(epoch, np.array(losses).mean(), vloss,
                          (time.time() - start_time) / 60))
    if vloss < best_loss:
        best_loss = vloss
        torch.save(model.state_dict(), 'model_best.pth')

Processing the results (inference on the test set):

trfm = T.Compose([
    T.ToPILImage(),
    T.Resize(IMAGE_SIZE),
    T.ToTensor(),
    T.Normalize([0.625, 0.448, 0.488],  # must match the normalization used at training time
                [0.131, 0.177, 0.101]),
])

subm = []
model.load_state_dict(torch.load("./model_best.pth"))
model.eval()


test_mask = pd.read_csv('dataset/test_a_samplesubmit.csv', sep='\t', names=['name', 'mask'])
test_mask['name'] = test_mask['name'].apply(lambda x: 'dataset/test_a/' + x)

for idx, name in enumerate(tqdm_notebook(test_mask['name'].iloc[:])):
    image = cv2.imread(name)
    image = trfm(image)
    
    with torch.no_grad():
        image = image.to(DEVICE)[None]
        score = model(image)['out'][0][0]
        score_sigmoid = score.sigmoid().cpu().numpy()
        score_sigmoid = (score_sigmoid > 0.5).astype(np.uint8)
        score_sigmoid = cv2.resize(score_sigmoid, (512, 512))
        
    subm.append([name.split('/')[-1], rle_encode(score_sigmoid)])

Saving the predictions to a CSV file:

subm = pd.DataFrame(subm)
subm.to_csv('./sub_tmp.csv', index=None, header=None, sep='\t')
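
As a final check (my own addition, not in the original post), the submission file can be read back and one mask decoded to confirm the format round-trips:

check = pd.read_csv('./sub_tmp.csv', sep='\t', names=['name', 'mask'])
first = rle_decode(check['mask'].fillna('').iloc[0])
print(check.shape, first.shape)  # (number of test images, 2) and (512, 512)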

 

 

 


Reference

https://tianchi.aliyun.com/notebook-ai/detail?postId=170488