CV Semantic Segmentation - Building Detection - Task 4: Evaluation Functions and Loss Functions

This post covers commonly used evaluation and loss functions, including Dice, IoU, BCE, Focal Loss, and Lovász-Softmax, with implementation code and examples for each.

I. Learning Objectives

• Understand the common evaluation and loss functions: Dice, IoU, BCE, Focal Loss, and Lovász-Softmax;
• Practice implementing these evaluation/loss functions.

II. Practicing Each Evaluation Function

1. Dice and Dice Loss

import numpy as np

def dice(output, target):
    '''Compute the Dice coefficient.'''
    smooth = 1e-6  # avoid division by zero
    intersection = (output * target).sum()
    return (2. * intersection + smooth) / (output.sum() + target.sum() + smooth)
# Test with two random binary matrices
target = np.random.randint(0, 2, (3, 3))
output = np.random.randint(0, 2, (3, 3))
d = dice(output, target)
print(target)
print(output)
print(d)
print(1 - d)  # dice loss

[[0 1 0]
[0 1 1]
[0 0 0]]
[[0 1 0]
[0 1 0]
[0 0 1]]
0.666666722222213
0.333333277777787
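In training, the prediction is usually a sigmoid probability map rather than a hard 0/1 mask, so the Dice loss is computed directly on the probabilities. A minimal PyTorch sketch of such a soft Dice loss (the class name SoftDiceLoss and the per-sample flattening are my own choices, not from the original course material):

import torch
import torch.nn as nn

class SoftDiceLoss(nn.Module):
    '''Dice loss on probabilities; assumes inputs already lie in [0, 1].'''
    def __init__(self, smooth=1e-6):
        super(SoftDiceLoss, self).__init__()
        self.smooth = smooth

    def forward(self, probs, targets):
        probs = probs.view(probs.size(0), -1)      # flatten each sample
        targets = targets.view(targets.size(0), -1)
        intersection = (probs * targets).sum(dim=1)
        dice = (2. * intersection + self.smooth) / (
            probs.sum(dim=1) + targets.sum(dim=1) + self.smooth)
        return 1. - dice.mean()                    # dice loss = 1 - Dice

probs = torch.sigmoid(torch.randn(2, 1, 3, 3))
targets = torch.randint(0, 2, (2, 1, 3, 3)).float()
print(SoftDiceLoss()(probs, targets))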
2. IoU Metric

def iou_score(output, target):
    '''Compute the IoU (Jaccard) score.'''
    intersection = np.logical_and(target, output)
    union = np.logical_or(target, output)
    return np.sum(intersection) / np.sum(union)
# Test with two random binary matrices
target = np.random.randint(0, 2, (3, 3))
output = np.random.randint(0, 2, (3, 3))
d = iou_score(output, target)
print(d)
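A side note (my own addition): for binary masks, IoU and Dice are monotonically related by IoU = Dice / (2 - Dice), which you can verify on the same random masks by reusing the dice function from section 1:

dc = dice(output, target)
print(dc / (2 - dc))  # equals the IoU printed above, up to the smoothing term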

3. BCE Loss
BCE loss is used for binary classification; you can simply call the API that torch already provides.

import torch
import torch.nn as nn

bce = nn.BCELoss()                # expects probabilities in [0, 1]
bce_sig = nn.BCEWithLogitsLoss()  # expects raw logits; applies sigmoid internally
input = torch.randn(5, 1, requires_grad=True)
target = torch.empty(5, 1).random_(2)
pre = nn.Sigmoid()(input)
loss_bce = bce(pre, target)
loss_bce_sig = bce_sig(input, target)
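A quick check, not in the original post: BCEWithLogitsLoss simply fuses the sigmoid into the loss in a numerically more stable form, so both calls return the same value:

print(loss_bce.item(), loss_bce_sig.item())
assert torch.allclose(loss_bce, loss_bce_sig)  # identical up to floating-point error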

4. Focal Loss

Focal loss is defined as FL(pt) = -α · (1 - pt)^γ · log(pt), where pt is the predicted probability of the true class. When the data is imbalanced, a suitable α can be set according to the class ratio. The idea is easy to grasp: to balance the loss contributed by positive and negative samples, a weight is placed in front of the loss term. Negative samples are plentiful, so their weight can be set lower; positive samples are scarce, so the weight on their loss is set higher.

import torch
import torch.nn as nn
import torch.nn.functional as F

class FocalLoss(nn.Module):
    def __init__(self, alpha=1, gamma=2, logits=False, reduce=True):
        super(FocalLoss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.logits = logits  # if True, inputs are raw logits; the BCE-with-logits loss applies the sigmoid internally to map them to [0, 1]
        self.reduce = reduce

    def forward(self, inputs, targets):
        if self.logits:
            BCE_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction='none')
        else:
            BCE_loss = F.binary_cross_entropy(inputs, targets, reduction='none')
        pt = torch.exp(-BCE_loss)  # pt = predicted probability of the true class
        F_loss = self.alpha * (1 - pt) ** self.gamma * BCE_loss
        if self.reduce:
            return torch.mean(F_loss)
        else:
            return F_loss
FL1 = FocalLoss(logits=False)
FL2 = FocalLoss(logits=True)
inputs = torch.randn(5, 1, requires_grad=True)
targets = torch.empty(5, 1).random_(2)
pre = nn.Sigmoid()(inputs)
f_loss_1 = FL1(pre, targets)
f_loss_2 = FL2(inputs, targets)
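A sanity check of my own: with alpha=1 and gamma=0 the modulating factor (1 - pt)**gamma is identically 1, so the focal loss falls back to plain BCE:

fl_plain = FocalLoss(alpha=1, gamma=0, logits=True)
print(fl_plain(inputs, targets).item())                # equals the plain BCE value
print(nn.BCEWithLogitsLoss()(inputs, targets).item())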

5. Lovász-Softmax
Lovász-Softmax (Berman et al., 2018) optimizes the mean IoU directly via the Lovász extension of the Jaccard loss, and is suited to multi-class segmentation.

import torch
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
try: 
    from itertools import ifilterfalse
except ImportError: # py3k
    from itertools import filterfalse as ifilterfalse
def lovasz_softmax(probas, labels, classes='present', per_image=False, ignore=None):
    '''Multi-class Lovasz-Softmax loss.
    probas: [B, C, H, W] class probabilities (softmax output)
    labels: [B, H, W] ground-truth labels (0 to C-1)
    classes: 'all' for all classes, 'present' for classes present in labels, or a list of class ids
    '''
    if per_image:
        loss = mean(lovasz_softmax_flat(*flatten_probas(prob.unsqueeze(0), lab.unsqueeze(0), ignore), classes=classes)
                    for prob, lab in zip(probas, labels))
    else:
        loss = lovasz_softmax_flat(*flatten_probas(probas, labels, ignore), classes=classes)
    return loss
def lovasz_softmax_flat(probas, labels, classes='present'):
    if probas.numel() == 0:
        # only void pixels, the gradients should be 0
        return probas * 0.
    C = probas.size(1)
    losses = []
    class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes
    for c in class_to_sum:
        fg = (labels == c).float()  # foreground for class c
        if classes == 'present' and fg.sum() == 0:
            continue
        if C == 1:
            if len(classes) > 1:
                raise ValueError('Sigmoid output possible only with 1 class')
            class_pred = probas[:, 0]
        else:
            class_pred = probas[:, c]
        errors = (Variable(fg) - class_pred).abs()
        errors_sorted, perm = torch.sort(errors, 0, descending=True)
        perm = perm.data
        fg_sorted = fg[perm]
        losses.append(torch.dot(errors_sorted, Variable(lovasz_grad(fg_sorted))))
    return mean(losses)

def flatten_probas(probas, labels, ignore=None):
    if probas.dim() == 3:
        # assumes output of a sigmoid layer
        B, H, W = probas.size()
        probas = probas.view(B, 1, H, W)
    B, C, H, W = probas.size()
    probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C) # B * H * W, C = P, C
    labels = labels.view(-1)
    if ignore is None:
        return probas, labels
    valid = (labels != ignore)
    vprobas = probas[valid.nonzero().squeeze()]
    vlabels = labels[valid]
    return vprobas, vlabels


def xloss(logits, labels, ignore=None):
    '''Cross-entropy loss.'''
    return F.cross_entropy(logits, Variable(labels), ignore_index=255)
def isnan(x):
    return x != x

def mean(l, ignore_nan=False, empty=0):
    '''nanmean compatible with generators.'''
    l = iter(l)
    if ignore_nan:
        l = ifilterfalse(isnan, l)
    try:
        n = 1
        acc = next(l)
    except StopIteration:
        if empty == 'raise':
            raise ValueError('Empty mean')
        return empty
    for n, v in enumerate(l, 2):
        acc += v
    if n == 1:
        return acc
    return acc / n
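The listing above calls lovasz_grad, which is missing from the original post; below is that helper as given in the reference implementation by Berman et al. (github.com/bermanmaxim/LovaszSoftmax), followed by a small smoke test of my own:

def lovasz_grad(gt_sorted):
    '''Gradient of the Lovasz extension w.r.t. sorted errors (Alg. 1 in the paper).'''
    p = len(gt_sorted)
    gts = gt_sorted.sum()
    intersection = gts - gt_sorted.float().cumsum(0)
    union = gts + (1 - gt_sorted).float().cumsum(0)
    jaccard = 1. - intersection / union
    if p > 1:  # cover the 1-pixel case
        jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
    return jaccard

# Smoke test: random softmax probabilities and labels for a 3-class problem
probas = F.softmax(torch.randn(2, 3, 4, 4), dim=1)  # B, C, H, W
labels = torch.randint(0, 3, (2, 4, 4))
print(lovasz_softmax(probas, labels))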