unet测试评估metric脚本

该代码实现了将PaddleSeg的UNet模型转换为PyTorch并进行评估。主要功能包括计算accuracy、class precision、class recall和kappa指标。通过定义calculate_area函数来计算交集、预测和标签区域,然后使用mean_iou、class_measurement和kappa等函数计算评估指标。对验证集进行评估并打印结果。

摘要生成于 C知道 ,由 DeepSeek-R1 满血版支持, 前往体验 >

全部复制的paddleseg的代码转torch

import argparse
import logging
import os

import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image
from torchvision import transforms

from utils.data_loading import BasicDataset
from unet import UNet
from utils.utils import plot_img_and_mask
from torch.utils.data import DataLoader, random_split
from utils.data_loading import BasicDataset, CarvanaDataset
from tqdm import tqdm
import torch.nn.functional as F


# Goal (translated): evaluate a PyTorch-trained UNet. The model outputs NCHW
# logits, ground-truth labels are NHW; compute accuracy, class precision,
# class recall and the kappa coefficient.

# Tiny constant intended to guard divisions against zero denominators.
# NOTE(review): appears unused in this file — kept for compatibility.
EPSILON = 1e-32

def calculate_area(pred, label, num_classes, ignore_index=255):
    """
    Calculate intersection, prediction and label areas per class.

    Args:
        pred (Tensor): The prediction by model, NHW or N1HW (channel squeezed).
        label (Tensor): The ground truth of image, NHW or N1HW.
        num_classes (int): The unique number of target classes.
        ignore_index (int): Specifies a target value that is ignored. Default: 255.

    Returns:
        Tensor: The intersection area of prediction and ground truth on all classes.
        Tensor: The prediction area on all classes.
        Tensor: The ground truth area on all classes.

    Raises:
        ValueError: If `pred` and `label` shapes differ after squeezing.
    """
    # Drop the singleton channel dimension so both tensors are NHW.
    # BUGFIX: use the portable `dim=` kwarg; `axis=` is only a NumPy-compat
    # alias that older PyTorch versions of torch.squeeze do not accept.
    if len(pred.shape) == 4:
        pred = torch.squeeze(pred, dim=1)
    if len(label.shape) == 4:
        label = torch.squeeze(label, dim=1)
    if pred.shape != label.shape:
        raise ValueError('Shape of `pred` and `label` should be equal, '
                         'but there are {} and {}.'.format(pred.shape,
                                                           label.shape))

    # Pixels whose label equals ignore_index are excluded from the prediction
    # counts (and hence from the intersection).
    mask = label != ignore_index

    pred_area = []
    label_area = []
    intersect_area = []
    for i in range(num_classes):
        pred_i = torch.logical_and(pred == i, mask)
        label_i = label == i
        intersect_i = torch.logical_and(pred_i, label_i)
        pred_area.append(torch.sum(pred_i))
        label_area.append(torch.sum(label_i))
        intersect_area.append(torch.sum(intersect_i))

    # Stack the per-class scalar counts into 1-D tensors of length num_classes.
    return torch.stack(intersect_area), torch.stack(pred_area), torch.stack(label_area)


def get_args():
    """Parse command-line arguments for the evaluation run.

    Returns:
        argparse.Namespace with batch_size, load, scale, val, amp, root, num.
    """
    parser = argparse.ArgumentParser(description='Train the UNet on images and target masks')
    parser.add_argument('--batch-size', '-b', dest='batch_size', metavar='B', type=int, default=1, help='Batch size')
    # BUGFIX: `default=False` on typed str/int options lied about the type;
    # `None` is the conventional "not provided" marker and is equally falsy
    # for the `if args.load:` checks in __main__.
    parser.add_argument('--load', '-f', type=str, default=None, help='Load model from a .pth file')
    parser.add_argument('--scale', '-s', type=float, default=0.5, help='Downscaling factor of the images')
    parser.add_argument('--validation', '-v', dest='val', type=float, default=10.0,
                        help='Percent of the data that is used as validation (0-100)')
    parser.add_argument('--amp', action='store_true', default=False, help='Use mixed precision')
    parser.add_argument('--root', '-r', type=str, default=None, help='root dir')
    parser.add_argument('--num', '-n', type=int, default=None, help='num of classes')

    return parser.parse_args()


# Sub-directory names (under <root>/train and <root>/val) that hold the
# input images and the ground-truth masks respectively.
dir_img_path = 'imgs'
dir_mask_path = 'masks'

# Project-local metrics module providing mean_iou / class_measurement /
# kappa / dice (see the metrics.py listing below).
import metrics

def train_net(net,
              device,
              epochs: int = 5,
              batch_size: int = 1,
              learning_rate: float = 0.001,
              val_percent: float = 0.1,
              save_checkpoint: bool = True,
              img_scale: float = 0.5,
              amp: bool = False, root_dir: str = '/data/yangbo/unet/datas/data1'):
    """Evaluate `net` on the validation split under `root_dir`.

    Despite its name, this function only evaluates: it runs one pass over
    ``<root_dir>/val`` and prints mIoU, accuracy, kappa, Dice plus per-class
    IoU / precision / recall. The training-related parameters (`epochs`,
    `learning_rate`, `save_checkpoint`, `val_percent`) are kept for interface
    compatibility but are only used in the startup log.
    """
    train_dir_img = os.path.join(root_dir, 'train', dir_img_path)
    train_dir_mask = os.path.join(root_dir, 'train', dir_mask_path)

    val_dir_img = os.path.join(root_dir, 'val', dir_img_path)
    val_dir_mask = os.path.join(root_dir, 'val', dir_mask_path)

    # 1. Create datasets; fall back to BasicDataset when the Carvana-specific
    # loader rejects the directory layout.
    try:
        train_dataset = CarvanaDataset(train_dir_img, train_dir_mask, img_scale)
        val_dataset = CarvanaDataset(val_dir_img, val_dir_mask, img_scale)
    except (AssertionError, RuntimeError):
        train_dataset = BasicDataset(train_dir_img, train_dir_mask, img_scale)
        val_dataset = BasicDataset(val_dir_img, val_dir_mask, img_scale)

    n_val = len(val_dataset)
    n_train = len(train_dataset)

    # 2. Create data loaders (only val_loader is consumed below).
    loader_args = dict(batch_size=batch_size, num_workers=4, pin_memory=True)
    train_loader = DataLoader(train_dataset, shuffle=True, **loader_args)
    val_loader = DataLoader(val_dataset, shuffle=False, drop_last=True, **loader_args)

    logging.info(f'''Starting training:
        Epochs:          {epochs}
        Batch size:      {batch_size}
        Learning rate:   {learning_rate}
        Training size:   {n_train}
        Validation size: {n_val}
        Checkpoints:     {save_checkpoint}
        Device:          {device.type}
        Images scaling:  {img_scale}
        Mixed Precision: {amp}
    ''')

    # 3. Accumulate per-class confusion areas over the whole validation set.
    # BUGFIX: accumulators must live on the CPU — the metric functions call
    # .numpy(), and adding a CUDA tensor into a CPU tensor raises a device
    # mismatch error; the original torch.zeros([1]) broke on GPU runs.
    # BUGFIX: class count follows net.n_classes (set from --num) instead of
    # the previously hard-coded 3.
    num_classes = net.n_classes
    intersect_area_all = torch.zeros(num_classes, dtype=torch.float64)
    pred_area_all = torch.zeros(num_classes, dtype=torch.float64)
    label_area_all = torch.zeros(num_classes, dtype=torch.float64)
    for batch in tqdm(val_loader):
        images = batch['image']
        true_masks = batch['mask']

        assert images.shape[1] == net.n_channels, \
            f'Network has been defined with {net.n_channels} input channels, ' \
            f'but loaded images have {images.shape[1]} channels. Please check that ' \
            'the images are loaded correctly.'

        images = images.to(device=device, dtype=torch.float32)
        true_masks = true_masks.to(device=device, dtype=torch.long)
        with torch.no_grad():
            masks_pred = net(images)
            # Logits NCHW -> hard labels N1HW (calculate_area squeezes the 1).
            masks_pred = torch.argmax(masks_pred, dim=1, keepdim=True)
            intersect_area, pred_area, label_area = calculate_area(
                masks_pred, true_masks, num_classes)
            intersect_area_all += intersect_area.cpu()
            pred_area_all += pred_area.cpu()
            label_area_all += label_area.cpu()

    # 4. Derive and report the evaluation metrics.
    metrics_input = (intersect_area_all, pred_area_all, label_area_all)
    class_iou, miou = metrics.mean_iou(*metrics_input)
    acc, class_precision, class_recall = metrics.class_measurement(
        *metrics_input)
    kappa = metrics.kappa(*metrics_input)
    class_dice, mdice = metrics.dice(*metrics_input)
    infor = "[EVAL] #Images: {} mIoU: {:.4f} Acc: {:.4f} Kappa: {:.4f} Dice: {:.4f}".format(
            len(val_loader), miou, acc, kappa, mdice)
    print(infor)
    print("[EVAL] Class IoU: " + str(np.round(class_iou, 4)))
    print("[EVAL] Class Precision: " + str(
            np.round(class_precision, 4)))
    print("[EVAL] Class Recall: " + str(np.round(class_recall, 4)))

if __name__ == '__main__':
    args = get_args()

    logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    logging.info(f'Using device {device}')

    # Change here to adapt to your data
    # n_channels=3 for RGB images
    # n_classes is the number of probabilities you want to get per pixel
    # n_classes comes from the --num CLI flag (translated: "change num_classes")
    net = UNet(n_channels=3, n_classes=args.num, bilinear=True)
    # Evaluation-only script: switch off dropout/batch-norm updates up front.
    # (Safe before load_state_dict — loading weights does not change the mode.)
    net.eval()
    logging.info(f'Network:\n'
                 f'\t{net.n_channels} input channels\n'
                 f'\t{net.n_classes} output channels (classes)\n'
                 f'\t{"Bilinear" if net.bilinear else "Transposed conv"} upscaling')

    if args.load:
        net.load_state_dict(torch.load(args.load, map_location=device))
        logging.info(f'Model loaded from {args.load}')

    net.to(device=device)
    try:
        # epochs=0 / learning_rate=0: train_net is used purely for evaluation.
        train_net(net=net,
                  epochs=0,
                  batch_size=args.batch_size,
                  learning_rate=0,
                  device=device,
                  img_scale=args.scale,
                  val_percent=args.val / 100,
                  amp=args.amp,
                  root_dir=args.root)
    except KeyboardInterrupt:
        # Mirrors the original training script's interrupt handling.
        torch.save(net.state_dict(), 'INTERRUPTED.pth')
        logging.info('Saved interrupt')

metrics.py

import numpy as np
import torch
import sklearn.metrics as skmetrics

def mean_iou(intersect_area, pred_area, label_area):
    """
    Compute per-class IoU and the mean IoU.

    Args:
        intersect_area (Tensor): The intersection area of prediction and ground truth on all classes.
        pred_area (Tensor): The prediction area on all classes.
        label_area (Tensor): The ground truth area on all classes.

    Returns:
        np.ndarray: IoU on all classes.
        float: mean IoU over all classes.
    """
    intersect = intersect_area.numpy()
    # IoU denominator: |pred ∪ label| = |pred| + |label| - |pred ∩ label|.
    union = pred_area.numpy() + label_area.numpy() - intersect
    # A class absent from both prediction and label gets IoU 0.
    class_iou = [
        0 if union[idx] == 0 else intersect[idx] / union[idx]
        for idx in range(len(intersect))
    ]
    return np.array(class_iou), np.mean(class_iou)

def class_measurement(intersect_area, pred_area, label_area):
    """
    Compute overall pixel accuracy, per-class precision and per-class recall.

    Args:
        intersect_area (Tensor): The intersection area of prediction and ground truth on all classes.
        pred_area (Tensor): The prediction area on all classes.
        label_area (Tensor): The ground truth area on all classes.

    Returns:
        float: The overall accuracy (correct pixels / predicted pixels).
        np.ndarray: The precision of all classes.
        np.ndarray: The recall of all classes.
    """
    intersect = intersect_area.numpy()
    predicted = pred_area.numpy()
    truth = label_area.numpy()

    # Total correct pixels over total predicted pixels (== total pixels).
    mean_acc = np.sum(intersect) / np.sum(predicted)
    # Guard the per-class divisions: an absent class contributes 0.
    class_precision = [
        0 if predicted[c] == 0 else intersect[c] / predicted[c]
        for c in range(len(intersect))
    ]
    class_recall = [
        0 if truth[c] == 0 else intersect[c] / truth[c]
        for c in range(len(intersect))
    ]

    return mean_acc, np.array(class_precision), np.array(class_recall)

def kappa(intersect_area, pred_area, label_area):
    """
    Compute Cohen's kappa coefficient from the confusion areas.

    Args:
        intersect_area (Tensor): The intersection area of prediction and ground truth on all classes.
        pred_area (Tensor): The prediction area on all classes.
        label_area (Tensor): The ground truth area on all classes.

    Returns:
        float: kappa coefficient.
    """
    # float64 throughout to avoid integer overflow in the products below.
    intersect = intersect_area.numpy().astype(np.float64)
    predicted = pred_area.numpy().astype(np.float64)
    truth = label_area.numpy().astype(np.float64)

    total = np.sum(truth)
    # Observed agreement: fraction of correctly classified pixels.
    observed = np.sum(intersect) / total
    # Expected chance agreement from the marginal class distributions.
    expected = np.sum(predicted * truth) / (total * total)
    return (observed - expected) / (1 - expected)

def dice(intersect_area, pred_area, label_area):
    """
    Compute per-class Dice coefficient and the mean Dice.

    Args:
        intersect_area (Tensor): The intersection area of prediction and ground truth on all classes.
        pred_area (Tensor): The prediction area on all classes.
        label_area (Tensor): The ground truth area on all classes.

    Returns:
        np.ndarray: Dice on all classes.
        float: mean Dice over all classes.
    """
    intersect = intersect_area.numpy()
    # Dice denominator: |pred| + |label| (not the set union).
    denom = pred_area.numpy() + label_area.numpy()
    # A class absent from both prediction and label gets Dice 0.
    class_dice = [
        0 if denom[c] == 0 else (2 * intersect[c]) / denom[c]
        for c in range(len(intersect))
    ]
    return np.array(class_dice), np.mean(class_dice)

使用示例

python .\test2.py --root D:\pic\23\0403\851-1003339-H01\bend --scale 0.25 --load C:\Users\Admin\Desktop\fsdownload\checkpoint_epoch485.pth --num 3

结果展示

[EVAL] #Images: 74 mIoU: 0.5119 Acc: 0.9996 Kappa: 0.4405 Dice: 0.6002
[EVAL] Class IoU: [0.9997 0.4177 0.1183]
[EVAL] Class Precision: [0.9998 0.6767 0.1858]
[EVAL] Class Recall: [0.9998 0.5219 0.2456]

# ====================== # UNETR 训练脚本 # ====================== import os import numpy as np import torch import torch.nn as nn from torch.utils.data import DataLoader from monai.transforms import ( Compose, LoadImaged, EnsureChannelFirstd, Spacingd, Orientationd, ScaleIntensityRanged, RandCropByPosNegLabeld, RandFlipd, RandRotate90d, EnsureTyped, Activations, AsDiscrete, Resized, RandZoomd, RandGaussianNoised, CenterSpatialCropd ) from monai.data import list_data_collate, Dataset # 使用普通Dataset from monai.networks.nets import UNETR from monai.losses import DiceCELoss from monai.metrics import DiceMetric from glob import glob from sklearn.model_selection import train_test_split from torch.optim.lr_scheduler import LambdaLR from tqdm import tqdm from torch.cuda.amp import GradScaler, autocast import matplotlib.pyplot as plt import gc import nibabel as nib import sys import monai # 自定义Transform:用于把RandCropByPosNegLabeld返回的list转成Tensor class ExtractFirstSampledDict(monai.transforms.Transform): def __call__(self, data): out = {} for k, v in data.items(): if isinstance(v, list) and len(v) == 1: out[k] = v[0] else: out[k] = v return out # ====================== # 配置参数 # ====================== root_dir = "datasets/LiTS/processed" images_dir = os.path.join(root_dir, "images") labels_dir = os.path.join(root_dir, "labels") max_epochs = 200 batch_size = 1 learning_rate = 1e-4 num_classes = 3 warmup_epochs = 10 use_amp = False # AMP 对 UNETR 不稳定,建议关闭 # 禁用MetaTensor以避免decollate错误 os.environ["MONAI_USE_META_DICT"] = "0" device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # 打印环境信息 print("===== 环境信息 =====") print(f"Python版本: {sys.version}") print(f"PyTorch版本: {torch.__version__}") print(f"MONAI版本: {monai.__version__}") print(f"nibabel版本: {nib.__version__}") if torch.cuda.is_available(): print(f"CUDA版本: {torch.version.cuda}") print(f"cuDNN版本: {torch.backends.cudnn.version()}") # 尺寸设置 - 确保能被16整除 def get_valid_size(size, divisor=16): return tuple([max(divisor, (s // 
divisor) * divisor) for s in size]) base_size = (128, 128, 64) resized_size = get_valid_size(base_size) crop_size = get_valid_size((64, 64, 64)) # 减小尺寸以节省显存 print(f"输入尺寸: resized_size={resized_size}, crop_size={crop_size}") # ====================== # 数据预处理 # ====================== train_transforms = Compose([ LoadImaged(keys=["image", "label"]), EnsureChannelFirstd(keys=["image", "label"]), Orientationd(keys=["image", "label"], axcodes="RAS"), Spacingd(keys=["image", "label"], pixdim=(1.5, 1.5, 2.0), mode=("bilinear", "nearest")), ScaleIntensityRanged(keys=["image"], a_min=-200, a_max=200, b_min=0.0, b_max=1.0, clip=True), Resized(keys=["image", "label"], spatial_size=resized_size, mode=("trilinear", "nearest")), RandCropByPosNegLabeld( keys=["image", "label"], label_key="label", spatial_size=crop_size, pos=1.0, neg=1.0, num_samples=1, image_threshold=0 ), ExtractFirstSampledDict(), RandFlipd(keys=["image", "label"], prob=0.5, spatial_axis=0), RandRotate90d(keys=["image", "label"], prob=0.5, max_k=3), RandZoomd(keys=["image", "label"], prob=0.5, min_zoom=0.9, max_zoom=1.1, mode=("trilinear", "nearest")), RandGaussianNoised(keys=["image"], prob=0.2, mean=0.0, std=0.05), EnsureTyped(keys=["image", "label"], data_type="tensor"), ]) val_transforms = Compose([ LoadImaged(keys=["image", "label"]), EnsureChannelFirstd(keys=["image", "label"]), Orientationd(keys=["image", "label"], axcodes="RAS"), Spacingd(keys=["image", "label"], pixdim=(1.5, 1.5, 2.0), mode=("bilinear", "nearest")), ScaleIntensityRanged(keys=["image"], a_min=-200, a_max=200, b_min=0.0, b_max=1.0, clip=True), Resized(keys=["image", "label"], spatial_size=resized_size, mode=("trilinear", "nearest")), CenterSpatialCropd(keys=["image", "label"], roi_size=crop_size), EnsureTyped(keys=["image", "label"], data_type="tensor"), ]) images = sorted(glob(os.path.join(images_dir, "*.nii.gz"))) labels = sorted(glob(os.path.join(labels_dir, "*.nii.gz"))) data = [{"image": img, "label": lbl} for img, lbl in zip(images, 
labels)] train_files, val_files = train_test_split(data, test_size=0.2, random_state=42) train_ds = Dataset(data=train_files, transform=train_transforms) val_ds = Dataset(data=val_files, transform=val_transforms) train_loader = DataLoader( train_ds, batch_size=batch_size, shuffle=True, num_workers=0, # 避免多进程导致的问题 collate_fn=list_data_collate, pin_memory=torch.cuda.is_available() ) val_loader = DataLoader( val_ds, batch_size=1, shuffle=False, num_workers=0, collate_fn=list_data_collate, pin_memory=torch.cuda.is_available() ) # ====================== # 模型构建 # ====================== model = UNETR( in_channels=1, out_channels=num_classes, img_size=crop_size, feature_size=16, hidden_size=512, mlp_dim=2048, num_heads=8, pos_embed="perceptron", norm_name="batch", res_block=True, dropout_rate=0.1 ).to(device) total_params = sum(p.numel() for p in model.parameters()) print(f"模型参数总数: {total_params / 1e6:.2f}M") # ====================== # 损失 + 优化器 # ====================== class_weights = torch.tensor([0.2, 0.3, 0.5]).to(device) loss_function = DiceCELoss(to_onehot_y=True, softmax=True, ce_weight=class_weights, lambda_dice=0.5, lambda_ce=0.5) optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate, weight_decay=1e-5) def lr_lambda(epoch): if epoch < warmup_epochs: return (epoch + 1) / warmup_epochs progress = (epoch - warmup_epochs) / (max_epochs - warmup_epochs) return 0.5 * (1 + np.cos(np.pi * progress)) scheduler = LambdaLR(optimizer, lr_lambda=lr_lambda) # ====================== # 评估器 # ====================== post_pred = Compose([Activations(softmax=True), AsDiscrete(argmax=True)]) post_label = Compose([AsDiscrete(to_onehot=num_classes)]) dice_metric = DiceMetric(include_background=True, reduction="mean", get_not_nans=False, num_classes=num_classes) scaler = GradScaler(enabled=use_amp) # ====================== # 训练循环 # ====================== best_metric = -1 best_metric_epoch = -1 train_loss_history = [] val_dice_history = [] os.makedirs("unetr_checkpoints", 
exist_ok=True) os.makedirs("logs", exist_ok=True) print("\n===== 测试数据加载 =====") try: test_sample = train_ds[0] print("数据加载测试成功!") print(f"图像形状: {test_sample['image'].shape}") print(f"标签形状: {test_sample['label'].shape}") except Exception as e: print(f"数据加载失败: {str(e)}") print("\n尝试替代加载方式...") from monai.data import NibabelReader sample_file = train_files[0] reader = NibabelReader() img = reader.read(sample_file['image']) label = reader.read(sample_file['label']) print(f"手动加载成功 - 图像形状: {img.shape}, 标签形状: {label.shape}") for epoch in range(max_epochs): print(f"\nEpoch {epoch+1}/{max_epochs}") model.train() epoch_loss, step = 0, 0 pbar_train = tqdm(total=len(train_loader), desc=f"训练 Epoch {epoch+1}") for batch_data in train_loader: step += 1 try: inputs = batch_data["image"].to(device) labels = batch_data["label"].to(device) optimizer.zero_grad() with autocast(enabled=use_amp): outputs = model(inputs) loss = loss_function(outputs, labels) if use_amp: scaler.scale(loss).backward() scaler.step(optimizer) scaler.update() else: loss.backward() optimizer.step() epoch_loss += loss.item() pbar_train.update(1) pbar_train.set_postfix({"loss": f"{loss.item():.4f}"}) if step % 10 == 0: torch.cuda.empty_cache() except RuntimeError as e: if 'CUDA out of memory' in str(e): print("\nCUDA内存不足,跳过该批次") torch.cuda.empty_cache() gc.collect() else: print(f"\n训练时发生错误: {str(e)}") continue except Exception as e: print(f"\n训练时发生未知错误: {str(e)}") continue pbar_train.close() epoch_loss /= step train_loss_history.append(epoch_loss) print(f"训练平均损失: {epoch_loss:.4f}") scheduler.step() current_lr = optimizer.param_groups[0]['lr'] print(f"当前学习率: {current_lr:.7f}") model.eval() dice_vals = [] pbar_val = tqdm(total=len(val_loader), desc=f"验证 Epoch {epoch+1}") with torch.no_grad(): for val_data in val_loader: try: val_images = val_data["image"].to(device) val_labels = val_data["label"].to(device) val_outputs = model(val_images) val_preds = post_pred(val_outputs.cpu()) val_truth = 
post_label(val_labels.cpu()) dice_metric(y_pred=[val_preds], y=[val_truth]) metric = dice_metric.aggregate().item() dice_metric.reset() dice_vals.append(metric) pbar_val.update(1) pbar_val.set_postfix({"dice": f"{metric:.4f}"}) except RuntimeError as e: print(f"\n验证时发生错误: {str(e)}") continue except Exception as e: print(f"\n验证时发生未知错误: {str(e)}") continue pbar_val.close() avg_metric = np.mean(dice_vals) if dice_vals else 0.0 val_dice_history.append(avg_metric) print(f"验证平均Dice: {avg_metric:.4f}") if avg_metric > best_metric: best_metric = avg_metric best_metric_epoch = epoch + 1 torch.save(model.state_dict(), f"unetr_checkpoints/best_model_epoch{best_metric_epoch}_dice{best_metric:.4f}.pth") print(f"保存新的最佳模型! Epoch: {best_metric_epoch}, Dice: {best_metric:.4f}") if (epoch + 1) % 10 == 0: torch.save({ 'epoch': epoch + 1, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), 'loss': epoch_loss, 'dice': avg_metric }, f"unetr_checkpoints/checkpoint_epoch_{epoch+1}.pth") plt.figure(figsize=(12, 6)) plt.subplot(1, 2, 1) plt.plot(train_loss_history, label='训练损失') plt.title('训练损失') plt.xlabel('Epoch') plt.ylabel('Loss') plt.legend() plt.subplot(1, 2, 2) plt.plot(val_dice_history, label='验证Dice', color='orange') plt.title('验证Dice') plt.xlabel('Epoch') plt.ylabel('Dice') plt.legend() plt.tight_layout() plt.savefig("logs/unetr_training_metrics.png") plt.close() torch.cuda.empty_cache() gc.collect() print(f"\n训练完成! 
最佳Dice: {best_metric:.4f} at epoch {best_metric_epoch}") 这个代码covid_seg) (base) liulicheng@ailab-MS-7B79:~/MultiModal_MedSeg_2025$ /home/liulicheng/anaconda3/envs/covid_seg/bin/python /home/liulicheng/MultiModal_MedSeg_2025/train/train_unetr.py ===== 环境信息 ===== Python版本: 3.8.12 | packaged by conda-forge | (default, Sep 29 2021, 19:52:28) [GCC 9.4.0] PyTorch版本: 2.1.0+cu118 MONAI版本: 1.3.2 nibabel版本: 5.2.1 CUDA版本: 11.8 cuDNN版本: 8700 输入尺寸: resized_size=(128, 128, 64), crop_size=(64, 64, 64) /home/liulicheng/anaconda3/envs/covid_seg/lib/python3.8/site-packages/monai/utils/deprecate_utils.py:221: FutureWarning: monai.networks.nets.unetr UNETR.__init__:pos_embed: Argument `pos_embed` has been deprecated since version 1.2. It will be removed in version 1.4. please use `proj_type` instead. warn_deprecated(argname, msg, warning_category) 模型参数总数: 43.67M /home/liulicheng/anaconda3/envs/covid_seg/lib/python3.8/site-packages/monai/utils/deprecate_utils.py:221: FutureWarning: monai.losses.dice DiceCELoss.__init__:ce_weight: Argument `ce_weight` has been deprecated since version 1.2. It will be removed in version 1.4. please use `weight` instead. warn_deprecated(argname, msg, warning_category) ===== 测试数据加载 ===== 数据加载测试成功! 数据加载失败: list indices must be integers or slices, not str 尝试替代加载方式... 手动加载成功 - 图像形状: (512, 512, 94), 标签形状: (512, 512, 94) Epoch 1/200 训练 Epoch 1: 1%|█ | 1/104 [00:11<19:08, 11.15s/it, loss=1.0541]这样也太慢了吧
最新发布
06-27
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值