Installing Detectron2 on Ubuntu and Training on a Custom Dataset (Mask R-CNN)

This guide walks through setting up a Detectron2 environment on Ubuntu, including creating a virtual environment, resolving gcc version problems, and installing the dependencies. It then demonstrates running a pretrained model, shows how to convert a dataset to COCO format and register it as a custom dataset, and covers a missing-module error that can appear during training. Finally, it discusses a category-order mismatch that can surface at inference time and how to fix it.

Table of Contents

Environment Setup

Testing the Demo

Registering a Custom Dataset

Training Your Own Dataset

Inference on Images

Common Problems


Environment Setup

1. Create a virtual environment

conda create -n det-maskrcnn python=3.7

2. Activate the environment and install the dependencies

conda activate det-maskrcnn    # enter the virtual environment
pip install opencv-python matplotlib cython tensorboardX
conda install pytorch==1.7.0 torchvision==0.8.0 torchaudio==0.7.0 cudatoolkit=10.1 -c pytorch  # pick the build that matches your machine at https://pytorch.org/
# Note: different detectron2 versions require different pytorch versions; check detectron2's setup.py before installing
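
An optional sanity check that the install worked and PyTorch can see the GPU:

python -c "import torch; print(torch.__version__, torch.cuda.is_available())"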

Download cocoapi into the project directory, then build and install it:
cd cocoapi/PythonAPI
python setup.py build_ext install
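
If the build succeeded, the import below should run without errors (a minimal check):

python -c "from pycocotools.coco import COCO; print('pycocotools OK')"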

Install detectron2: go back to the detectron2 root directory and run
python -m pip install -e .
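
If the install went through, detectron2 should import cleanly and report its version:

python -c "import detectron2; print(detectron2.__version__)"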

Possible problem:

报错:ERROR: Command errored out with exit status 1: /root/miniconda3/envs/det-maskrcnn/bin/python -c 'import io, os, sys, setuptools, tokenize; sys.argv[0] = '"'"'/root/workspace/maskrcnn/detectron2-maskrcnn/setup.py'"'"'; __file__='"'"'/root/workspace/maskrcnn/detectron2-maskrcnn/setup.py'"'"';f = getattr(tokenize, '"'"'open'"'"', open)(__file__) if os.path.exists(__file__) else io.StringIO('"'"'from setuptools import setup; setup()'"'"');code = f.read().replace('"'"'\r\n'"'"', '"'"'\n'"'"');f.close();exec(compile(code, __file__, '"'"'exec'"'"'))' develop --no-deps Check the logs for full command output.

Cause: an incompatible gcc version; uninstall gcc and reinstall a version that works with your CUDA toolchain.
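
On Ubuntu, a reinstall might look like the sketch below; gcc 7 here is only an illustration, so pick a release that matches your CUDA toolkit:

sudo apt-get install gcc-7 g++-7
sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-7 70
sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-7 70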


Testing the Demo

python demo/demo.py --config-file configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml --input demo/demo.jpg --opts MODEL.WEIGHTS weights/model_final_r50_fpn_1x.pkl

Pretrained weights can be downloaded from the official MODEL_ZOO (https://github.com/facebookresearch/detectron2/blob/main/MODEL_ZOO.md).


Registering a Custom Dataset

1. Convert your dataset to COCO format and arrange it as follows:

datasets
   -| coco
      -| annotations      # annotation files
          -| instances_train2017.json
          -| instances_val2017.json
      -| train2017        # training images
      -| val2017          # validation images
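
Before registering anything, it is worth validating the converted annotations. A minimal sketch using pycocotools (the path assumes the layout above):

from pycocotools.coco import COCO

coco = COCO("./datasets/coco/annotations/instances_train2017.json")
cats = coco.loadCats(coco.getCatIds())
print([(c["id"], c["name"]) for c in cats])  # ids and names should match DATASET_CATEGORIES below
print("images:", len(coco.getImgIds()), "annotations:", len(coco.getAnnIds()))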

2. Starting from detectron2's train_net.py, make the changes below and save the result as tools/train.py:

import logging
import os
from collections import OrderedDict
import torch

import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data.datasets import load_coco_json
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, hooks, launch
from detectron2.evaluation import (
    CityscapesInstanceEvaluator,
    CityscapesSemSegEvaluator,
    COCOEvaluator,
    COCOPanopticEvaluator,
    DatasetEvaluators,
    LVISEvaluator,
    PascalVOCDetectionEvaluator,
    SemSegEvaluator,
    verify_results,
)
from detectron2.modeling import GeneralizedRCNNWithTTA
from detectron2.data.datasets import register_coco_instances


# single GPU
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# Declare the classes; keep the order consistent with the category ids in the json
CLASS_NAMES = ["bv", "rb"]
DATASET_CATEGORIES = [
    {"name": "bv", "id": 1, "isthing": 1, "color": [220, 20, 60]},
    {"name": "rb", "id": 2, "isthing": 1, "color": [219, 142, 185]},
]
# declare the dataset splits
PREDEFINED_SPLITS_DATASET = {
    "coco_my_train": ("./datasets/coco/train2017", "./datasets/coco/annotations/instances_train2017.json"),
    "coco_my_val": ("./datasets/coco/val2017", "./datasets/coco/annotations/instances_val2017.json"),
}
# register the datasets
def register_dataset():
    """
    purpose: register all splits of dataset with PREDEFINED_SPLITS_DATASET
    """
    for key, (image_root, json_file) in PREDEFINED_SPLITS_DATASET.items():
        register_dataset_instances(name=key,
                                   metadata=get_dataset_instances_meta(),
                                   json_file=json_file,
                                   image_root=image_root)


def get_dataset_instances_meta():
    """
    purpose: get metadata of dataset from DATASET_CATEGORIES
    return: dict[metadata]
    """
    thing_ids = [k["id"] for k in DATASET_CATEGORIES if k["isthing"] == 1]
    thing_colors = [k["color"] for k in DATASET_CATEGORIES if k["isthing"] == 1]
    # assert len(thing_ids) == 2, len(thing_ids)
    thing_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(thing_ids)}
    thing_classes = [k["name"] for k in DATASET_CATEGORIES if k["isthing"] == 1]
    ret = {
        "thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id,
        "thing_classes": thing_classes,
        "thing_colors": thing_colors,
    }
    return ret


def register_dataset_instances(name, metadata, json_file, image_root):
    """
    purpose: register dataset to DatasetCatalog,
             register metadata to MetadataCatalog and set attribute
    """
    DatasetCatalog.register(name, lambda: load_coco_json(json_file, image_root, name))
    MetadataCatalog.get(name).set(json_file=json_file,
                                  image_root=image_root,
                                  evaluator_type="coco",
                                  **metadata)


def build_evaluator(cfg, dataset_name, output_folder=None):
    """
    Create evaluator(s) for a given dataset.
    This uses the special metadata "evaluator_type" associated with each builtin dataset.
    For your own dataset, you can simply create an evaluator manually in your
    script and do not have to worry about the hacky if-else logic here.
    """
    if output_folder is None:
        output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
    evaluator_list = []
    evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
    if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
        evaluator_list.append(
            SemSegEvaluator(
                dataset_name,
                distributed=True,
                output_dir=output_folder,
            )
        )
    if evaluator_type in ["coco", "coco_panoptic_seg"]:
        evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder))
    if evaluator_type == "coco_panoptic_seg":
        evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
    if evaluator_type == "cityscapes_instance":
        assert (
            torch.cuda.device_count() >= comm.get_rank()
        ), "CityscapesEvaluator currently do not work with multiple machines."
        return CityscapesInstanceEvaluator(dataset_name)
    if evaluator_type == "cityscapes_sem_seg":
        assert (
            torch.cuda.device_count() >= comm.get_rank()
        ), "CityscapesEvaluator currently do not work with multiple machines."
        return CityscapesSemSegEvaluator(dataset_name)
    elif evaluator_type == "pascal_voc":
        return PascalVOCDetectionEvaluator(dataset_name)
    elif evaluator_type == "lvis":
        return LVISEvaluator(dataset_name, output_dir=output_folder)
    if len(evaluator_list) == 0:
        raise NotImplementedError(
            "no Evaluator for the dataset {} with the type {}".format(dataset_name, evaluator_type)
        )
    elif len(evaluator_list) == 1:
        return evaluator_list[0]
    return DatasetEvaluators(evaluator_list)


class Trainer(DefaultTrainer):
    """
    We use the "DefaultTrainer" which contains pre-defined default logic for
    standard training workflow. They may not work for you, especially if you
    are working on a new research project. In that case you can write your
    own training loop. You can use "tools/plain_train_net.py" as an example.
    """

    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        return build_evaluator(cfg, dataset_name, output_folder)

    @classmethod
    def test_with_TTA(cls, cfg, model):
        logger = logging.getLogger("detectron2.trainer")
        # In the end of training, run an evaluation with TTA
        # Only support some R-CNN models.
        logger.info("Running inference with test-time augmentation ...")
        model = GeneralizedRCNNWithTTA(cfg, model)
        evaluators = [
            cls.build_evaluator(
                cfg, name, output_folder=os.path.join(cfg.OUTPUT_DIR, "inference_TTA")
            )
            for name in cfg.DATASETS.TEST
        ]
        res = cls.test(cfg, model, evaluators)
        res = OrderedDict({k + "_TTA": v for k, v in res.items()})
        return res


def setup(args):
    """
    Create configs and perform basic setups.
    """
    cfg = get_cfg()
    args.config_file = "./configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"
    cfg.merge_from_file(args.config_file)   # override defaults from the config file
    cfg.merge_from_list(args.opts)          # override from command-line arguments

    # adjust config parameters
    cfg.DATASETS.TRAIN = ("coco_my_train",)       # name of the training dataset
    cfg.DATASETS.TEST = ("coco_my_val",)
    cfg.DATALOADER.NUM_WORKERS = 2  # number of dataloader workers

    cfg.INPUT.CROP.ENABLED = True
    cfg.INPUT.MAX_SIZE_TRAIN = 2000 # maximum input size of training images
    cfg.INPUT.MAX_SIZE_TEST = 2000  # maximum input size at test time
    cfg.INPUT.MIN_SIZE_TRAIN = (512, 768) # minimum input size for training; a tuple enables multi-scale training
    cfg.INPUT.MIN_SIZE_TEST = 1200
    # cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING has two modes, "choice" and "range":
    # range  : the short edge is sampled randomly from [512, 768]
    # choice : the short edge is restricted to the listed sizes, i.e. exactly 512 or 768
    cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING = 'range'
    # Read the next comment carefully!
    cfg.MODEL.RETINANET.NUM_CLASSES = 2+1  # classes + 1 for background when your category ids start from 1; if your json ids start from 0, use the plain class count without a background class (only read by RetinaNet configs, ignored by Mask R-CNN)
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 2      # number of classes, background excluded
    cfg.MODEL.WEIGHTS = "./weights/model_final_r50_fpn_1x.pkl"    # pretrained weights
    cfg.OUTPUT_DIR = "./output"       # output directory for checkpoints
    cfg.SOLVER.IMS_PER_BATCH = 2  # batch_size = 2; iters_in_one_epoch = num_train_images / batch_size

    # compute the number of iterations per epoch from the training set size and batch size
    # 44*5 is the total number of training images in this example; replace it with your own count
    ITERS_IN_ONE_EPOCH = int(44*5 / cfg.SOLVER.IMS_PER_BATCH)

    # maximum number of iterations
    cfg.SOLVER.MAX_ITER = (ITERS_IN_ONE_EPOCH * 12) - 1 # 12 epochs
    # initial learning rate
    cfg.SOLVER.BASE_LR = 0.002
    # optimizer momentum
    cfg.SOLVER.MOMENTUM = 0.9
    # weight decay
    cfg.SOLVER.WEIGHT_DECAY = 0.0001
    cfg.SOLVER.WEIGHT_DECAY_NORM = 0.0
    # learning rate decay factor
    cfg.SOLVER.GAMMA = 0.1
    # iterations at which the learning rate is decayed
    cfg.SOLVER.STEPS = (7000,)
    # warmup: the learning rate ramps up gradually to the base learning rate before training at full speed
    cfg.SOLVER.WARMUP_FACTOR = 1.0 / 1000
    # number of warmup iterations
    cfg.SOLVER.WARMUP_ITERS = 1000

    cfg.SOLVER.WARMUP_METHOD = "linear"
    # checkpoint period (minus 1 so checkpoints land on the last iteration of an epoch)
    cfg.SOLVER.CHECKPOINT_PERIOD = ITERS_IN_ONE_EPOCH*2 - 1

    # run an evaluation every EVAL_PERIOD iterations
    cfg.TEST.EVAL_PERIOD = ITERS_IN_ONE_EPOCH*2
    #cfg.TEST.EVAL_PERIOD = 100
    #cfg.TEST.EVAL_PERIOD = 100

    cfg.freeze()
    default_setup(cfg, args)
    return cfg


def main(args):
    cfg = setup(args)
    register_dataset()
    
    if args.eval_only:
        model = Trainer.build_model(cfg)
        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume
        )
        res = Trainer.test(cfg, model)
        if cfg.TEST.AUG.ENABLED:
            res.update(Trainer.test_with_TTA(cfg, model))
        if comm.is_main_process():
            verify_results(cfg, res)
        return res

    """
    If you'd like to do anything fancier than the standard training logic,
    consider writing your own training loop (see plain_train_net.py) or
    subclassing the trainer.
    """
    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    if cfg.TEST.AUG.ENABLED:
        trainer.register_hooks(
            [hooks.EvalHook(0, lambda: trainer.test_with_TTA(cfg, trainer.model))]
        )
    return trainer.train()


if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    print("Command Line Args:", args)
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )
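
Before launching a long run, it can help to spot-check that the registration really loads images and annotations. A small sketch using detectron2's Visualizer (output file names are illustrative):

import random
import cv2
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.utils.visualizer import Visualizer

register_dataset()  # the function defined in train.py above
dicts = DatasetCatalog.get("coco_my_train")
metadata = MetadataCatalog.get("coco_my_train")
for d in random.sample(dicts, min(3, len(dicts))):
    img = cv2.imread(d["file_name"])
    vis = Visualizer(img[:, :, ::-1], metadata=metadata, scale=0.5)
    out = vis.draw_dataset_dict(d)  # draws the ground-truth annotations
    cv2.imwrite("check_{}.jpg".format(d["image_id"]), out.get_image()[:, :, ::-1])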

Possible problem:

ModuleNotFoundError: No module named 'shapely'

Fix: install the missing package in the virtual environment (pip install shapely).


Training Your Own Dataset

python tools/train.py
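
Because train.py keeps detectron2's default_argument_parser, the usual flags still work, for example:

python tools/train.py --num-gpus 1
python tools/train.py --resume       # continue from the last checkpoint in OUTPUT_DIR
python tools/train.py --eval-only    # evaluation only; point cfg.MODEL.WEIGHTS in setup() at your trained checkpoint first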


Inference on Images

import argparse
import glob
import multiprocessing as mp
import numpy as np
import os
import tempfile
import time
import warnings
import cv2
import tqdm

from detectron2.engine import default_argument_parser
from detectron2.config import get_cfg
from detectron2.data.detection_utils import read_image
from detectron2.utils.logger import setup_logger
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.data.datasets import load_coco_json
from detectron2.utils.visualizer import ColorMode
from detectron2.data.datasets import register_coco_instances
from demo.predictor import VisualizationDemo

# constants
WINDOW_NAME = "Branch Reconstruction"
# image paths
INPUT_IMG_PATH = "./demo/test"
OUTPUT_IMG_PATH = "./demo/results"
# single GPU
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# Declare the classes; keep the order consistent with the category ids in the json
CLASS_NAMES = ["bv", "rb"]
DATASET_CATEGORIES = [
    {"name": "bv", "id": 1, "isthing": 1, "color": [220, 20, 60]},
    {"name": "rb", "id": 2, "isthing": 1, "color": [219, 142, 185]},
]
# declare the dataset splits
PREDEFINED_SPLITS_DATASET = {
    "coco_my_train": ("./datasets/coco/train2017", "./datasets/coco/annotations/instances_train2017.json"),
    "coco_my_val": ("./datasets/coco/val2017", "./datasets/coco/annotations/instances_val2017.json"),
}
# register the datasets
def register_dataset():
    """
    purpose: register all splits of dataset with PREDEFINED_SPLITS_DATASET
    """
    for key, (image_root, json_file) in PREDEFINED_SPLITS_DATASET.items():
        register_dataset_instances(name=key,
                                   metadata=get_dataset_instances_meta(),
                                   json_file=json_file,
                                   image_root=image_root)


def get_dataset_instances_meta():
    """
    purpose: get metadata of dataset from DATASET_CATEGORIES
    return: dict[metadata]
    """
    thing_ids = [k["id"] for k in DATASET_CATEGORIES if k["isthing"] == 1]
    thing_colors = [k["color"] for k in DATASET_CATEGORIES if k["isthing"] == 1]
    # assert len(thing_ids) == 2, len(thing_ids)
    thing_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(thing_ids)}
    thing_classes = [k["name"] for k in DATASET_CATEGORIES if k["isthing"] == 1]
    ret = {
        "thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id,
        "thing_classes": thing_classes,
        "thing_colors": thing_colors,
    }
    return ret


def register_dataset_instances(name, metadata, json_file, image_root):
    """
    purpose: register dataset to DatasetCatalog,
             register metadata to MetadataCatalog and set attribute
    """
    DatasetCatalog.register(name, lambda: load_coco_json(json_file, image_root, name))
    MetadataCatalog.get(name).set(json_file=json_file,
                                  image_root=image_root,
                                  evaluator_type="coco",
                                  **metadata)


def setup_cfg(args):
    # load config from file and command-line arguments
    cfg = get_cfg()

    args.config_file = "configs/COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml"
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # Set score_threshold for builtin models
    cfg.MODEL.RETINANET.SCORE_THRESH_TEST = 0.5              # args.confidence_threshold
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5              # args.confidence_threshold
    cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = 0.5          # args.confidence_threshold

    cfg.DATASETS.TRAIN = ("coco_my_train",)
    cfg.DATASETS.TEST = ("coco_my_val",)
    cfg.DATALOADER.NUM_WORKERS = 2
    cfg.INPUT.MAX_SIZE_TRAIN = 2000
    cfg.INPUT.MAX_SIZE_TEST = 2000
    cfg.INPUT.MIN_SIZE_TRAIN = (512, 768)
    cfg.INPUT.MIN_SIZE_TEST = 1200
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 2  # number of classes
    cfg.MODEL.WEIGHTS = './output/model_final.pth'  # final trained weights
    cfg.SOLVER.IMS_PER_BATCH = 2  # batch_size = 2; iters_in_one_epoch = num_train_images / batch_size
    ITERS_IN_ONE_EPOCH = int(44*5 / cfg.SOLVER.IMS_PER_BATCH)
    cfg.SOLVER.MAX_ITER = (ITERS_IN_ONE_EPOCH * 30) - 1  # 30 epochs
    cfg.SOLVER.BASE_LR = 0.002
    cfg.SOLVER.MOMENTUM = 0.9
    cfg.SOLVER.WEIGHT_DECAY = 0.0001
    cfg.SOLVER.WEIGHT_DECAY_NORM = 0.0
    cfg.SOLVER.GAMMA = 0.1
    cfg.SOLVER.STEPS = (7000,)
    cfg.SOLVER.WARMUP_FACTOR = 1.0 / 1000
    cfg.SOLVER.WARMUP_ITERS = 1000
    cfg.SOLVER.WARMUP_METHOD = "linear"
    cfg.SOLVER.CHECKPOINT_PERIOD = ITERS_IN_ONE_EPOCH*2 - 1

    cfg.freeze()
    return cfg


if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    cfg = setup_cfg(args)
    register_dataset()
    # demo = VisualizationDemo(cfg)
    demo = VisualizationDemo(cfg, instance_mode=ColorMode.SEGMENTATION)

    # iterate over every image in INPUT_IMG_PATH, run detection, and save the results to OUTPUT_IMG_PATH
    for imgfile in os.listdir(INPUT_IMG_PATH):
        image_path = os.path.join(INPUT_IMG_PATH, imgfile)
        img = read_image(image_path, format="BGR")
        # start_time = time.time()
        predictions, visualized_output = demo.run_on_image(img)
        # logger.info(
        #     "{}: {} in {:.2f}s".format(
        #         image_path,
        #         "detected {} instances".format(len(predictions["instances"]))
        #         if "instances" in predictions
        #         else "finished",
        #         time.time() - start_time,
        #     )
        # )
        cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
        cv2.imshow(WINDOW_NAME, visualized_output.get_image()[:, :, ::-1])
        if cv2.waitKey(0) == 27:              # press Esc to save the result and close the window
            output_path = os.path.join(OUTPUT_IMG_PATH, imgfile+"_r101_44*5_100")
            cv2.imwrite(output_path+".jpg", visualized_output.get_image()[:, :, ::-1])
            cv2.destroyAllWindows()
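
On a headless server the cv2.namedWindow/imshow calls will fail. A minimal variant of the loop just writes every visualization straight to disk (same demo object as above):

for imgfile in os.listdir(INPUT_IMG_PATH):
    img = read_image(os.path.join(INPUT_IMG_PATH, imgfile), format="BGR")
    predictions, visualized_output = demo.run_on_image(img)
    visualized_output.save(os.path.join(OUTPUT_IMG_PATH, imgfile))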

Common Problems

1. AssertionError: Attribute 'thing_classes' in the metadata of 'coco_my_train' cannot be set to a different value!

Fix: this error usually means the category order in the json file differs from the order used when registering the dataset. Check the id of each category in the json file and reorder the registered categories to match (with the setup above, edit DATASET_CATEGORIES in tools/train.py):

DATASET_CATEGORIES = [
    {"name": "bv", "id": 1, "isthing": 1, "color": [220, 20, 60]},
    {"name": "rb", "id": 2, "isthing": 1, "color": [219, 142, 185]},
]
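
A quick way to check the order in the annotations is to read the raw json directly (a sketch):

import json

with open("./datasets/coco/annotations/instances_train2017.json") as f:
    categories = json.load(f)["categories"]
print([(c["id"], c["name"]) for c in categories])  # must match the order of DATASET_CATEGORIES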
