1. Run val() and save the results to a JSON file
import warnings
warnings.filterwarnings('ignore')
from ultralytics import YOLO
# https://docs.ultralytics.com/modes/train/
if __name__ == '__main__':
    model = YOLO('runs/models/v8m/weights/best.pt')
    # model.load('') # loading pretrained weights
    model.val(data='ultralytics/cfg/datasets/VisDrone.yaml',
              cache=False,
              imgsz=640,
              batch=4,
              split='test',     # evaluate on the test split; remove this line if you do not use a test set
              save_json=True,   # write predictions.json so the COCO metrics in step 2 can be computed
              workers=8,
              device='0',
              # epochs=200,     # training-only argument, has no effect during val
              # resume=True,    # training-only argument (path to last.pt), has no effect during val
              project='runs/val',
              name='exp',
              # amp=True
              )
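With save_json=True, val() writes a predictions.json into the run directory (project/name, e.g. runs/val/exp; Ultralytics increments the name to exp2, exp3, ... on repeated runs). A minimal sketch for sanity-checking that file before step 2; the path below is only an example and depends on your actual run directory:

import json

pred_path = 'runs/val/exp/predictions.json'  # example path, adjust to your run
with open(pred_path, 'r') as f:
    preds = json.load(f)
# each entry should contain image_id, category_id, bbox ([x, y, w, h]) and score
print(len(preds), 'predicted boxes')
print(preds[0] if preds else 'no predictions saved')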
2. Compute COCO metrics with pycocotools
import argparse
import json
import os
from tqdm import tqdm
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
def parse_opt():
    parser = argparse.ArgumentParser()
    # COCO-format JSON generated from the original dataset (see step 3)
    parser.add_argument('--anno_json', type=str, default='E:/1-Data/DataSet/VisDrone2019/Anno/test.json',
                        help='path to the ground-truth annotation json')
    # predictions.json generated during val
    parser.add_argument('--pred_json', type=str, default='runs/val/exp14/predictions.json',
                        help='path to the predictions json saved by val()')
    return parser.parse_known_args()[0]
def convert_pred_json_id(anno_json_path, pred_json_path):
    """Map the filename-based image_id in predictions.json to the numeric COCO image id."""
    with open(anno_json_path, "r") as f:
        ann_json = json.load(f)
    with open(pred_json_path, "r") as f:
        pred_json = json.load(f)
    # build a filename-stem -> id lookup once instead of scanning the image list per prediction
    name_to_id = {os.path.splitext(img["file_name"])[0]: img["id"] for img in ann_json["images"]}
    for pred_item in tqdm(pred_json, desc="Mapping image ids"):
        img_id = pred_item["image_id"]
        if img_id in name_to_id:
            pred_item["image_id"] = name_to_id[img_id]
        else:
            print(img_id)  # prediction whose image is missing from the annotation file
    out_json_path = os.path.join(os.path.dirname(pred_json_path), "newpred.json")
    with open(out_json_path, 'w') as file:
        json.dump(pred_json, file, indent=4)
    return out_json_path
if __name__ == '__main__':
    opt = parse_opt()
    anno_json = opt.anno_json
    pred_json = opt.pred_json
    pred_json = convert_pred_json_id(anno_json, pred_json)  # map YOLO image ids to COCO image ids
    anno = COCO(anno_json)  # init annotations api
    print(pred_json)
    pred = anno.loadRes(pred_json)  # init predictions api
    eval = COCOeval(anno, pred, 'bbox')
    eval.evaluate()
    eval.accumulate()
    eval.summarize()
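After summarize(), the twelve standard COCO metrics are also stored in eval.stats as a numpy array, which is convenient if you want to log them per experiment. A minimal sketch, reusing the eval object created above:

# eval.stats holds the 12 values in the same order as the summarize() printout
metric_names = [
    "AP@[.50:.95]", "AP@.50", "AP@.75",
    "AP_small", "AP_medium", "AP_large",
    "AR@1", "AR@10", "AR@100",
    "AR_small", "AR_medium", "AR_large",
]
coco_metrics = dict(zip(metric_names, eval.stats.tolist()))
for name, value in coco_metrics.items():
    print(f"{name}: {value:.4f}")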
3. yolo2coco format conversion
import os
import json
from PIL import Image
from tqdm import tqdm
"""
首先满足YOLO格式要求:
images:
train
val
test
labels:
train
val
test
"""
# dataset paths
output_dir = r"E:\1-Data\DataSet\UAV-DT"         # directory where the COCO-format JSON files will be written
dataset_path = r"E:\1-Data\DataSet\UAV-DT\yolo"  # root of the YOLO-format dataset (contains images/ and labels/)
images_path = os.path.join(dataset_path, "images")
labels_path = os.path.join(dataset_path, "labels")
# category mapping
categories = [
    {"id": 1, "name": "car"},
    {"id": 2, "name": "truck"},
    {"id": 3, "name": "bus"}
    # add more categories here as needed
]
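Instead of hard-coding the mapping, the categories list can also be built from the names entry of the dataset YAML used in step 1. A minimal sketch, assuming the YAML defines names as a list or an index-to-name dict (the usual Ultralytics layout) and that PyYAML is installed:

import yaml

# example path: the same dataset yaml passed to model.val() in step 1
with open("ultralytics/cfg/datasets/VisDrone.yaml", "r", encoding="utf-8") as f:
    data_cfg = yaml.safe_load(f)
names = data_cfg["names"]
if isinstance(names, dict):  # normalize {index: name} dicts into an ordered list
    names = [names[k] for k in sorted(names)]
categories = [{"id": i + 1, "name": n} for i, n in enumerate(names)]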
# convert a YOLO box (normalized cx, cy, w, h) to a COCO box (absolute x_min, y_min, w, h)
def convert_yolo_to_coco(x_center, y_center, width, height, img_width, img_height):
    x_min = (x_center - width / 2) * img_width
    y_min = (y_center - height / 2) * img_height
    width = width * img_width
    height = height * img_height
    return [x_min, y_min, width, height]
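For example, a YOLO label line 0 0.5 0.5 0.2 0.1 on a hypothetical 640x480 image converts as follows:

bbox = convert_yolo_to_coco(0.5, 0.5, 0.2, 0.1, 640, 480)
print(bbox)  # [256.0, 216.0, 128.0, 48.0]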
# initialize the COCO data structure
def init_coco_format():
    return {
        "images": [],
        "annotations": [],
        "categories": categories
    }
# process each dataset split
# if your dataset also has a test split, change the line below to:
# for split in ['train', 'val', 'test']:
for split in ['train', 'val']:
    coco_format = init_coco_format()
    annotation_id = 0
    for img_name in tqdm(os.listdir(os.path.join(images_path, split)), desc=f"Converting {split}"):
        if img_name.lower().endswith(('.png', '.jpg', '.jpeg')):
            img_path = os.path.join(images_path, split, img_name)
            # the label file shares the image's basename with a .txt extension
            label_path = os.path.join(labels_path, split, os.path.splitext(img_name)[0] + ".txt")
            img = Image.open(img_path)
            img_width, img_height = img.size
            image_info = {
                "file_name": img_name,
                "id": len(coco_format["images"]) + 1,
                "width": img_width,
                "height": img_height
            }
            coco_format["images"].append(image_info)
            if os.path.exists(label_path):
                with open(label_path, "r") as file:
                    for line in file:
                        category_id, x_center, y_center, width, height = map(float, line.split())
                        bbox = convert_yolo_to_coco(x_center, y_center, width, height, img_width, img_height)
                        annotation = {
                            "id": annotation_id,
                            "image_id": image_info["id"],
                            # depending on your dataset, category_id may need +1 or -1
                            # (YOLO class ids are usually 0-based, while the categories above start at 1)
                            "category_id": int(category_id),
                            "bbox": bbox,
                            "area": bbox[2] * bbox[3],
                            "iscrowd": 0
                        }
                        coco_format["annotations"].append(annotation)
                        annotation_id += 1
    # save one JSON file per split
    with open(os.path.join(output_dir, f"{split}_coco_format.json"), "w") as json_file:
        json.dump(coco_format, json_file, indent=4)
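As a quick check that the generated files are usable as the --anno_json input in step 2, they should load cleanly with pycocotools. A minimal sketch, assuming the val-split file written by the loop above:

from pycocotools.coco import COCO

coco_gt = COCO(os.path.join(output_dir, "val_coco_format.json"))
print(len(coco_gt.imgs), "images,", len(coco_gt.anns), "annotations,", len(coco_gt.cats), "categories")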