Converting ultralytics YOLOv8 into a stripped-down version for personal use

I needed to build a PyQt interface for YOLOv8, but the ultralytics package wraps everything in layer upon layer of abstraction, which makes it awkward to embed. So I stripped its source code down to the essentials; the result is below. The ultralytics version used is 8.0.54.

The full script follows. You need to point it at your own data config file and weights file via the `weights` and `data` variables in the configuration block after the function definitions.

import torch
import cv2
import numpy as np

from ultralytics.yolo.data.augment import LetterBox
from ultralytics.yolo.engine.results import Results
from ultralytics.yolo.utils import ops
from ultralytics.yolo.utils.plotting import Annotator, colors
from ultralytics.nn.autobackend import AutoBackend


def get_annotator(img):
    # optional helper for ultralytics-style drawing (not used in the loop below)
    return Annotator(img, line_width=3, example=str(model.names))

def preprocess(img):
    img = (img if isinstance(img, torch.Tensor) else torch.from_numpy(img)).to(model.device)
    img = img.float()
    img /= 255  # 0 - 255 to 0.0 - 1.0
    return img

def postprocess(preds, img, orig_imgs):
    preds = ops.non_max_suppression(preds,
                                    conf,
                                    iou,
                                    agnostic=False,
                                    max_det=300,
                                    classes=None)

    results = []
    for i, pred in enumerate(preds):
        orig_img = orig_imgs[i] if isinstance(orig_imgs, list) else orig_imgs  # the original, un-letterboxed frame
        if not isinstance(orig_imgs, torch.Tensor):
            pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape)
        img_path = ''  # webcam frames have no file path
        results.append(Results(orig_img=orig_img, path=img_path, names=model.names, boxes=pred))
    return results


vid_writer = None  # created lazily so the writer matches the capture properties


def save_preds(vid_cap, im0):
    global vid_writer
    if vid_writer is None:  # create the writer once, not per frame
        fps = int(vid_cap.get(cv2.CAP_PROP_FPS)) or 30  # integer required by the MP4 codec; fall back to 30 if the camera reports 0
        w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        vid_writer = cv2.VideoWriter('1.mp4', cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
    vid_writer.write(im0)  # im0 is already a uint8 BGR frame, no scaling needed


weights = ''  # TODO: path to your weights file, e.g. 'yolov8n.pt'
data = ''  # TODO: path to your dataset config, e.g. 'coco128.yaml'
imgsz = 640
visualize = False
conf = 0.25  # confidence threshold for NMS
iou = 0.5  # IoU threshold for NMS
model = AutoBackend(weights,
                    device=torch.device('cuda:0'),  # use torch.device('cpu') if no GPU is available
                    data=data,
                    verbose=True)
model.eval()
cam = cv2.VideoCapture(0)  # default webcam

while True:
    ret, im0 = cam.read()
    if not ret:  # stop when the camera yields no frame
        break

    im = np.stack([LetterBox(imgsz, True, stride=32)(image=im0)])  # letterbox-resize to a stride-multiple of imgsz
    im = im[..., ::-1].transpose((0, 3, 1, 2))  # BGR to RGB, BHWC to BCHW
    im = np.ascontiguousarray(im)  # contiguous
    dt = (ops.Profile(), ops.Profile(), ops.Profile())

    # preprocess
    with dt[0]:
        im = preprocess(im)
        if len(im.shape) == 3:
            im = im[None]  # expand for batch dim

    # inference
    with dt[1]:
        preds = model(im, augment=False, visualize=visualize)

    # postprocess
    with dt[2]:
        results = postprocess(preds, im, im0)

    det = results[0].boxes  # TODO: make boxes inherit from tensors

    # write
    for d in reversed(det):
        # use score/track_id instead of conf/id: conf would shadow the global NMS threshold, id the builtin
        cls, score, track_id = d.cls.squeeze(), d.conf.squeeze(), None if d.id is None else int(d.id.item())

        c = int(cls)  # integer class
        name = ('' if track_id is None else f'id:{track_id} ') + model.names[c]
        label = f'{name} {score:.2f}'
        xyxy = d.xyxy.squeeze()
        p1, p2 = (int(xyxy[0]), int(xyxy[1])), (int(xyxy[2]), int(xyxy[3]))
        lw = max(round(sum(im.shape) / 2 * 0.003), 2)  # line width scaled to the input size
        cv2.rectangle(im0, p1, p2, colors(c, True), thickness=lw, lineType=cv2.LINE_AA)
        if label:
            tf = max(lw - 1, 1)  # font thickness
            w, h = cv2.getTextSize(label, 0, fontScale=lw / 3, thickness=tf)[0]  # text width, height
            outside = p1[1] - h >= 3
            p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3
            # cv2.rectangle(im0, p1, p2, (128, 128, 128), -1, cv2.LINE_AA)  # filled
            cv2.putText(im0,
                        label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2),
                        0,
                        lw / 3,
                        (0, 0, 255),
                        thickness=tf,
                        lineType=cv2.LINE_AA)


    cv2.imshow("result", im0)
    # save_preds(cam, im0)
    print(f'preprocess: {dt[0].dt * 1E3:.1f} ms, inference: {dt[1].dt * 1E3:.1f} ms, postprocess: {dt[2].dt * 1E3:.1f} ms')
    if cv2.waitKey(1) & 0xff == ord('q'):  # 1 millisecond
        break

cam.release()
cv2.destroyAllWindows()
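
Since the whole point of this refactor is to drive a PyQt interface, the capture loop above can be moved into a Qt worker thread that emits annotated frames to the GUI instead of calling cv2.imshow. The sketch below only illustrates that wiring, assuming PyQt5; DetectThread and frame_ready are names I made up for this example, and the detection steps are elided:

import cv2
import numpy as np
from PyQt5.QtCore import QThread, pyqtSignal


class DetectThread(QThread):  # hypothetical worker class, not part of ultralytics
    frame_ready = pyqtSignal(np.ndarray)  # carries the annotated BGR frame to the GUI

    def run(self):
        cam = cv2.VideoCapture(0)
        while not self.isInterruptionRequested():
            ret, im0 = cam.read()
            if not ret:
                break
            # ... run the preprocess / inference / postprocess / drawing steps from the script above ...
            self.frame_ready.emit(im0)
        cam.release()

The main window would connect frame_ready to a slot that converts the ndarray into a QImage and paints it into a QLabel, replacing cv2.imshow and cv2.waitKey entirely.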

There are two ways to change how YOLOv8 trains. The first is to launch training from the command line, for example: `yolo task=detect mode=train model=models/v8/yolov8n.yaml data=data/coco.yaml batch=8 epochs=300 workers=2`. The second is to edit the default.yaml file directly; to switch from training to evaluation, three parameters matter: change mode from train to val, point model at your own trained weights, and set split to test or val to evaluate on the test or validation set respectively [1][2]. A Python-API equivalent of both runs is sketched below.

As a data point on run-to-run variation [3]: training yolov8s twice for 2000 epochs each gave an mAP50 of 0.915 on the first run and 0.91 on the second, a gap of 0.5 points.
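
For reference, the same runs can be started from the ultralytics Python API instead of the CLI. A minimal sketch, assuming the 8.0.54-era YOLO interface and reusing the example paths from the command above (the best.pt path is the usual run output location; adjust it to your own run):

from ultralytics import YOLO

# train from a model definition, mirroring the CLI command above
model = YOLO('models/v8/yolov8n.yaml')
model.train(data='data/coco.yaml', batch=8, epochs=300, workers=2)

# evaluate trained weights on a chosen split, mirroring the default.yaml changes
model = YOLO('runs/detect/train/weights/best.pt')
metrics = model.val(data='data/coco.yaml', split='test')  # or split='val'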
#### References

- [1][2] [YOLOv8自用训练教程——训练、测试、推理](https://blog.csdn.net/retainenergy/article/details/129199116)
- [3] [关于yolov8一些训练的情况](https://blog.csdn.net/zhangdaoliang1/article/details/128691837)