1. Use DeepStream-Yolo
https://github.com/marcoslucianops/DeepStream-Yolo
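Before running anything on the NX, the custom bounding-box parser referenced by the config in step 5 (libnvdsinfer_custom_impl_Yolo.so) has to be built from this repo. A minimal sketch; CUDA_VER is an assumption here and must match the CUDA version of your JetPack/DeepStream install (e.g. 10.2 on JetPack 4.x):
git clone https://github.com/marcoslucianops/DeepStream-Yolo.git
cd DeepStream-Yolo
CUDA_VER=10.2 make -C nvdsinfer_custom_impl_Yolo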
2. On the cluster server, activate the conda environment used for the export:
conda activate yolov11
3. Model conversion
Use yolov5 v6.0. It must be v6.0; the export script below has problems with other yolov5 versions.
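A sketch of preparing the export environment (run inside the conda environment activated above; v6.0 is the official ultralytics release tag, and onnxslim is only needed if you pass --simplify):
git clone -b v6.0 https://github.com/ultralytics/yolov5.git
cd yolov5
pip install -r requirements.txt
pip install onnx onnxslim
# the export script below must be run from this directory so that
# "from models.experimental import attempt_load" resolves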
import os
import onnx
import torch
import torch.nn as nn
from models.experimental import attempt_load


class DeepStreamOutput(nn.Module):
    # Post-processing head appended to the model so the ONNX output is
    # [x1, y1, x2, y2, score, class_id] per detection, the layout expected
    # by the DeepStream-Yolo parser.
    def __init__(self):
        super().__init__()

    def forward(self, x):
        x = x[0]
        boxes = x[:, :, :4]
        # convert [cx, cy, w, h] to [x1, y1, x2, y2]
        convert_matrix = torch.tensor(
            [[1, 0, 1, 0], [0, 1, 0, 1], [-0.5, 0, 0.5, 0], [0, -0.5, 0, 0.5]], dtype=boxes.dtype, device=boxes.device
        )
        boxes @= convert_matrix
        # best class score scaled by the objectness score
        objectness = x[:, :, 4:5]
        scores, labels = torch.max(x[:, :, 5:], dim=-1, keepdim=True)
        scores *= objectness
        return torch.cat([boxes, scores, labels.to(boxes.dtype)], dim=-1)


def yolov5_export(weights, device, inplace=True, fuse=True):
    # load the .pt checkpoint and put the Detect head into static export mode
    model = attempt_load(weights, inplace=inplace, fuse=fuse)  # , device=device
    model.eval()
    for k, m in model.named_modules():
        if m.__class__.__name__ == 'Detect':
            m.inplace = False
            m.dynamic = False
            m.export = True
    return model


def suppress_warnings():
    import warnings
    warnings.filterwarnings('ignore', category=torch.jit.TracerWarning)
    warnings.filterwarnings('ignore', category=UserWarning)
    warnings.filterwarnings('ignore', category=DeprecationWarning)
    warnings.filterwarnings('ignore', category=FutureWarning)
    warnings.filterwarnings('ignore', category=ResourceWarning)


def main(args):
    suppress_warnings()
    print(f'\nStarting: {args.weights}')
    print('Opening YOLOv5 model')
    device = torch.device('cpu')
    model = yolov5_export(args.weights, device)
    print()
    if len(model.names) > 0:
        print('Creating labels.txt file')
        with open('labels.txt', 'w', encoding='utf-8') as f:
            for name in model.names:
                f.write(f'{name}\n')
    model = nn.Sequential(model, DeepStreamOutput())
    img_size = args.size * 2 if len(args.size) == 1 else args.size
    if img_size == [640, 640] and args.p6:
        img_size = [1280] * 2
    onnx_input_im = torch.zeros(args.batch, 3, *img_size).to(device)
    onnx_output_file = f'{args.weights}.onnx'
    dynamic_axes = {
        'input': {
            0: 'batch'
        },
        'output': {
            0: 'batch'
        }
    }
    print('Exporting the model to ONNX')
    torch.onnx.export(
        model, onnx_input_im, onnx_output_file, verbose=False, opset_version=args.opset, do_constant_folding=True,
        input_names=['input'], output_names=['output'], dynamic_axes=dynamic_axes if args.dynamic else None
    )
    if args.simplify:
        print('Simplifying the ONNX model')
        import onnxslim
        model_onnx = onnx.load(onnx_output_file)
        model_onnx = onnxslim.slim(model_onnx)
        onnx.save(model_onnx, onnx_output_file)
    print(f'Done: {onnx_output_file}\n')


def parse_args():
    import argparse
    parser = argparse.ArgumentParser(description='DeepStream YOLOv5 conversion')
    parser.add_argument('-w', '--weights', required=True, type=str, help='Input weights (.pt) file path (required)')
    parser.add_argument('-s', '--size', nargs='+', type=int, default=[640], help='Inference size [H,W] (default [640])')
    parser.add_argument('--p6', action='store_true', help='P6 model')
    parser.add_argument('--opset', type=int, default=12, help='ONNX opset version')
    parser.add_argument('--simplify', action='store_true', help='ONNX simplify model')
    parser.add_argument('--dynamic', action='store_true', help='Dynamic batch-size')
    parser.add_argument('--batch', type=int, default=1, help='Static batch-size')
    args = parser.parse_args()
    if not os.path.isfile(args.weights):
        raise SystemExit('Invalid weights file')
    if args.dynamic and args.batch > 1:
        raise SystemExit('Cannot set dynamic batch-size and static batch-size at same time')
    return args


if __name__ == '__main__':
    args = parse_args()
    main(args)
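A typical invocation on the server, assuming the script above was saved as export_yoloV5.py in the yolov5 directory (the weights file name is hypothetical; the output is written as <weights>.onnx, so either rename it or point onnx-file in the config at it):
python3 export_yoloV5.py -w best0116-2.pt -s 640 --opset 12 --simplify
# writes best0116-2.pt.onnx and labels.txt to the current directory;
# copy both to the NX next to config_infer_primary_yoloV5.txt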
4. Errors related to the SiLU activation function
Solution reference:
Error: nvbufsurface: Failed to create EGLImage.
This typically appears when the pipeline is started without access to the X display (e.g. over SSH). Solution:
$ export DISPLAY=:0
$ xhost +
5. The final model format conversion (ONNX to TensorRT engine) is done on the NX itself: with the config below, DeepStream generates model_b1_gpu0_fp32.engine from the ONNX file on the first run.
config_infer_primary_yoloV5.txt
[property]
gpu-id=0
net-scale-factor=0.0039215697906911373
model-color-format=0
onnx-file=best0116-2.onnx
# custom-network-config=yolov5_best.cfg
# model-file=yolov5_best111.wts
model-engine-file=model_b1_gpu0_fp32.engine
#int8-calib-file=calib.table
labelfile-path=labels1.txt
batch-size=1
network-mode=0
num-detected-classes=4
interval=0
gie-unique-id=1
process-mode=1
network-type=0
cluster-mode=2
maintain-aspect-ratio=1
symmetric-padding=1
#workspace-size=2000
parse-bbox-func-name=NvDsInferParseYolo
#parse-bbox-func-name=NvDsInferParseYoloCuda
custom-lib-path=nvdsinfer_custom_impl_Yolo/libnvdsinfer_custom_impl_Yolo.so
engine-create-func-name=NvDsInferYoloCudaEngineGet
[class-attrs-all]
nms-iou-threshold=0.45
pre-cluster-threshold=0.25
topk=300
Run the pipeline through the DeepStream reference app; the app-level config references the infer config above:
deepstream-app -c deepstream_app_config.txt
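For reference, the [primary-gie] section of deepstream_app_config.txt is what ties the app config to config_infer_primary_yoloV5.txt; a sketch based on the DeepStream-Yolo sample (sources, streammux, and sinks are unchanged from that sample):
[primary-gie]
enable=1
gpu-id=0
gie-unique-id=1
nvbuf-memory-type=0
config-file=config_infer_primary_yoloV5.txt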