Using ONNX Runtime with CMake (demonstrated on YOLOv8)

A record of the problems I ran into.

For building, you can compile with VS2022 (or another Visual Studio version), or build the libraries locally and compile with MinGW.

Build commands when using VS2022:

mkdir build
cd build
cmake ..
cmake --build . --config Release
cd Release
# run your .exe

If the dependencies were compiled locally with MinGW, build with MinGW instead:

mkdir build
cd build
cmake -G "MinGW Makefiles" ..
mingw32-make -j8
# run your .exe

An ONNX Runtime package downloaded from the web does not ship a .cmake config file, so the link paths have to be specified by hand.

OpenCV, by contrast, does ship this .cmake file, so it is enough to point CMake at the package path.

First, how to set up CMake for OpenCV.

cmake_minimum_required(VERSION 3.5)

project(Yolov8CPPInference VERSION 0.1)

set(CMAKE_INCLUDE_CURRENT_DIR ON)

# CUDA
set(CUDA_TOOLKIT_ROOT_DIR "/usr/local/cuda")
find_package(CUDA 11 REQUIRED)

set(CMAKE_CUDA_STANDARD 11)
set(CMAKE_CUDA_STANDARD_REQUIRED ON)
# !CUDA

# OpenCV
set(OpenCV_DIR "E:\\Software\\package\\opencv\\build\\x64\\MinGW\\install")
find_package(OpenCV REQUIRED)
include_directories(${OpenCV_INCLUDE_DIRS})
# !OpenCV

set(PROJECT_SOURCES
    main.cpp
    inference.h
    inference.cpp
)
add_executable(Yolov8CPPInference ${PROJECT_SOURCES})
target_link_libraries(Yolov8CPPInference ${OpenCV_LIBS})

Line by line, this corresponds to:

  • cmake_minimum_required(VERSION 3.5): require CMake 3.5 or newer
  • project(Yolov8CPPInference VERSION 0.1): define the project name and version
  • Set the CUDA toolkit path to "/usr/local/cuda"
  • Find CUDA 11 or newer (find_package(CUDA 11 REQUIRED))
  • Set the CUDA standard to C++11
  • Point OpenCV_DIR at the OpenCV install path (Windows path format)
  • Find the OpenCV package (find_package(OpenCV REQUIRED))
  • Add OpenCV's header directories to the include path
  • List the project sources: main.cpp, inference.h, inference.cpp
  • Create the executable Yolov8CPPInference
  • Link the OpenCV libraries into the executable (target_link_libraries)

add_executable(Yolov8CPPInference ${PROJECT_SOURCES}) creates the executable; the first argument is the executable's name.

main.cpp

#include <iostream>
#include <vector>
#include <opencv2/opencv.hpp>
#include "inference.h"

using namespace std;
using namespace cv;

int main()
{
    std::string projectBasePath = "E:/YOLOv8"; // Set your ultralytics base path
    bool runOnGPU = false;

    // Load YOLOv8 ONNX model
    Inference inf(projectBasePath + "/yolov8n.onnx", cv::Size(640, 640), "classes.txt", runOnGPU);
    

    // Hardcoded image paths
    std::vector<std::string> imageNames = {
        projectBasePath + "/ultralytics/assets/bus.jpg",
        projectBasePath + "/ultralytics/assets/zidane.jpg"
    };

    for (const auto& imageName : imageNames)
    {
        cv::Mat frame = cv::imread(imageName);
        if (frame.empty())
        {
            std::cerr << "Could not read image: " << imageName << std::endl;
            continue;
        }

        // Run inference
        std::vector<Detection> output = inf.runInference(frame);
        std::cout << "Number of detections: " << output.size() << std::endl;

        // Draw detections
        for (const auto& detection : output)
        {
            cv::Rect box = detection.box;
            cv::Scalar color = detection.color;

            // Draw bounding box
            cv::rectangle(frame, box, color, 2);

            // Draw class label and confidence
            std::string classString = detection.className + ' ' + std::to_string(detection.confidence).substr(0, 4);
            cv::Size textSize = cv::getTextSize(classString, cv::FONT_HERSHEY_DUPLEX, 1, 2, 0);
            cv::Rect textBox(box.x, box.y - 40, textSize.width + 10, textSize.height + 20);

            cv::rectangle(frame, textBox, color, cv::FILLED);
            cv::putText(frame, classString, cv::Point(box.x + 5, box.y - 10), cv::FONT_HERSHEY_DUPLEX, 1, cv::Scalar(0, 0, 0), 2, 0);
        }

        // Display results
        float scale = 0.8;
        cv::resize(frame, frame, cv::Size(frame.cols * scale, frame.rows * scale));
        cv::imshow("Inference", frame);
        cv::waitKey(-1);
    }

    return 0;
}
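Note that although the constructor above is passed "classes.txt", the loadClassesFromFile() call is commented out in inference.cpp further down, so the hard-coded COCO class list in inference.h is what actually gets used.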

inference.h

#ifndef INFERENCE_H
#define INFERENCE_H

// Cpp native
#include <fstream>
#include <vector>
#include <string>
#include <random>

// OpenCV / DNN / Inference
#include <opencv2/imgproc.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>

struct Detection
{
    int class_id{0};
    std::string className{};
    float confidence{0.0};
    cv::Scalar color{};
    cv::Rect box{};
};

class Inference
{
public:
    Inference(const std::string &onnxModelPath, const cv::Size &modelInputShape = {640, 640}, const std::string &classesTxtFile = "", const bool &runWithCuda = true);
    std::vector<Detection> runInference(const cv::Mat &input);

private:
    void loadClassesFromFile();
    void loadOnnxNetwork();
    cv::Mat formatToSquare(const cv::Mat &source);

    std::string modelPath{};
    std::string classesPath{};
    bool cudaEnabled{};

    std::vector<std::string> classes{"person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch", "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"};

    cv::Size2f modelShape{};

    float modelConfidenceThreshold {0.25};
    float modelScoreThreshold      {0.45};
    float modelNMSThreshold        {0.50};

    bool letterBoxForSquare = true;

    cv::dnn::Net net;
};

#endif // INFERENCE_H
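A note on the three thresholds: modelConfidenceThreshold gates the objectness score and is only used on the YOLOv5 path, modelScoreThreshold gates the best per-class score on both paths (and is also passed to NMSBoxes as its score threshold), and modelNMSThreshold is the IoU threshold for NMS.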

inference.cpp

#include "inference.h"

Inference::Inference(const std::string &onnxModelPath, const cv::Size &modelInputShape, const std::string &classesTxtFile, const bool &runWithCuda)
{
    modelPath = onnxModelPath;
    modelShape = modelInputShape;
    classesPath = classesTxtFile;
    cudaEnabled = runWithCuda;

    loadOnnxNetwork();
    // loadClassesFromFile(); The classes are hard-coded for this example
}

std::vector<Detection> Inference::runInference(const cv::Mat &input)
{
    cv::Mat modelInput = input;
    if (letterBoxForSquare && modelShape.width == modelShape.height)
        modelInput = formatToSquare(modelInput);

    cv::Mat blob;
    cv::dnn::blobFromImage(modelInput, blob, 1.0/255.0, modelShape, cv::Scalar(), true, false);
    net.setInput(blob);

    std::vector<cv::Mat> outputs;
    net.forward(outputs, net.getUnconnectedOutLayersNames());

    int rows = outputs[0].size[1];
    int dimensions = outputs[0].size[2];

    bool yolov8 = false;
    // yolov5 has an output of shape (batchSize, 25200, 85) (Num classes + box[x,y,w,h] + confidence[c])
    // yolov8 has an output of shape (batchSize, 84,  8400) (Num classes + box[x,y,w,h])
    if (dimensions > rows) // Check if the shape[2] is more than shape[1] (yolov8)
    {
        yolov8 = true;
        rows = outputs[0].size[2];
        dimensions = outputs[0].size[1];

        // (1, 84, 8400): flatten to an 84 x 8400 Mat, then transpose so that each
        // of the 8400 rows holds one candidate [cx, cy, w, h, 80 class scores]
        outputs[0] = outputs[0].reshape(1, dimensions);
        cv::transpose(outputs[0], outputs[0]);
    }
    float *data = (float *)outputs[0].data;

    float x_factor = modelInput.cols / modelShape.width;
    float y_factor = modelInput.rows / modelShape.height;

    std::vector<int> class_ids;
    std::vector<float> confidences;
    std::vector<cv::Rect> boxes;

    for (int i = 0; i < rows; ++i)
    {
        if (yolov8)
        {
            float *classes_scores = data+4;

            cv::Mat scores(1, classes.size(), CV_32FC1, classes_scores);
            cv::Point class_id;
            double maxClassScore;

            minMaxLoc(scores, 0, &maxClassScore, 0, &class_id);

            if (maxClassScore > modelScoreThreshold)
            {
                confidences.push_back(maxClassScore);
                class_ids.push_back(class_id.x);

                float x = data[0];
                float y = data[1];
                float w = data[2];
                float h = data[3];

                int left = int((x - 0.5 * w) * x_factor);
                int top = int((y - 0.5 * h) * y_factor);

                int width = int(w * x_factor);
                int height = int(h * y_factor);

                boxes.push_back(cv::Rect(left, top, width, height));
            }
        }
        else // yolov5
        {
            float confidence = data[4];

            if (confidence >= modelConfidenceThreshold)
            {
                float *classes_scores = data+5;

                cv::Mat scores(1, classes.size(), CV_32FC1, classes_scores);
                cv::Point class_id;
                double max_class_score;

                minMaxLoc(scores, 0, &max_class_score, 0, &class_id);

                if (max_class_score > modelScoreThreshold)
                {
                    confidences.push_back(confidence);
                    class_ids.push_back(class_id.x);

                    float x = data[0];
                    float y = data[1];
                    float w = data[2];
                    float h = data[3];

                    int left = int((x - 0.5 * w) * x_factor);
                    int top = int((y - 0.5 * h) * y_factor);

                    int width = int(w * x_factor);
                    int height = int(h * y_factor);

                    boxes.push_back(cv::Rect(left, top, width, height));
                }
            }
        }

        data += dimensions;
    }

    std::vector<int> nms_result;
    cv::dnn::NMSBoxes(boxes, confidences, modelScoreThreshold, modelNMSThreshold, nms_result);

    std::vector<Detection> detections{};
    for (unsigned long i = 0; i < nms_result.size(); ++i)
    {
        int idx = nms_result[i];

        Detection result;
        result.class_id = class_ids[idx];
        result.confidence = confidences[idx];

        std::random_device rd;
        std::mt19937 gen(rd());
        std::uniform_int_distribution<int> dis(100, 255);
        result.color = cv::Scalar(dis(gen),
                                  dis(gen),
                                  dis(gen));

        result.className = classes[result.class_id];
        result.box = boxes[idx];

        detections.push_back(result);
    }

    return detections;
}

void Inference::loadClassesFromFile()
{
    std::ifstream inputFile(classesPath);
    if (inputFile.is_open())
    {
        std::string classLine;
        while (std::getline(inputFile, classLine))
            classes.push_back(classLine);
        inputFile.close();
    }
}

void Inference::loadOnnxNetwork()
{
    net = cv::dnn::readNetFromONNX(modelPath);
    if (cudaEnabled)
    {
        std::cout << "\nRunning on CUDA" << std::endl;
        net.setPreferableBackend(cv::dnn::DNN_BACKEND_CUDA);
        net.setPreferableTarget(cv::dnn::DNN_TARGET_CUDA);
    }
    else
    {
        std::cout << "\nRunning on CPU" << std::endl;
        net.setPreferableBackend(cv::dnn::DNN_BACKEND_OPENCV);
        net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);
    }
}

cv::Mat Inference::formatToSquare(const cv::Mat &source)
{
    int col = source.cols;
    int row = source.rows;
    int _max = MAX(col, row);
    cv::Mat result = cv::Mat::zeros(_max, _max, CV_8UC3);
    source.copyTo(result(cv::Rect(0, 0, col, row)));
    return result;
}
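A worked example of the scale factors (numbers chosen for illustration): a 1280x720 frame is padded by formatToSquare to 1280x1280 and then resized to 640x640 by blobFromImage, so x_factor = y_factor = 1280 / 640 = 2.0, and a point predicted at (100, 150) in model space lands at (200, 300) on the padded original.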

CMakeLists.txt

This is the same file already shown at the start of this post; just remember to set OpenCV_DIR to your own OpenCV path.


How to set up ONNX Runtime and OpenCV together

Downloading ONNX Runtime and setting up the environment

Download the release archive from the official site, picking the version that fits your setup (see the official notes). After unpacking, the archive contains an include/ directory with the headers and a lib/ directory with onnxruntime.lib and onnxruntime.dll, which the CMakeLists below refers to.

cmake_minimum_required(VERSION 3.15)
project(YOLOv8_ONNXRuntime_CPP)

# Require C++17
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)  # disable compiler extensions

# 1. Set the ONNX Runtime path (change this to your actual path)
set(ONNXRUNTIME_ROOT "E:/Software/package/onnxruntime-win-x64-gpu-1.14.1")

# 2. Find OpenCV (automatic discovery preferred)
find_package(OpenCV REQUIRED)
message(STATUS "OpenCV found: ${OpenCV_VERSION}")

# 3. Configure ONNX Runtime by hand
if(WIN32)
    add_library(onnxruntime SHARED IMPORTED)
    set_target_properties(onnxruntime PROPERTIES
        IMPORTED_LOCATION "${ONNXRUNTIME_ROOT}/lib/onnxruntime.dll"
        IMPORTED_IMPLIB "${ONNXRUNTIME_ROOT}/lib/onnxruntime.lib"
    )
else()  # Linux/macOS
    find_library(ONNXRUNTIME_LIB onnxruntime HINTS "${ONNXRUNTIME_ROOT}/lib")
    add_library(onnxruntime SHARED IMPORTED)
    set_target_properties(onnxruntime PROPERTIES
        IMPORTED_LOCATION "${ONNXRUNTIME_LIB}"
    )
endif()

# 4. Include directories
include_directories(
    ${OpenCV_INCLUDE_DIRS}
    "${ONNXRUNTIME_ROOT}/include"
)

# 5. Add the executable
add_executable(YOLOv8_ONNXRuntime_CPP
    main.cpp
    inference.h
    inference.cpp
)

# 6. Link the libraries
target_link_libraries(YOLOv8_ONNXRuntime_CPP
    ${OpenCV_LIBS}
    onnxruntime
)

# 7. Copy the DLL next to the executable on Windows
if(WIN32)
    add_custom_command(TARGET YOLOv8_ONNXRuntime_CPP POST_BUILD
        COMMAND ${CMAKE_COMMAND} -E copy
            "${ONNXRUNTIME_ROOT}/lib/onnxruntime.dll"
            $<TARGET_FILE_DIR:YOLOv8_ONNXRuntime_CPP>
    )
endif()
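
Before building the full demo it can help to compile a minimal smoke test against this setup. The sketch below (a hypothetical check_ort.cpp, not part of the original project) uses only Ort::GetAvailableProviders(), which the detection code later relies on as well, plus the version string from the C API:

#include <iostream>
#include <onnxruntime_cxx_api.h>

int main() {
    // print the version of the ONNX Runtime actually linked in
    std::cout << "ORT version: " << OrtGetApiBase()->GetVersionString() << std::endl;
    // list the execution providers compiled into this build
    for (const std::string& provider : Ort::GetAvailableProviders())
        std::cout << "Available provider: " << provider << std::endl;
    return 0;
}

With the GPU package set up correctly, CUDAExecutionProvider should appear in the list; if only CPUExecutionProvider shows up, the CUDA provider will not be usable.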

The difference is that onnxruntime does not provide a .cmake config file, so the import library (.lib) and the shared library (.dll) have to be configured by hand.

First, set the path:

set(ONNXRUNTIME_ROOT "E:/Software/package/onnxruntime-win-x64-gpu-1.14.1")

Then configure the imported library by hand:

add_library(onnxruntime SHARED IMPORTED)
set_target_properties(onnxruntime PROPERTIES
    IMPORTED_LOCATION "${ONNXRUNTIME_ROOT}/lib/onnxruntime.dll"
    IMPORTED_IMPLIB "${ONNXRUNTIME_ROOT}/lib/onnxruntime.lib"
)

Include the headers:

include_directories(
    ${OpenCV_INCLUDE_DIRS}
    "${ONNXRUNTIME_ROOT}/include"
)

Add the executable:

add_executable(YOLOv8_ONNXRuntime_CPP
    main.cpp
    inference.h
    inference.cpp
)

Link the libraries:

target_link_libraries(YOLOv8_ONNXRuntime_CPP
    ${OpenCV_LIBS}
    onnxruntime
)
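
Because onnxruntime was declared as an IMPORTED library, target_link_libraries treats it like any other target: the import library (.lib) is used at link time and the DLL is needed at run time, which is exactly why step 7 copies the DLL next to the executable.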

The build commands are the same as before (mkdir build, cd build, cmake .., cmake --build . --config Release).

The CMakeLists above targets the official example code.

Below is an updated version that I got running myself.

detect.cpp

#include "detect.h"
using namespace std;
using namespace cv;
using namespace cv::dnn;
using namespace Ort;
 
bool Yolov8Onnx::ReadModel(const std::string &modelPath, bool isCuda, int cudaId, bool warmUp) {
    if (_batchSize < 1) _batchSize = 1;
    
    std::vector<std::string> providers = Ort::GetAvailableProviders();
    for (const auto& provider : providers) {
        std::cout << "Available provider: " << provider << std::endl;
    }
    try {
        std::vector<std::string> available_providers = GetAvailableProviders();
        auto cuda_available = std::find(available_providers.begin(), available_providers.end(), "CUDAExecutionProvider");

        if (isCuda && (cuda_available == available_providers.end())) {
            std::cout << "Your ORT build has no GPU support; falling back to CPU." << std::endl;
            std::cout << "************* Infer model on CPU! *************" << std::endl;
        }
        else if (isCuda && (cuda_available != available_providers.end())) {
            std::cout << "************* Infer model on GPU! *************" << std::endl;
            OrtCUDAProviderOptions cudaOption;
            cudaOption.device_id = cudaId;
            _OrtSessionOptions.AppendExecutionProvider_CUDA(cudaOption);
        }
        else {
            std::cout << "************* Infer model on CPU! *************" << std::endl;
        }
 
        _OrtSessionOptions.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_EXTENDED);
 
#ifdef _WIN32
        // ORT on Windows expects a wide-character path; this byte-wise widening
        // is only safe for ASCII paths
        std::wstring model_path(modelPath.begin(), modelPath.end());
        _OrtSession = new Ort::Session(_OrtEnv, model_path.c_str(), _OrtSessionOptions);
#else
        _OrtSession = new Ort::Session(_OrtEnv, modelPath.c_str(), _OrtSessionOptions);
#endif
 
        Ort::AllocatorWithDefaultOptions allocator;
        //init input
        _inputNodesNum = _OrtSession->GetInputCount();
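        // ORT_OLD_VISON (spelling as in the original project) marks the last ORT
        // version that still used the old GetInputName API; if it is not defined
        // anywhere, the preprocessor evaluates it as 0 and the modern
        // GetInputNameAllocated branch below is taken.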
#if ORT_API_VERSION < ORT_OLD_VISON
        _inputName = _OrtSession->GetInputName(0, allocator);
        _inputNodeNames.push_back(_inputName);
#else
        _inputName = std::move(_OrtSession->GetInputNameAllocated(0, allocator));
        _inputNodeNames.push_back(_inputName.get());
#endif
        //cout << _inputNodeNames[0] << endl;
        Ort::TypeInfo inputTypeInfo = _OrtSession->GetInputTypeInfo(0);
        auto input_tensor_info = inputTypeInfo.GetTensorTypeAndShapeInfo();
        _inputNodeDataType = input_tensor_info.GetElementType();
        _inputTensorShape = input_tensor_info.GetShape();
 
        if (_inputTensorShape[0] == -1)
        {
            _isDynamicShape = true;
            _inputTensorShape[0] = _batchSize;
 
        }
        if (_inputTensorShape[2] == -1 || _inputTensorShape[3] == -1) {
            _isDynamicShape = true;
            _inputTensorShape[2] = _netHeight;
            _inputTensorShape[3] = _netWidth;
        }
        //init output
        _outputNodesNum = _OrtSession->GetOutputCount();
#if ORT_API_VERSION < ORT_OLD_VISON
        _output_name0 = _OrtSession->GetOutputName(0, allocator);
        _outputNodeNames.push_back(_output_name0);
#else
        _output_name0 = std::move(_OrtSession->GetOutputNameAllocated(0, allocator));
        _outputNodeNames.push_back(_output_name0.get());
#endif
        Ort::TypeInfo type_info_output0(nullptr);
        type_info_output0 = _OrtSession->GetOutputTypeInfo(0);  //output0
 
        auto tensor_info_output0 = type_info_output0.GetTensorTypeAndShapeInfo();
        _outputNodeDataType = tensor_info_output0.GetElementType();
        _outputTensorShape = tensor_info_output0.GetShape();
 
 
        // warm up: run a few dummy forward passes so that the GPU initialization
        // cost is paid up front
        if (isCuda && warmUp) {
            cout << "Start warming up" << endl;
            size_t input_tensor_length = VectorProduct(_inputTensorShape);
            float* temp = new float[input_tensor_length];
            std::vector<Ort::Value> input_tensors;
            std::vector<Ort::Value> output_tensors;
            input_tensors.push_back(Ort::Value::CreateTensor<float>(
                _OrtMemoryInfo, temp, input_tensor_length, _inputTensorShape.data(),
                _inputTensorShape.size()));
            for (int i = 0; i < 3; ++i) {
                output_tensors = _OrtSession->Run(Ort::RunOptions{ nullptr },
                    _inputNodeNames.data(),
                    input_tensors.data(),
                    _inputNodeNames.size(),
                    _outputNodeNames.data(),
                    _outputNodeNames.size());
            }
 
            delete[]temp;
        }
    }
    catch (const std::exception&) {
        return false;
    }
    return true;
 
}
 
int Yolov8Onnx::Preprocessing(const std::vector<cv::Mat> &SrcImgs,
                              std::vector<cv::Mat> &OutSrcImgs,
                              std::vector<cv::Vec4d> &params){
    OutSrcImgs.clear();
    Size input_size = Size(_netWidth, _netHeight);
 
    // letterbox each image to the network input size
    for (size_t i=0; i<SrcImgs.size(); ++i){
        Mat temp_img = SrcImgs[i];
        Vec4d temp_param = {1,1,0,0};
        if (temp_img.size() != input_size){
            Mat borderImg;
            LetterBox(temp_img, borderImg, temp_param, input_size, false, false, true, 32);
            OutSrcImgs.push_back(borderImg);
            params.push_back(temp_param);
        }
        else {
            OutSrcImgs.push_back(temp_img);
            params.push_back(temp_param);
        }
    }
 
    // if fewer images than the batch size were supplied, pad the batch with
    // blank images
    int lack_num = _batchSize - (int)SrcImgs.size();
    for (int i = 0; i < lack_num; ++i){
        Mat temp_img = Mat::zeros(input_size, CV_8UC3);
        Vec4d temp_param = {1,1,0,0};
        OutSrcImgs.push_back(temp_img);
        params.push_back(temp_param);
    }
    return 0;
}
 
bool Yolov8Onnx::OnnxBatchDetect(std::vector<cv::Mat> &srcImgs, std::vector<std::vector<OutputDet> > &output)
{
    vector<Vec4d> params;
    vector<Mat> input_images;
    cv::Size input_size(_netWidth, _netHeight);
 
    // preprocessing (letterboxing)
    Preprocessing(srcImgs, input_images, params);
    // [0~255] --> [0~1]; BGR2RGB
    Mat blob = cv::dnn::blobFromImages(input_images, 1 / 255.0, input_size, Scalar(0,0,0), true, false);

    // forward pass to get the inference result
    int64_t input_tensor_length = VectorProduct(_inputTensorShape); // total number of input floats
    std::vector<Ort::Value> input_tensors;
    std::vector<Ort::Value> output_tensors;
    input_tensors.push_back(Ort::Value::CreateTensor<float>(_OrtMemoryInfo, (float*)blob.data,
                                                            input_tensor_length, _inputTensorShape.data(),
                                                            _inputTensorShape.size()));
 
    output_tensors = _OrtSession->Run(Ort::RunOptions{ nullptr },
        _inputNodeNames.data(),
        input_tensors.data(),
        _inputNodeNames.size(),
        _outputNodeNames.data(),
        _outputNodeNames.size()
    );
 
    // post-process
    int net_width = (int)_className.size() + 4; // 4 box coords + number of classes
    float* all_data = output_tensors[0].GetTensorMutableData<float>(); // output of the first image

    _outputTensorShape = output_tensors[0].GetTensorTypeAndShapeInfo().GetShape(); // per-image output shape, e.g. [1, 84, 8400]

    int64_t one_output_length = VectorProduct(_outputTensorShape) / _outputTensorShape[0]; // floats in one image's output (84 * 8400)
 
    for (int img_index = 0; img_index < (int)srcImgs.size(); ++img_index){
        Mat output0 = Mat(Size((int)_outputTensorShape[2], (int)_outputTensorShape[1]), CV_32F, all_data).t(); // [1, 84, 8400] -> [1, 8400, 84]

        all_data += one_output_length; // advance the pointer to the next image's output

        float* pdata = (float*)output0.data; // [x,y,w,h,class1,class2,...,class80]
        int rows = output0.rows; // number of candidate boxes (8400)

        // candidate boxes of one image
        vector<int> class_ids;
        vector<float> confidences;
        vector<Rect> boxes;
        for (int r=0; r<rows; ++r){ // for yolov5 this loop would need to be adapted
            Mat scores(1, (int)_className.size(), CV_32F, pdata + 4); // scores of the 80 classes

            // best class score and its index
            Point classIdPoint;
            double max_class_score;
            minMaxLoc(scores, 0, &max_class_score, 0, &classIdPoint);

            // map the box coordinates back onto the original image
            if (max_class_score >= _classThreshold){
                // rect [x,y,w,h]
                float x = (pdata[0] - params[img_index][2]) / params[img_index][0]; // x
                float y = (pdata[1] - params[img_index][3]) / params[img_index][1]; // y
                float w = pdata[2] / params[img_index][0]; // w
                float h = pdata[3] / params[img_index][1]; // h

                int left = MAX(int(x - 0.5 * w + 0.5), 0);
                int top = MAX(int(y - 0.5 * h + 0.5), 0);

                class_ids.push_back(classIdPoint.x);
                confidences.push_back((float)max_class_score);
                boxes.push_back(Rect(left, top, int(w + 0.5), int(h + 0.5)));
            }
            pdata += net_width; // next candidate box
        }

        // run NMS on this image's boxes
        vector<int> nms_result;
        cv::dnn::NMSBoxes(boxes, confidences, _classThreshold, _nmsThrehold, nms_result); // is classThreshold needed here again?

        // for this image: use the NMS indices to collect class id, confidence and
        // box into OutputDet structs
        vector<OutputDet> temp_output;
        for (size_t i=0; i<nms_result.size(); ++i){
            int idx = nms_result[i];
            OutputDet result;
            result.id = class_ids[idx];
            result.confidence = confidences[idx];
            result.box = boxes[idx];
            temp_output.push_back(result);
        }
        output.push_back(temp_output); // one entry per input image
    }
    return !output.empty();
 
}
 
 
bool Yolov8Onnx::OnnxDetect(cv::Mat &srcImg, std::vector<OutputDet> &output){
    vector<Mat> input_data = {srcImg};
    vector<vector<OutputDet>> temp_output;
 
    if(OnnxBatchDetect(input_data, temp_output)){
        output = temp_output[0];
        return true;
    }
    else return false;
}
 

detect.h

#pragma once
#include <iostream>
#include <memory>
#include <opencv2/opencv.hpp>
#include "utils.h"
#include <onnxruntime_cxx_api.h>
#include <numeric>
 
class Yolov8Onnx{
private:
    template<typename T>
    T VectorProduct(const std::vector<T>& v)
    {
        // start from T(1) so the product is accumulated in T rather than int
        return std::accumulate(v.begin(), v.end(), static_cast<T>(1), std::multiplies<T>());
    }
 
    int Preprocessing(const std::vector<cv::Mat>& SrcImgs,
                      std::vector<cv::Mat>& OutSrcImgs,
                      std::vector<cv::Vec4d>& params);
 
    const int _netWidth = 640;   //ONNX-net-input-width
    const int _netHeight = 640;  //ONNX-net-input-height
 
    int _batchSize = 1; //if multi-batch,set this
    bool _isDynamicShape = false;//onnx support dynamic shape
 
    float _classThreshold = 0.25;
    float _nmsThrehold= 0.45;
    float _maskThreshold = 0.5;
 
 
    //ONNXRUNTIME
    Ort::Env _OrtEnv = Ort::Env(OrtLoggingLevel::ORT_LOGGING_LEVEL_ERROR, "Yolov5-Seg");
    Ort::SessionOptions _OrtSessionOptions = Ort::SessionOptions();
    Ort::Session* _OrtSession = nullptr;
    Ort::MemoryInfo _OrtMemoryInfo;
 
    std::shared_ptr<char> _inputName, _output_name0;
    std::vector<char*> _inputNodeNames;  // input node names
    std::vector<char*> _outputNodeNames; // output node names

    size_t _inputNodesNum = 0;  // number of input nodes
    size_t _outputNodesNum = 0; // number of output nodes

    ONNXTensorElementDataType _inputNodeDataType; // element data types
    ONNXTensorElementDataType _outputNodeDataType;

    std::vector<int64_t> _inputTensorShape; // input tensor shape
    std::vector<int64_t> _outputTensorShape;
 
public:
    Yolov8Onnx():_OrtMemoryInfo(Ort::MemoryInfo::CreateCpu(OrtAllocatorType::OrtDeviceAllocator, OrtMemType::OrtMemTypeCPUOutput)) {};
    ~Yolov8Onnx() {};// delete _OrtMemoryInfo;
 
public:
    /** \brief Read onnx-model
    * \param[in] modelPath:onnx-model path
    * \param[in] isCuda:if true,use Ort-GPU,else run it on cpu.
    * \param[in] cudaID:if isCuda==true,run Ort-GPU on cudaID.
    * \param[in] warmUp:if isCuda==true,warm up GPU-model.
    */
    bool ReadModel(const std::string& modelPath, bool isCuda=false, int cudaId=0, bool warmUp=true);
 
    /** \brief  detect.
    * \param[in] srcImg:a 3-channels image.
    * \param[out] output:detection results of input image.
    */
    bool OnnxDetect(cv::Mat& srcImg, std::vector<OutputDet>& output);
 
    /** \brief  detect,batch size= _batchSize
    * \param[in] srcImg:A batch of images.
    * \param[out] output:detection results of input images.
    */
    bool OnnxBatchDetect(std::vector<cv::Mat>& srcImgs, std::vector<std::vector<OutputDet>>& output);
 
public:
    std::vector<std::string> _className = {
        "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light",
        "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
        "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee",
        "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard",
        "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple",
        "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch",
        "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone",
        "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear",
        "hair drier", "toothbrush"
    };
 
 
 
};
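As a sanity check on VectorProduct: for the default input shape {1, 3, 640, 640} it returns 1*3*640*640 = 1228800, the number of floats allocated for the warm-up input and expected by the tensor-creation calls.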

main.cpp

#include <iostream>
#include <opencv2/opencv.hpp>
#include "detect.h"
#include <chrono>
#include <ctime>

using namespace std;
using namespace cv;
using namespace cv::dnn;
#include <fstream>

bool isModelFileValid(const std::string& path) {
    std::ifstream file(path, std::ios::binary);
    return file.good();
}

int main() {
    // 1. Load the model
    string detect_model_path = "E:\\C++\\Code_study\\YOLOv8_test\\models\\yolov8n.onnx";

    if (!isModelFileValid(detect_model_path)) {
        cerr << "Error: Model file not found or inaccessible!" << endl;
        cerr << "Tried path: " << detect_model_path << endl;
        return -1;
    }
    
    Yolov8Onnx yolov8;
    // if (!yolov8.ReadModel(detect_model_path, true, 0, true)) {  // enable GPU
    if (!yolov8.ReadModel(detect_model_path)) {  // CPU by default

        cerr << "Error: Failed to load model!" << endl;
        return -1;
    }
    cout << "Model loaded successfully!" << endl;

    // 2. Initialize the camera
    VideoCapture capture(0);
    if (!capture.isOpened()) {
        cerr << "Error: Could not open camera!" << endl;
        return -1;
    }
    cout << "Camera opened successfully!" << endl;
    
    // set the camera resolution
    capture.set(CAP_PROP_FRAME_WIDTH, 640);
    capture.set(CAP_PROP_FRAME_HEIGHT, 480);

    // 3. Generate a random color for each of the 80 classes
    vector<Scalar> color;
    srand(static_cast<unsigned>(time(0)));
    for (int i = 0; i < 80; i++) {
        color.emplace_back(rand() % 256, rand() % 256, rand() % 256);
    }

    // 4. Create the display window
    namedWindow("YOLOv8 Detection", WINDOW_NORMAL);
    resizeWindow("YOLOv8 Detection", 800, 600);

    // 5. Main loop
    while (true) {
        Mat frame;
        if (!capture.read(frame)) {  // safer frame read than operator>>
            cerr << "Warning: Failed to read frame!" << endl;
            break;
        }

        // run inference
        vector<OutputDet> result;
        auto start = chrono::high_resolution_clock::now();
        bool find = yolov8.OnnxDetect(frame, result);
        auto end = chrono::high_resolution_clock::now();

        // draw the results
        if (find) {
            DrawPred(frame, result, yolov8._className, color);
        } else {
            cout << "No objects detected." << endl;
        }

        // overlay performance info
        auto inference_time = chrono::duration_cast<chrono::milliseconds>(end - start);
        string label = "FPS: " + to_string(1000.0 / inference_time.count()) + 
                      " | Inference: " + to_string(inference_time.count()) + "ms";
        putText(frame, label, Point(10, 30), FONT_HERSHEY_SIMPLEX, 0.6, Scalar(0, 255, 0), 2);

        // show the frame
        imshow("YOLOv8 Detection", frame);

        // exit control
        if (waitKey(30) == 'q') break;  // ~30 ms delay per frame; press q to quit
    }

    // 6. Release resources
    capture.release();
    destroyAllWindows();
    return 0;
}

utils.cpp

#include "utils.h"
using namespace cv;
using namespace std;
 
void LetterBox(const cv::Mat& image, cv::Mat& outImage,
               cv::Vec4d& params,
               const cv::Size& newShape,
               bool autoShape,
               bool scaleFill,
               bool scaleUp,
               int stride,
               const cv::Scalar& color)
{
    // use the smaller of the two scale ratios
    cv::Size shape = image.size();
    float r = std::min((float)newShape.height / (float)shape.height,
                       (float)newShape.width / (float)shape.width);
    if (!scaleUp)
        r = std::min(r, 1.0f);
 
    // image size after applying this scale ratio
    float ratio[2]{r,r};
    int new_un_pad[2] = { (int)std::round((float)shape.width  * r), (int)std::round((float)shape.height * r)};
 
    // padding (in pixels) needed to reach the target size
    auto dw = (float)(newShape.width - new_un_pad[0]);
    auto dh = (float)(newShape.height - new_un_pad[1]);
    if (autoShape)
    {
        dw = (float)((int)dw % stride);
        dh = (float)((int)dh % stride);
    }
    else if (scaleFill)
    {
        dw = 0.0f;
        dh = 0.0f;
        new_un_pad[0] = newShape.width;
        new_un_pad[1] = newShape.height;
        ratio[0] = (float)newShape.width / (float)shape.width;
        ratio[1] = (float)newShape.height / (float)shape.height;
    }
 
    dw /= 2.0f;
    dh /= 2.0f;
 
    // aspect-preserving resize
    if (shape.width != new_un_pad[0] || shape.height != new_un_pad[1])
    {
        cv::resize(image, outImage, cv::Size(new_un_pad[0], new_un_pad[1]));
    }
    else{
        outImage = image.clone();
    }
 
    // pad the borders; after this the image has exactly the target size
    int top = int(std::round(dh - 0.1f));
    int bottom = int(std::round(dh + 0.1f));
    int left = int(std::round(dw - 0.1f));
    int right = int(std::round(dw + 0.1f));
    params[0] = ratio[0]; // width scale ratio
    params[1] = ratio[1]; // height scale ratio
    params[2] = left;     // horizontal padding on each side
    params[3] = top;      // vertical padding on each side
    cv::copyMakeBorder(outImage, outImage, top, bottom, left, right, cv::BORDER_CONSTANT, color);
}
 
void DrawPred(cv::Mat& img, std::vector<OutputDet> result,
              std::vector<std::string> classNames,
              std::vector<cv::Scalar> color)
{
    for (size_t i=0; i<result.size(); i++){
        int  left,top;
        left = result[i].box.x;
        top = result[i].box.y;
 
        // draw the bounding box
        rectangle(img, result[i].box, color[result[i].id], 2, 8);

        // label the class and confidence at the top-left of the box
        string label = classNames[result[i].id] + ":" + to_string(result[i].confidence);
        int baseLine;
        Size labelSize = getTextSize(label, FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
        top = max(top, labelSize.height);
        putText(img, label, Point(left, top), FONT_HERSHEY_SIMPLEX, 0.5, color[result[i].id], 1);
    }
}
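
To make the params vector concrete, here is a minimal sketch (the image path and sample point are placeholders, not part of the original project) of how a coordinate in 640x640 model space maps back to the original image, mirroring the formulas used in OnnxBatchDetect:

#include <iostream>
#include <opencv2/opencv.hpp>
#include "utils.h"

int main() {
    cv::Mat img = cv::imread("bus.jpg"); // placeholder image path
    if (img.empty()) return 1;           // placeholder file not found
    cv::Mat boxed;
    cv::Vec4d p; // {width scale, height scale, left pad, top pad}
    LetterBox(img, boxed, p); // resize and pad to 640x640

    // a point (mx, my) in model space maps back onto the original image as:
    double mx = 320.0, my = 320.0; // example model-space point
    double ox = (mx - p[2]) / p[0];
    double oy = (my - p[3]) / p[1];
    std::cout << "(" << mx << ", " << my << ") -> (" << ox << ", " << oy << ")" << std::endl;
    return 0;
}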
 

utils.h

#pragma once
#include <iostream>
#include <opencv2/opencv.hpp>
 
struct OutputDet{
    int id;
    float confidence;
    cv::Rect box;
};
 
void DrawPred(cv::Mat& img, std::vector<OutputDet> result,
              std::vector<std::string> classNames, std::vector<cv::Scalar> color);
void LetterBox(const cv::Mat& image, cv::Mat& outImage,
               cv::Vec4d& params,
               const cv::Size& newShape = cv::Size(640, 640),
               bool autoShape = false,
               bool scaleFill=false,
               bool scaleUp=true,
               int stride= 32,
               const cv::Scalar& color = cv::Scalar(114,114,114));

CMakeLists.txt

cmake_minimum_required(VERSION 3.15)
project(YOLOv8_ONNXRuntime_CPP)

# Require C++17
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)  # disable compiler extensions

# 1. Set the ONNX Runtime path (change this to your actual path)
set(ONNXRUNTIME_ROOT "E:/Software/package/onnxruntime-win-x64-gpu-1.14.1")

# 2. Find OpenCV (automatic discovery preferred)
find_package(OpenCV REQUIRED)
message(STATUS "OpenCV found: ${OpenCV_VERSION}")

# 3. Configure ONNX Runtime by hand
if(WIN32)
    add_library(onnxruntime SHARED IMPORTED)
    set_target_properties(onnxruntime PROPERTIES
        IMPORTED_LOCATION "${ONNXRUNTIME_ROOT}/lib/onnxruntime.dll"
        IMPORTED_IMPLIB "${ONNXRUNTIME_ROOT}/lib/onnxruntime.lib"
    )
else()  # Linux/macOS
    find_library(ONNXRUNTIME_LIB onnxruntime HINTS "${ONNXRUNTIME_ROOT}/lib")
    add_library(onnxruntime SHARED IMPORTED)
    set_target_properties(onnxruntime PROPERTIES
        IMPORTED_LOCATION "${ONNXRUNTIME_LIB}"
    )
endif()

# 4. Include directories
include_directories(
    ${OpenCV_INCLUDE_DIRS}
    "${ONNXRUNTIME_ROOT}/include"
)

# 5. Add the executable
add_executable(YOLOv8_ONNXRuntime_CPP
    main.cpp
    detect.cpp
    detect.h
    utils.cpp
    utils.h
)

# 6. Link the libraries
target_link_libraries(YOLOv8_ONNXRuntime_CPP
    ${OpenCV_LIBS}
    onnxruntime
)

# 7. Copy the DLL next to the executable on Windows
if(WIN32)
    add_custom_command(TARGET YOLOv8_ONNXRuntime_CPP POST_BUILD
        COMMAND ${CMAKE_COMMAND} -E copy
            "${ONNXRUNTIME_ROOT}/lib/onnxruntime.dll"
            $<TARGET_FILE_DIR:YOLOv8_ONNXRuntime_CPP>
    )
endif()

if(WIN32)
    add_definitions(-D_CRT_SECURE_NO_WARNINGS)  # silence some MSVC security warnings
endif()

That is all of the ONNX Runtime code. The CPU version runs at the moment; enabling the GPU still errors out and will be fixed in a later update, and a TensorRT version will follow as well.
