QT开发技术 [opencv加载onnx模型,dnn推理]

一、导出onnx 模型

yolo export model=xx\xx\best.pt format=onnx

二、qt加载onnx模型,推理显示

 std::string fileName = QCoreApplication::applicationDirPath().toStdString() + "/Model/best.onnx";

 cv::dnn::Net net = cv::dnn::readNetFromONNX(fileName);
 if (net.empty()) {
     std::cerr << "Failed to load ONNX model. Check: " << std::endl
         << "1. File path: " << fileName << std::endl
         << "2. OpenCV version (require >= 4.5)" << std::endl
         << "3. ONNX opset compatibility" << std::endl;
     return;
 }

 // Load the test image from the application's Data directory.
 // imread() returns pixels in BGR channel order (OpenCV default).
 cv::Mat image = cv::imread(QCoreApplication::applicationDirPath().toStdString() + "/Data/test3.jpg");
 if (image.empty()) {
     std::cerr << "Failed to load image" << std::endl;
     return;
 }

 // Preprocessing: resize to the fixed 640x640 network input (no letterbox),
 // scale pixel values to [0, 1], and swap BGR -> RGB for the YOLO model.
 cv::Mat blob;
 try {
     const double kScale = 1.0 / 255.0;      // normalize 0..255 -> 0..1
     const cv::Size kInputSize(640, 640);    // fixed network input size
     const cv::Scalar kMean(0, 0, 0);        // YOLO uses no mean subtraction

     blob = cv::dnn::blobFromImage(image, kScale, kInputSize, kMean,
                                   /*swapRB=*/true,   // OpenCV is BGR, model expects RGB
                                   /*crop=*/false,    // plain resize, no center crop
                                   CV_32F);
 }
 catch (...) {
     std::cerr << "Blob creation failed" << std::endl;
     return;
 }

 // Select the compute backend (adjust to the deployment environment).
 // NOTE(review): these calls express a *preference* only — if this OpenCV
 // build was compiled without CUDA support, the DNN module falls back to the
 // default CPU backend at forward() time; confirm the build has CUDA enabled.
 net.setPreferableBackend(cv::dnn::DNN_BACKEND_CUDA);
 net.setPreferableTarget(cv::dnn::DNN_TARGET_CUDA);

 // Query every unconnected output layer so all model outputs are collected.
 std::vector<std::string> outLayerNames = net.getUnconnectedOutLayersNames();
 std::vector<cv::Mat> predictions;

 try {
     net.setInput(blob);
     net.forward(predictions, outLayerNames);
 }
 catch (const cv::Exception& e) {
     std::cerr << "Forward pass failed: " << e.what() << std::endl;
     return;
 }


 // 后处理
 std::vector<int> classIds;
 std::vector<float> confidences;
 std::vector<cv::Rect> boxes;
 float x_factor = image.cols / 640.0;
 float y_factor = image.rows / 640.0;
 // YOLO 专用预处理参数
 float confThreshold = 0.25;
 float nmsThreshold = 0.45;

 for (const auto& pred : predictions) {
     for (int i = 0; i < pred.rows; ++i) {
         cv::Mat scores = pred.row(i).colRange(5, pred.cols);
         cv::Point classIdPoint;
         double confidence;
         cv::minMaxLoc(scores, 0, &confidence, 0, &classIdPoint);
         if (confidence > confThreshold) {
             int centerX = static_cast<int>(pred.at<float>(i, 0) * x_factor);
             int centerY = static_cast<int>(pred.at<float>(i, 1) * y_factor);
             int width = static_cast<int>(pred.at<float>(i, 2) * x_factor);
             int height = static_cast<int>(pred.at<float>(i, 3) * y_factor);
             int left = centerX - width / 2;
             int top = centerY - height / 2;

             classIds.push_back(classIdPoint.x);
             confidences.push_back(static_cast<float>(confidence));
             boxes.push_back(cv::Rect(left, top, width, height));
         }
     }
 }

 // Non-maximum suppression: keep only the highest-scoring box among
 // overlapping detections of the same object.
 std::vector<int> indices;
 cv::dnn::NMSBoxes(boxes, confidences, confThreshold, nmsThreshold, indices);

 // Draw each surviving detection: a green rectangle with its confidence
 // (two decimals) rendered just above the top-left corner.
 const cv::Scalar green(0, 255, 0);
 for (int keep : indices) {
     const cv::Rect& box = boxes[keep];
     cv::rectangle(image, box, green, 2);
     cv::putText(image, cv::format("%.2f", confidences[keep]),
                 cv::Point(box.x, box.y - 10),
                 cv::FONT_HERSHEY_SIMPLEX, 0.5, green, 2);
 }

 // Display the annotated image on ui->label_Map: convert BGR -> RGB for Qt,
 // wrap the pixel buffer in a QImage, and copy it into a QPixmap.
 cv::cvtColor(image, image, cv::COLOR_BGR2RGB);
 // The QImage here is a non-owning view of image.data, but
 // QPixmap::fromImage() deep-copies the pixels, so the pixmap stays valid
 // after `image` goes out of scope.
 QPixmap pixmap = QPixmap::fromImage(QImage(image.data, image.cols, image.rows, image.step, QImage::Format_RGB888));
 ui->label_Map->setPixmap(pixmap);
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值