【opencv】Sample: detect_blob.cpp


// Required OpenCV headers
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/features2d.hpp>
// Standard containers
#include <vector>
#include <map>
// Stream I/O
#include <iostream>


// Pull in the std and cv namespaces so that std:: and cv:: do not
// have to be repeated in front of every type and function
using namespace std;
using namespace cv;




// Help function that prints the program's usage instructions
static void help(char** argv)
{
    cout << "\n This program demonstrates how to use BLOB to detect and filter region \n"
         << "Usage: \n"
         << argv[0]
         << " <image1(detect_blob.png as default)>\n"
         << "Press a key when image window is active to change descriptor";
}


// Legende builds a human-readable description of the filtering
// criteria enabled in the SimpleBlobDetector parameters pAct
static String Legende(SimpleBlobDetector::Params &pAct)
{
    // Accumulates the final description text
    String s = "";
    // Area filter
    if (pAct.filterByArea)
    {
        // Convert minArea and maxArea to strings and append the range to s
        String inf = static_cast<const ostringstream&>(ostringstream() << pAct.minArea).str();
        String sup = static_cast<const ostringstream&>(ostringstream() << pAct.maxArea).str();
        s = " Area range [" + inf + " to " + sup + "]";
    }
    // Circularity filter
    if (pAct.filterByCircularity)
    {
        // Convert minCircularity and maxCircularity to strings and append the range to s
        String inf = static_cast<const ostringstream&>(ostringstream() << pAct.minCircularity).str();
        String sup = static_cast<const ostringstream&>(ostringstream() << pAct.maxCircularity).str();
        // Assign directly if s is still empty, otherwise join with "AND"
        if (s.length() == 0)
            s = " Circularity range [" + inf + " to " + sup + "]";
        else
            s += " AND Circularity range [" + inf + " to " + sup + "]";
    }
    // Color filter
    if (pAct.filterByColor)
    {
        // Convert blobColor to an integer string and append it to s
        String inf = static_cast<const ostringstream&>(ostringstream() << (int)pAct.blobColor).str();
        // Assign directly if s is still empty, otherwise join with "AND"
        if (s.length() == 0)
            s = " Blob color " + inf;
        else
            s += " AND Blob color " + inf;
    }
    // Convexity filter
    if (pAct.filterByConvexity)
    {
        // Convert minConvexity and maxConvexity to strings and append the range to s
        String inf = static_cast<const ostringstream&>(ostringstream() << pAct.minConvexity).str();
        String sup = static_cast<const ostringstream&>(ostringstream() << pAct.maxConvexity).str();
        // Assign directly if s is still empty, otherwise join with "AND"
        if (s.length() == 0)
            s = " Convexity range [" + inf + " to " + sup + "]";
        else
            s += " AND Convexity range [" + inf + " to " + sup + "]";
    }
    // Inertia-ratio filter
    if (pAct.filterByInertia)
    {
        // Convert minInertiaRatio and maxInertiaRatio to strings and append the range to s
        String inf = static_cast<const ostringstream&>(ostringstream() << pAct.minInertiaRatio).str();
        String sup = static_cast<const ostringstream&>(ostringstream() << pAct.maxInertiaRatio).str();
        // Assign directly if s is still empty, otherwise join with "AND"
        if (s.length() == 0)
            s = " Inertia ratio range [" + inf + " to " + sup + "]";
        else
            s += " AND Inertia ratio range [" + inf + " to " + sup + "]";
    }
    // Return the assembled description
    return s;
}






// Main entry point
int main(int argc, char *argv[])
{
    // Name of the image file to process
    String fileName;
    // Command-line parser for the program arguments
    cv::CommandLineParser parser(argc, argv, "{@input |detect_blob.png| }{h help | | }");
    // If "-h" or "--help" was given, print usage and exit
    if (parser.has("h"))
    {
        help(argv);
        return 0;
    }
    // Use the positional argument, falling back to the default "detect_blob.png"
    fileName = parser.get<string>("@input");
    // Load the image
    Mat img = imread(samples::findFile(fileName), IMREAD_COLOR);
    // Abort with an error message if the image could not be loaded
    if (img.empty())
    {
        cout << "Image " << fileName << " is empty or cannot be found\n";
        return 1;
    }


    // Default parameters for SimpleBlobDetector
    SimpleBlobDetector::Params pDefaultBLOB;
    // Step between the successive binarization thresholds
    pDefaultBLOB.thresholdStep = 10;
    // Lowest binarization threshold
    pDefaultBLOB.minThreshold = 10;
    // Highest binarization threshold
    pDefaultBLOB.maxThreshold = 220;
    // Minimum number of thresholds a blob must appear in to be kept
    pDefaultBLOB.minRepeatability = 2;
    // Minimum distance between blob centers
    pDefaultBLOB.minDistBetweenBlobs = 10;
    pDefaultBLOB.filterByColor = false; // do not filter by color
    pDefaultBLOB.blobColor = 0; // default blob color to match
    pDefaultBLOB.filterByArea = false; // do not filter by area
    pDefaultBLOB.minArea = 25; // minimum blob area
    pDefaultBLOB.maxArea = 5000; // maximum blob area
    pDefaultBLOB.filterByCircularity = false; // do not filter by circularity
    pDefaultBLOB.minCircularity = 0.9f; // minimum circularity
    pDefaultBLOB.maxCircularity = (float)1e37; // huge value, effectively no upper bound
    pDefaultBLOB.filterByInertia = false; // do not filter by inertia ratio
    pDefaultBLOB.minInertiaRatio = 0.1f; // minimum inertia ratio
    pDefaultBLOB.maxInertiaRatio = (float)1e37; // huge value, effectively no upper bound
    pDefaultBLOB.filterByConvexity = false; // do not filter by convexity
    pDefaultBLOB.minConvexity = 0.95f; // minimum convexity
    pDefaultBLOB.maxConvexity = (float)1e37; // huge value, effectively no upper bound


    // Human-readable names of the descriptor types
    vector<String> typeDesc;
    // Parameter sets for the different blob detectors
    vector<SimpleBlobDetector::Params> pBLOB;
    // Iterator over the parameter sets
    vector<SimpleBlobDetector::Params>::iterator itBLOB;
    // Color palette used to draw each detected blob in a distinct color
    vector< Vec3b >  palette;
    // Fill the palette with random colors
    for (int i = 0; i < 65536; i++)
    {
        uchar c1 = (uchar)rand();
        uchar c2 = (uchar)rand();
        uchar c3 = (uchar)rand();
        palette.push_back(Vec3b(c1, c2, c3));
    }
    // Print the usage instructions
    help(argv);


    // The code below configures six blob detectors with different parameters
    // and displays the result of each one.
    // The first detector, for example, is meant to keep every blob;
    // each configuration starts from the defaults and then adjusts one filter.


    // Detector 1: keep all blobs (area filter spans the whole image)
    typeDesc.push_back("BLOB");    // see the SimpleBlobDetector class documentation
    pBLOB.push_back(pDefaultBLOB); // start from the default parameters
    pBLOB.back().filterByArea = true; // enable the area filter
    pBLOB.back().minArea = 1; // minimum area of 1 pixel
    pBLOB.back().maxArea = float(img.rows * img.cols); // maximum area = whole image

    // Detector 2: keep only blobs whose area is between 500 and 2900 pixels
    typeDesc.push_back("BLOB");
    pBLOB.push_back(pDefaultBLOB); // start from the default parameters
    pBLOB.back().filterByArea = true; // enable the area filter
    pBLOB.back().minArea = 500; // minimum area of 500 pixels
    pBLOB.back().maxArea = 2900; // maximum area of 2900 pixels

    // Detector 3: keep only circular objects
    typeDesc.push_back("BLOB");
    pBLOB.push_back(pDefaultBLOB); // start from the default parameters
    pBLOB.back().filterByCircularity = true; // enable the circularity filter

    // Detector 4: filter by inertia ratio
    typeDesc.push_back("BLOB");
    pBLOB.push_back(pDefaultBLOB); // start from the default parameters
    pBLOB.back().filterByInertia = true; // enable the inertia-ratio filter
    pBLOB.back().minInertiaRatio = 0; // minimum inertia ratio of 0
    pBLOB.back().maxInertiaRatio = (float)0.2; // maximum inertia ratio of 0.2

    // Detector 5: filter by convexity
    typeDesc.push_back("BLOB");
    pBLOB.push_back(pDefaultBLOB); // start from the default parameters
    pBLOB.back().filterByConvexity = true; // enable the convexity filter
    pBLOB.back().minConvexity = 0.; // minimum convexity of 0
    pBLOB.back().maxConvexity = (float)0.9; // maximum convexity of 0.9

    // Detector 6: keep blobs whose center has gray value 0 (dark blobs)
    typeDesc.push_back("BLOB");
    pBLOB.push_back(pDefaultBLOB); // start from the default parameters
    pBLOB.back().filterByColor = true; // enable the color filter
    pBLOB.back().blobColor = 0; // keep blobs whose color is 0


    // Point the iterator at the first parameter set
    itBLOB = pBLOB.begin();


    // Comparison results (declared but not used in this sample)
    vector<double> desMethCmp;
    // Generic Feature2D handle for the current detector
    Ptr<Feature2D> b;
    // Text label describing the current filter settings
    String label;
    // Iterate over all descriptor types
    vector<String>::iterator itDesc;
    for (itDesc = typeDesc.begin(); itDesc != typeDesc.end(); ++itDesc)
    {
        // Keypoint storage (declared but not used below)
        vector<KeyPoint> keyImg1;
        // For the BLOB descriptor type
        if (*itDesc == "BLOB")
        {
            b = SimpleBlobDetector::create(*itBLOB); // create the blob detector
            label = Legende(*itBLOB); // build the description string
            ++itBLOB; // advance to the next parameter set
        }
        // Error handling
        try
        {
            // Detected keypoints
            vector<KeyPoint>  keyImg;
            // Helper containers (not used by the blob path)
            vector<Rect>  zone;
            vector<vector <Point> >  region;
            // Descriptor matrix and the image used to display the result
            Mat     desc, result(img.rows, img.cols, CV_8UC3);
            // If the detector really is a SimpleBlobDetector
            if (b.dynamicCast<SimpleBlobDetector>().get())
            {
                // Downcast the generic handle to SimpleBlobDetector
                Ptr<SimpleBlobDetector> sbd = b.dynamicCast<SimpleBlobDetector>();
                // Detect the keypoints
                sbd->detect(img, keyImg, Mat());
                // Draw the detected keypoints
                drawKeypoints(img, keyImg, result);
                // Draw a circle of the keypoint's size around each keypoint,
                // using a different palette color per blob
                int i = 0;
                for (vector<KeyPoint>::iterator k = keyImg.begin(); k != keyImg.end(); ++k, ++i)
                    circle(result, k->pt, (int)k->size, palette[i % 65536]);
            }
            // Show the result in a window named after the filter settings
            namedWindow(*itDesc + label, WINDOW_AUTOSIZE);
            imshow(*itDesc + label, result);
            // Show the original image
            imshow("Original", img);
            // Wait for a key press before moving to the next detector
            waitKey();
        }
        catch (const Exception& e)
        {
            // Print the error message if detection failed
            cout << "Feature : " << *itDesc << "\n";
            cout << e.msg << endl;
        }
    }
    // Normal exit
    return 0;
}

This C++ program, built on OpenCV, demonstrates how to detect BLOBs (Binary Large Objects) in an image with the SimpleBlobDetector class and how to filter and display the detected regions under different parameter sets. Blob detection segments connected regions of an image that share a property such as area, color, or convexity. The code covers configuring the detection parameters, generating a random color palette, detecting keypoints, building a textual description of the active filter conditions, displaying the images, and handling exceptions. By changing the SimpleBlobDetector parameters, the user can select image regions that satisfy specific conditions, such as objects of a particular size, shape, or color.
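To make the API easier to try in isolation, here is a condensed sketch of the same SimpleBlobDetector workflow with a single parameter set. The file name "blobs.png" and the minCircularity value are illustrative assumptions, not part of the original sample:

#include <opencv2/core.hpp>
#include <opencv2/features2d.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgcodecs.hpp>
#include <vector>

int main()
{
    // Hypothetical input file; substitute any image that contains dark blobs
    cv::Mat img = cv::imread("blobs.png", cv::IMREAD_COLOR);
    if (img.empty()) return 1;

    // Start from the default parameters, then keep only dark (blobColor == 0),
    // roughly circular blobs; the remaining filters keep their defaults
    cv::SimpleBlobDetector::Params p;
    p.filterByColor = true;
    p.blobColor = 0;
    p.filterByCircularity = true;
    p.minCircularity = 0.8f;

    cv::Ptr<cv::SimpleBlobDetector> sbd = cv::SimpleBlobDetector::create(p);
    std::vector<cv::KeyPoint> keypoints;
    sbd->detect(img, keypoints);

    // DRAW_RICH_KEYPOINTS draws each keypoint as a circle whose radius
    // reflects the blob's estimated size
    cv::Mat out;
    cv::drawKeypoints(img, keypoints, out, cv::Scalar(0, 0, 255),
                      cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
    cv::imshow("dark circular blobs", out);
    cv::waitKey();
    return 0;
}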

b.dynamicCast<SimpleBlobDetector>().get()

This expression deserves a closer look. cv::Ptr::dynamicCast is the smart-pointer counterpart of dynamic_cast: it returns a Ptr<SimpleBlobDetector> that shares ownership with b when the object b points to really is a SimpleBlobDetector, and an empty Ptr otherwise. Calling .get() then extracts the raw pointer, which is non-null only if the cast succeeded, so the whole expression acts as a runtime type check before the detector-specific branch runs.
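A minimal standalone sketch of that behavior follows; the ORB detector is used here only as a contrasting Feature2D subtype to show the failing case:

#include <opencv2/features2d.hpp>
#include <iostream>

int main()
{
    // The base handle actually owns a SimpleBlobDetector...
    cv::Ptr<cv::Feature2D> blob = cv::SimpleBlobDetector::create();
    // ...so the downcast yields a non-empty Ptr and a non-null raw pointer
    std::cout << (blob.dynamicCast<cv::SimpleBlobDetector>().get() != nullptr) << "\n"; // prints 1

    // A different Feature2D subtype fails the cast and yields an empty Ptr
    cv::Ptr<cv::Feature2D> orb = cv::ORB::create();
    std::cout << (orb.dynamicCast<cv::SimpleBlobDetector>().get() != nullptr) << "\n";  // prints 0
    return 0;
}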

