[opencv450-samples] Facial feature detection with cascade classifiers

This program demonstrates how to use Haar cascade classifiers to detect a face in an image together with its eyes, nose and mouth. By specifying the paths of the individual classifiers you control which features are detected. The code has been tested on the Japanese Female Facial Expression (JAFFE) database and gives reasonably accurate results.

/*
 * Author: Samyak Datta (datta[dot]samyak[at]gmail.com)
 *
 * A program to detect facial feature points using
 * Haarcascade classifiers for face, eyes, nose and mouth
 *
 */

#include "opencv2/objdetect.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"

#include <iostream>
#include <cstdio>
#include <vector>
#include <algorithm>

using namespace std;
using namespace cv;

// Functions for facial feature detection
static void help(char** argv);
static void detectFaces(Mat&, vector<Rect_<int> >&, string);
static void detectEyes(Mat&, vector<Rect_<int> >&, string);
static void detectNose(Mat&, vector<Rect_<int> >&, string);
static void detectMouth(Mat&, vector<Rect_<int> >&, string);
static void detectFacialFeaures(Mat&, const vector<Rect_<int> >, string, string, string);

string input_image_path;
string face_cascade_path, eye_cascade_path, nose_cascade_path, mouth_cascade_path;

int main(int argc, char** argv)
{
    cv::CommandLineParser parser(argc, argv,
        "{eyes|haarcascades/haarcascade_eye.xml|}{nose|haarcascade_mcs_nose.xml|}{mouth|haarcascades/haarcascade_smile.xml|}{help h||}{@image|C:/Users/Zohar/Pictures/Test/xuqing.bmp|}{@facexml|haarcascades/haarcascade_frontalface_alt2.xml|}");
    if (parser.has("help"))
    {
        help(argv);
        return 0;
    }
    // Get the input image and cascade classifier paths
    input_image_path = parser.get<string>("@image");
    face_cascade_path = parser.get<string>("@facexml");
    eye_cascade_path = parser.has("eyes") ? parser.get<string>("eyes") : "";
    nose_cascade_path = parser.has("nose") ? parser.get<string>("nose") : "";
    mouth_cascade_path = parser.has("mouth") ? parser.get<string>("mouth") : "";
    if (input_image_path.empty() || face_cascade_path.empty())
    {
        cout << "IMAGE or FACE_CASCADE are not specified";
        return 1;
    }
    // Load the input image and cascade classifier files
    Mat image;
    image = imread(samples::findFile(input_image_path));

    // Detect faces and facial features
    vector<Rect_<int> > faces;
    detectFaces(image, faces, face_cascade_path);
    detectFacialFeaures(image, faces, eye_cascade_path, nose_cascade_path, mouth_cascade_path);
    resize(image, image, Size(600, 600));
    imshow("Result", image);

    waitKey(0);
    return 0;
}

static void help(char** argv)
{
    cout << "\nThis file demonstrates facial feature points detection using Haarcascade classifiers.\n"
        "The program detects a face and eyes, nose and mouth inside the face."
        "The code has been tested on the Japanese Female Facial Expression (JAFFE) database and found"
        "to give reasonably accurate results. \n";

    cout << "\nUSAGE: " << argv[0] << " [IMAGE] [FACE_CASCADE] [OPTIONS]\n"
        "IMAGE\n\tPath to the image of a face taken as input.\n"
        "FACE_CASCSDE\n\t Path to a haarcascade classifier for face detection.\n"
        "OPTIONS: \nThere are 3 options available which are described in detail. There must be a "
        "space between the option and it's argument (All three options accept arguments).\n"
        "\t-eyes=<eyes_cascade> : Specify the haarcascade classifier for eye detection.\n"
        "\t-nose=<nose_cascade> : Specify the haarcascade classifier for nose detection.\n"
        "\t-mouth=<mouth-cascade> : Specify the haarcascade classifier for mouth detection.\n";


    cout << "EXAMPLE:\n"
        "(1) " << argv[0] << " image.jpg face.xml -eyes=eyes.xml -mouth=mouth.xml\n"
        "\tThis will detect the face, eyes and mouth in image.jpg.\n"
        "(2) " << argv[0] << " image.jpg face.xml -nose=nose.xml\n"
        "\tThis will detect the face and nose in image.jpg.\n"
        "(3) " << argv[0] << " image.jpg face.xml\n"
        "\tThis will detect only the face in image.jpg.\n";

    cout << " \n\nThe classifiers for face and eyes can be downloaded from : "
        " \nhttps://github.com/opencv/opencv/tree/master/data/haarcascades";

    cout << "\n\nThe classifiers for nose and mouth can be downloaded from : "
        " \nhttps://github.com/opencv/opencv_contrib/tree/master/modules/face/data/cascades\n";
}

static void detectFaces(Mat& img, vector<Rect_<int> >& faces, string cascade_path)
{
    CascadeClassifier face_cascade;
    face_cascade.load(samples::findFile(cascade_path));

    if (!face_cascade.empty())
        face_cascade.detectMultiScale(img, faces, 1.15, 3, 0 | CASCADE_SCALE_IMAGE, Size(30, 30));
    return;
}

static void detectFacialFeaures(Mat& img, const vector<Rect_<int> > faces, string eye_cascade,
    string nose_cascade, string mouth_cascade)
{
    for (unsigned int i = 0; i < faces.size(); ++i)
    {
        // Mark the bounding box enclosing the face
        Rect face = faces[i];
        rectangle(img, Point(face.x, face.y), Point(face.x + face.width, face.y + face.height),
            Scalar(255, 0, 0), 1, 4);

        // Eyes, nose and mouth will be detected inside the face (region of interest)
        Mat ROI = img(Rect(face.x, face.y, face.width, face.height));

        // Check if all features (eyes, nose and mouth) are being detected
        bool is_full_detection = false;
        if ((!eye_cascade.empty()) && (!nose_cascade.empty()) && (!mouth_cascade.empty()))
            is_full_detection = true;

        // Detect eyes if a classifier was provided by the user
        if (!eye_cascade.empty())
        {
            vector<Rect_<int> > eyes;
            detectEyes(ROI, eyes, eye_cascade);

            // Mark points corresponding to the centre of the eyes
            for (unsigned int j = 0; j < eyes.size(); ++j)
            {
                Rect e = eyes[j];
                circle(ROI, Point(e.x + e.width / 2, e.y + e.height / 2), 3, Scalar(0, 255, 0), -1, 8);
                /* rectangle(ROI, Point(e.x, e.y), Point(e.x+e.width, e.y+e.height),
                    Scalar(0, 255, 0), 1, 4); */
            }
        }

        // Detect nose if a classifier was provided by the user
        double nose_center_height = 0.0;
        if (!nose_cascade.empty())
        {
            vector<Rect_<int> > nose;
            detectNose(ROI, nose, nose_cascade);

            // Mark points corresponding to the centre (tip) of the nose
            for (unsigned int j = 0; j < nose.size(); ++j)
            {
                Rect n = nose[j];
                circle(ROI, Point(n.x + n.width / 2, n.y + n.height / 2), 3, Scalar(0, 255, 0), -1, 8);
                nose_center_height = (n.y + n.height / 2);
            }
        }

        // Detect mouth if a classifier was provided by the user
        double mouth_center_height = 0.0;
        if (!mouth_cascade.empty())
        {
            vector<Rect_<int> > mouth;
            detectMouth(ROI, mouth, mouth_cascade);

            for (unsigned int j = 0; j < mouth.size(); ++j)
            {
                Rect m = mouth[j];
                mouth_center_height = (m.y + m.height / 2);

                // The mouth should lie below the nose
                if ((is_full_detection) && (mouth_center_height > nose_center_height))
                {
                    rectangle(ROI, Point(m.x, m.y), Point(m.x + m.width, m.y + m.height), Scalar(0, 255, 0), 1, 4);
                }
                else if ((is_full_detection) && (mouth_center_height <= nose_center_height))
                    continue;
                else
                    rectangle(ROI, Point(m.x, m.y), Point(m.x + m.width, m.y + m.height), Scalar(0, 255, 0), 1, 4);
            }
        }

    }

    return;
}

static void detectEyes(Mat& img, vector<Rect_<int> >& eyes, string cascade_path)
{
    CascadeClassifier eyes_cascade;
    eyes_cascade.load(samples::findFile(cascade_path, !cascade_path.empty()));

    if (!eyes_cascade.empty())
        eyes_cascade.detectMultiScale(img, eyes, 1.20, 5, 0 | CASCADE_SCALE_IMAGE, Size(30, 30));
    return;
}

static void detectNose(Mat& img, vector<Rect_<int> >& nose, string cascade_path)
{
    CascadeClassifier nose_cascade;
    // haarcascade_mcs_nose.xml ships with opencv_contrib rather than the bundled
    // haarcascades, so the path is loaded directly instead of via samples::findFile()
    nose_cascade.load(cascade_path);

    if (!nose_cascade.empty())
        nose_cascade.detectMultiScale(img, nose, 1.20, 5, 0 | CASCADE_SCALE_IMAGE, Size(30, 30));
    return;
}

static void detectMouth(Mat& img, vector<Rect_<int> >& mouth, string cascade_path)
{
    CascadeClassifier mouth_cascade;
    mouth_cascade.load(samples::findFile(cascade_path, !cascade_path.empty()));

    if (!mouth_cascade.empty())
        mouth_cascade.detectMultiScale(img, mouth, 1.20, 5, 0 | CASCADE_SCALE_IMAGE, Size(30, 30));
    return;
}

Original image

Detection result

Training a Haar cascade classifier with Python and the OpenCV library is a fairly involved task, especially when it targets a specific scenario such as "car crash" recognition. The detailed steps are as follows:

### Preparation

1. **Set up the environment**

   Make sure the required packages (Python, OpenCV, etc.) are installed:

   ```bash
   pip install opencv-python-headless numpy
   ```

2. **Collect positive and negative samples**

   - Positive samples are images that contain the target object (here, car-crash pictures);
   - Negative samples are arbitrary background images of any other kind that do not contain the target.

   The data should be as diverse as possible and cover different angles, lighting conditions and so on.

3. **Annotate the data**

   For the positive samples you need to create a `.vec` file that stores the position and size descriptor of the object in every image; the negative samples only need to be listed by path, with no further processing. Open-source annotation tools can take much of the tedium out of this step. (An example annotation file is shown after this walkthrough.)

4. **Configure the parameters and generate the vector file**

   Use the command-line tool to convert the prepared jpg/png images into the vector format (`*.vec`) that OpenCV can read. On a Linux system, for example:

   ```bash
   opencv_createsamples -info positives.txt -num <number_of_samples> -w 24 -h 24 -vec samples.vec
   ```

   The `-info` parameter points to your list of positive samples: every line of `positives.txt` records the path of an annotated image together with the x, y, w, h values of the marked region; `-num` gives the total number of samples, and `-w` and `-h` are the window width and height.

5. **Start training the model**

   With everything in place, launch the actual training using the official utility:

   ```bash
   opencv_traincascade -data classifier -vec samples.vec -bg negatives.txt -numPos <positive_sample_count> -numNeg <negative_sample_count> -numStages <stages_number> -featureType HAAR -minHitRate 0.999 -maxFalseAlarmRate 0.5 -mode ALL -precalcValBufSize 1024 -precalcIdxBufSize 1024 -w 24 -h 24
   ```

   The main options are:

   - `-data`: output directory name;
   - `-vec`: input vectorized feature set (`.vec`);
   - `-bg`: text file listing the negative examples;
   - `-numPos` / `-numNeg`: how many positive and negative samples take part in training;
   - `-numStages`: number of training stages; the default of 20 is enough for most applications;
   - `-featureType`: feature type, either HAAR or an alternative such as LBP.

6. **Test and optimize**

   The last step is to validate the trained classifier, inspect the detection quality, and keep adjusting whatever performs poorly until you are satisfied. (A minimal Python test sketch follows this walkthrough.)

---

The above is a rough outline of building a crash-detection Haar cascade classifier with the OpenCV framework in a Python environment. In practice you will run into many detail-level differences, so it is worth consulting further documentation and digging deeper through experimentation.
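For reference, a minimal `positives.txt` info file for `opencv_createsamples` could look like the sketch below; the image names and box coordinates are purely illustrative. Each line gives the image path, the number of objects in that image, and one x y w h group per object, while `negatives.txt` simply lists one background image path per line.

```
img/crash_0001.jpg 1 120 60 96 96
img/crash_0002.jpg 2 15 30 64 64 210 140 80 80
```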
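And here is a minimal Python sketch for step 6. It assumes training finished and produced `classifier/cascade.xml`, the file that `opencv_traincascade` writes into its `-data` directory; `test_crash.jpg` and `result.jpg` are placeholder names.

```python
import cv2

# Load the trained cascade; opencv_traincascade writes cascade.xml into its -data directory
cascade = cv2.CascadeClassifier("classifier/cascade.xml")
if cascade.empty():
    raise IOError("Failed to load classifier/cascade.xml")

# Read a test image (placeholder path) and convert it to grayscale for detection
img = cv2.imread("test_crash.jpg")
if img is None:
    raise IOError("Failed to read test_crash.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# scaleFactor and minNeighbors usually need tuning for each dataset
detections = cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(24, 24))

# Draw the hits and save the result (the headless package installed above has no GUI)
for (x, y, w, h) in detections:
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.imwrite("result.jpg", img)
print(f"{len(detections)} detection(s) written to result.jpg")
```

If too many false positives show up, raising `minNeighbors` or adding more negative samples and stages is the usual first adjustment.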