OpenCV foreground/background separation

This post describes a moving-object detection and tracking approach based on a Gaussian mixture model (GMM). It walks through the implementation in detail, covering the key steps of model initialization, parameter updates, background modelling and foreground extraction. By processing consecutive video frames, moving targets are detected effectively.

The pipeline combines three ingredients: a per-pixel Gaussian mixture model for background subtraction, morphological opening and closing to clean up the resulting mask, and feature points (SIFT) to pick out the target inside the remaining foreground. The header getfootimg.h is listed first, followed by its implementation.
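For reference only (this is not part of the original code): OpenCV already ships a per-pixel GMM background subtractor, so the same kind of foreground mask can be obtained in a few lines. A minimal sketch, assuming OpenCV 2.4.x, which the nonfree/legacy includes below suggest:

#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    VideoCapture cap(0);
    if (!cap.isOpened()) return -1;

    BackgroundSubtractorMOG2 mog2;   // built-in per-pixel Gaussian mixture model
    Mat frame, fgmask;
    while (cap.read(frame))
    {
        mog2(frame, fgmask);                              // update the model and extract the foreground mask
        morphologyEx(fgmask, fgmask, MORPH_OPEN, Mat());  // remove small noise blobs
        imshow("foreground mask", fgmask);
        if (waitKey(30) == 27) break;                     // ESC to quit
    }
    return 0;
}

The hand-written class below implements essentially the same idea, which makes the individual steps (initialization, training, background selection, testing) easier to follow.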

#ifndef GETFOOTIMG_H
#define GETFOOTIMG_H
#include<QThread>
#include<QObject>
#include<opencv2/opencv.hpp>
#include<opencv2/highgui/highgui.hpp>
#include<opencv2/nonfree/nonfree.hpp>
#include<opencv2/legacy/legacy.hpp>
using namespace cv;
using namespace std;

//Constants used by the GMM model
 #define GMM_MAX_COMPONT 6			//maximum number of Gaussian components per pixel
 #define GMM_LEARN_ALPHA 0.005		//learning rate
 #define GMM_THRESHOD_SUMW 0.7		//cumulative-weight threshold for selecting background components
 #define TRAIN_FRAMES 60	// number of initial frames used to build the background model

struct MapInfo
{
    int x;
    int y;
    bool letf;
    bool right;
    bool positive;

};
class GetFootImg : public QThread
{

    Q_OBJECT
public:
    explicit GetFootImg(QObject *parent = nullptr);
    ~GetFootImg();
    MapInfo footInfo;
    void init(const Mat _image);
    void processFirstFrame(const Mat _image);
    void trainGMM(const Mat _image);
    void getFitNum(const Mat _image);
    void testGMM(const Mat _image);


    Mat getMask(void)
    {
        return m_mask;
    }
private:
    Mat m_weight[GMM_MAX_COMPONT];  //weights
    Mat m_mean[GMM_MAX_COMPONT];    //means
    Mat m_sigma[GMM_MAX_COMPONT];   //variances

    Mat m_mask;
    Mat m_fit_num;
signals:
    //void push_img(QMap<QString,QImage>);
public slots:
protected:
    void run();
};

#endif // GETFOOTIMG_H
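
The corresponding implementation file, getfootimg.cpp:
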
#include "getfootimg.h"

GetFootImg::GetFootImg(QObject *parent) : QThread(parent)
{
    footInfo.x=0;
    footInfo.y=0;
    footInfo.positive=false;
    footInfo.letf=false;
    footInfo.right=false;
}

GetFootImg::~GetFootImg()
{

}

// Initialize all model planes to zero
void GetFootImg::init(const Mat _image)
{
     /**** initialize the three per-component parameter planes ****/
     for(int i = 0; i < GMM_MAX_COMPONT; i++)
     {
         m_weight[i] = Mat::zeros(_image.size(), CV_32FC1);
         m_mean[i] = Mat::zeros(_image.size(), CV_8UC1);
         m_sigma[i] = Mat::zeros(_image.size(), CV_32FC1);
     }
     m_mask = Mat::zeros(_image.size(),CV_8UC1);
     m_fit_num = Mat::ones(_image.size(),CV_8UC1);
}

//GMM first-frame initialization
//When the first frame is captured, every Gaussian component is given an initial weight, mean and variance.
//The first component gets weight 1 and the first frame's pixel value as its mean; all other components get weight 0 and mean 0.
//Every component is given the same initial variance of 15.
void GetFootImg::processFirstFrame(const Mat _image)
{
    for(int i = 0; i < GMM_MAX_COMPONT; i++)
    {
        if (i == 0)
        {
            m_weight[i].setTo(1.0);
            _image.copyTo(m_mean[i]);
            m_sigma[i].setTo(15.0);
        }
        else
        {
            m_weight[i].setTo(0.0);
            m_mean[i].setTo(0);
            m_sigma[i].setTo(15.0);
        }
    }
}
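
As a concrete example: after processFirstFrame, a pixel whose first-frame intensity is 128 is modelled as component 0 = {weight 1.0, mean 128, variance 15} and components 1 to 5 = {weight 0, mean 0, variance 15}.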

// Update (train) the GMM with each new frame
void GetFootImg::trainGMM(const Mat _image)
{
    for(int i = 0; i < _image.rows; i++)
    {
        for(int j = 0; j < _image.cols; j++)
        {
             int num_fit = 0;

             /**************************** Update parameters Start ******************************************/
             for(int k = 0 ; k < GMM_MAX_COMPONT; k++)
             {
                 float delm = (float)_image.at<uchar>(i, j) - (float)m_mean[k].at<uchar>(i, j); // signed difference from the component mean
                 float dist = delm * delm;
                 // Match test: the squared distance to the mean must be below 3 times the component variance
                 if( dist < 3.0 * m_sigma[k].at<float>(i, j))
                 {
                     // matched: reinforce this component
                     /****update the weight****/
                     m_weight[k].at<float>(i, j) += GMM_LEARN_ALPHA * (1 - m_weight[k].at<float>(i, j));

                     /****update the mean (note: the weight plane is CV_32F, so it must be read with at<float>)****/
                     m_mean[k].at<uchar>(i, j) = saturate_cast<uchar>(m_mean[k].at<uchar>(i, j) + (GMM_LEARN_ALPHA / m_weight[k].at<float>(i, j)) * delm);

                     /****update the variance****/
                     m_sigma[k].at<float>(i, j) += (GMM_LEARN_ALPHA / m_weight[k].at<float>(i, j)) * (dist - m_sigma[k].at<float>(i, j));
                 }
                 else
                 {
                    // no match: decay this component's weight toward zero
                     m_weight[k].at<float>(i, j) += GMM_LEARN_ALPHA * (0 - m_weight[k].at<float>(i, j));
                     num_fit++; // count of components that did not match this pixel
                 }
             }
             /**************************** Update parameters End ******************************************/


             /*********************** Sort Gaussian components by 'weight / sigma', Start ****************************/
             // Sort the components in descending order of weight / sigma, so the most probable background components come first
             for(int kk = 0; kk < GMM_MAX_COMPONT; kk++)
             {
                 for(int rr=kk; rr< GMM_MAX_COMPONT; rr++)
                 {
                     if(m_weight[rr].at<float>(i, j)/m_sigma[rr].at<float>(i, j) > m_weight[kk].at<float>(i, j)/m_sigma[kk].at<float>(i, j))
                     {
                         // swap weights
                         float temp_weight = m_weight[rr].at<float>(i, j);
                         m_weight[rr].at<float>(i, j) = m_weight[kk].at<float>(i, j);
                         m_weight[kk].at<float>(i, j) = temp_weight;

                         // swap means
                         uchar temp_mean = m_mean[rr].at<uchar>(i, j);
                         m_mean[rr].at<uchar>(i, j) = m_mean[kk].at<uchar>(i, j);
                         m_mean[kk].at<uchar>(i, j) = temp_mean;

                         // swap variances
                         float temp_sigma = m_sigma[rr].at<float>(i, j);
                         m_sigma[rr].at<float>(i, j) = m_sigma[kk].at<float>(i, j);
                         m_sigma[kk].at<float>(i, j) = temp_sigma;
                     }
                 }
             }
             /*********************** Sort Gaussian model by 'weight / sigma' End ****************************/


             /*********************** Create new Gaussian component Start ****************************/
             if(num_fit == GMM_MAX_COMPONT && 0 == m_weight[GMM_MAX_COMPONT - 1].at<float>(i, j))
             {
                 // No existing component matched and the last (weakest) slot is still unused:
                 // start a new component, using the new sample as its mean, a large variance and a small weight
                  for(int k = 0 ; k < GMM_MAX_COMPONT; k++)
                 {
                     if(0 == m_weight[k].at<float>(i, j))
                     {
                         m_weight[k].at<float>(i, j) = GMM_LEARN_ALPHA;
                         m_mean[k].at<uchar>(i, j) = _image.at<uchar>(i, j);
                         m_sigma[k].at<float>(i, j) = 15.0;

                         // Renormalize so that the weights sum to 1 (slot k already holds GMM_LEARN_ALPHA)
                         for(int q = 0; q < GMM_MAX_COMPONT; q++)
                         {
                             if(q == k)
                                 continue; // skip the newly created component
                             // Rescale the other components' weights; their means and variances stay unchanged
                             m_weight[q].at<float>(i, j) *= (1 - GMM_LEARN_ALPHA) / (1 - m_weight[k].at<float>(i, j));
                         }
                         break; // stop after filling the first zero-weight slot
                      }
                  }
             }
             else if(num_fit == GMM_MAX_COMPONT && m_weight[GMM_MAX_COMPONT -1].at<float>(i, j) != 0)
             {
                 // If all GMM_MAX_COMPONT slots are already in use, replace the weakest (last) component with the new sample:
                 // its weight is kept, only the mean and the variance are reset
                 m_mean[GMM_MAX_COMPONT-1].at<uchar>(i, j) = _image.at<uchar>(i, j);
                 m_sigma[GMM_MAX_COMPONT-1].at<float>(i, j) = 15.0;
             }
             /*********************** Create new Gaussian component End ****************************/
         }
    }
}
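
To make the per-pixel arithmetic in trainGMM easier to see, here is the same update written for a single scalar component. This is only an illustrative restatement; the Component struct and updateComponent function are not part of the class above.

// Scalar form of the per-component update used in trainGMM.
// alpha corresponds to GMM_LEARN_ALPHA; sigma holds the variance.
struct Component { float weight; float mean; float sigma; };

void updateComponent(Component &c, float pixel, float alpha, bool matched)
{
    if (matched) {
        float d = pixel - c.mean;                           // signed distance to the mean
        c.weight += alpha * (1.0f - c.weight);              // pull the weight toward 1
        c.mean   += (alpha / c.weight) * d;                 // move the mean toward the sample
        c.sigma  += (alpha / c.weight) * (d * d - c.sigma); // move the variance toward d^2
    } else {
        c.weight += alpha * (0.0f - c.weight);              // decay the weight toward 0
    }
}

With alpha = 0.005, a matched component with weight 0.5 ends up at 0.5 + 0.005 * (1 - 0.5) = 0.5025, while an unmatched one decays to 0.5 * (1 - 0.005) = 0.4975.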

 //For every pixel, choose how many Gaussian components count as background.
 //After sorting, the components most likely to describe the background come first, while short-lived distributions drift toward the end.
 //The first fit_num components whose cumulative weight exceeds GMM_THRESHOD_SUMW form the background model; the remaining components are treated as foreground.
void GetFootImg::getFitNum(const Mat _image)
{
    for(int i = 0; i < _image.rows; i++)
    {
        for(int j = 0; j < _image.cols; j++)
        {
            float sum_w = 0.0;	// reset the cumulative weight for this pixel
            for(uchar k = 0; k < GMM_MAX_COMPONT; k++)
            {
                sum_w += m_weight[k].at<float>(i, j);
                if(sum_w >= GMM_THRESHOD_SUMW)	// with a threshold as low as 0.6, fit_num would be 1 almost everywhere, because one component's weight is usually close to 1
                {
                     m_fit_num.at<uchar>(i, j) = k + 1;
                     break;
                }
            }
        }
    }
}
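
As a worked example: if a pixel's sorted weights are 0.55, 0.20, 0.15, 0.10, 0, 0, the cumulative sum is 0.55 after the first component and 0.75 >= GMM_THRESHOD_SUMW (0.7) after the second, so fit_num becomes 2 and only the first two Gaussians are treated as background for that pixel.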

 //GMM test: mark as foreground every pixel that matches none of its background components
void GetFootImg::testGMM(const Mat _image)
{
    for(int i = 0; i < _image.rows; i++)
    {
        for(int j = 0; j < _image.cols; j++)
        {
            int k = 0;
            for( ; k < m_fit_num.at<uchar>(i, j); k++)
            {
                if(abs(_image.at<uchar>(i, j) - m_mean[k].at<uchar>(i, j)) < 2.5 * m_sigma[k].at<float>(i, j)) // compare in floating point; do not truncate the threshold to uchar
                {
                    m_mask.at<uchar>(i, j) = 0;
                    break;
                }
            }
            if(k == m_fit_num.at<uchar>(i, j))
            {
                m_mask.at<uchar>(i, j) = 255;
            }
        }
    }
}
void GetFootImg::run()
{
    Mat frame, gray, mask;
    VideoCapture capture;
    capture.open(0);

    if (!capture.isOpened())
    {
        cout<<"No camera or video input!\n"<<endl;
        return ;
    }

  //  MOG_BGS Mog_Bgs;
    int count = 0;

    while (1)
    {
        count++;
        capture >> frame;
        if (frame.empty())
            break;

        Mat TempImg_Save=frame.clone();
        cvtColor(frame, gray, CV_BGR2GRAY);  // VideoCapture delivers BGR frames

        if (count == 1)
        {
            init(gray);
            processFirstFrame(gray);
            cout<<" Using "<<TRAIN_FRAMES<<" frames to training GMM..."<<endl;
        }
        else if (count < TRAIN_FRAMES)
        {
            trainGMM(gray);
        }
        else if (count == TRAIN_FRAMES)
        {
            getFitNum(gray);
            cout<<" Training GMM complete!"<<endl;
        }
        else
        {

            testGMM(gray);
            mask = getMask();
            morphologyEx(mask, mask, MORPH_OPEN, Mat());
            // Use a proper structuring element; an uninitialized Mat(7, 7, CV_8UC1) is not a reliable kernel.
            // A Size(5, 5) element distorts the mask less.
            Mat morphKernel = getStructuringElement(MORPH_RECT, Size(7, 7));
            erode(mask, mask, morphKernel, Point(-1, -1));
            dilate(mask, mask, morphKernel, Point(-1, -1));
            Mat Temp;
            TempImg_Save.copyTo(Temp,mask);
            imshow("foreground", Temp);
            Mat imgHSV;
           // vector<Mat> hsvSplit;
            cvtColor(Temp, imgHSV, COLOR_BGR2HSV);

            // HSV range that roughly corresponds to green
                int iLowH = 35;
                int iHighH = 77;

                int iLowS = 43;
                int iHighS = 255;

                int iLowV = 46;
                int iHighV = 255;

              //  cvtColor(img, imgHSV, COLOR_BGR2HSV);//转为HSV

               // imwrite("hsv.jpg",imgHSV);
                Mat imgThresholded;

                inRange(imgHSV, Scalar(iLowH, iLowS, iLowV), Scalar(iHighH, iHighS, iHighV), imgThresholded); //Threshold the image

                //Opening removes small noise blobs; if the thresholded image is still noisy, increase the kernel size below
                Mat element = getStructuringElement(MORPH_RECT, Size(5, 5));
                morphologyEx(imgThresholded, imgThresholded, MORPH_OPEN, element);
                //Closing bridges nearby connected regions
                morphologyEx(imgThresholded, imgThresholded, MORPH_CLOSE, element);





              //  namedWindow("Thresholded Image",CV_WINDOW_NORMAL);
                imshow("Thresholded Image", imgThresholded);
                bool running=false;  // set once the first green pixel is found
                for(int i=0;i<imgThresholded.rows;i++)
                {
                    for(int j=0;j<imgThresholded.cols;j++)
                    {
                        if(imgThresholded.at<uchar>(i,j) == 255)
                        {
                            cout << "found green pixel at row " << i << ", col " << j << endl;
                            footInfo.x=i;   // note: x stores the row index
                            footInfo.y=j;   // y stores the column index
                            running=true;
                            break;
                        }
                    }
                    if(running)
                    {
                        break;
                    }
                }

                if(footInfo.x != 0 && footInfo.y != 0)  // a green pixel was found ((0,0) is the unset default)
                {
                    Mat img1;
                    Mat img2;
                    cvtColor(Temp, img1, CV_BGR2GRAY);  // Temp is BGR, so use CV_BGR2GRAY
                    cvtColor(Temp, img2, CV_BGR2GRAY);  // note: img1 and img2 are taken from the same frame here
                    SiftFeatureDetector SiftDetector(800);
                    vector<KeyPoint> keyPoint1,keyPoint2;
                    SiftDetector.detect(img1,keyPoint1);
                    SiftDetector.detect(img2,keyPoint2);
                        //Compute SIFT descriptors for the keypoints, to be used by the matcher below
                        SiftDescriptorExtractor SiftDescriptor;
                        Mat imageDesc1, imageDesc2;
                        SiftDescriptor.compute(img1, keyPoint1, imageDesc1);
                        SiftDescriptor.compute(img2, keyPoint2, imageDesc2);

                        FlannBasedMatcher matcher;
                        vector<vector<DMatch> > matchePoints;
                        vector<DMatch> GoodMatchePoints;

                        vector<Mat> train_desc(1, imageDesc1);
                        matcher.add(train_desc);
                        matcher.train();

                        matcher.knnMatch(imageDesc2, matchePoints, 2);
                        cout << "total match points: " << matchePoints.size() << endl;

                        // Lowe's ratio test: keep only distinctive matches
                        for (int i = 0; i < matchePoints.size(); i++)
                        {
                            if (matchePoints[i][0].distance < 0.6 * matchePoints[i][1].distance)
                            {
                                GoodMatchePoints.push_back(matchePoints[i][0]);
                            }
                        }

                        Mat first_match;
                        drawMatches(img2, keyPoint2, img1, keyPoint1, GoodMatchePoints, first_match);
                        imshow("first_match ", first_match);
                        imwrite("first_match.jpg", first_match);

                }
        }

        if (waitKey(30) == 27)  // let HighGUI refresh the imshow windows; press ESC to stop
            break;
    }
}

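For completeness, a minimal sketch of how the worker could be started from a Qt application. This is an assumption about the intended usage, not code from the original post, and note that calling imshow/waitKey from a worker thread is platform dependent:

#include <QApplication>
#include "getfootimg.h"

int main(int argc, char *argv[])
{
    QApplication app(argc, argv);

    GetFootImg worker;   // run() opens the camera and drives the GMM pipeline
    worker.start();      // QThread::start() executes run() on a new thread

    return app.exec();   // keep the application (and the worker) alive
}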