Caffe model and ncnn model outputs do not match


The experiments below, based on the PNet of MTCNN, compare the results produced by the Caffe model with the results produced by the model converted with ncnn. Because the Caffe code reshapes the input layer, the width and height of the two sets of results are swapped; even setting that aside, the two results still do not match (the Caffe model's output has been verified to be correct). Since I have only just entered this field, my fundamentals are weak and I am not familiar with ncnn, and I have not yet found the cause of this. The experiment code is posted below; advice from experts would be appreciated.


Update: cause of the mismatch (2017-08-11):

The MTCNN models were produced with MATLAB, and MATLAB stores data in column-major order, so the weights in the MTCNN models are also stored column-major. ncnn, however, can only read models whose weights are stored row-major. The experiments below feed the MTCNN model directly through ncnn's caffe2ncnn tool, without converting the weights, which is why the results are wrong. Someone has already converted the weights of the MTCNN v1 models and written C++ code that runs MTCNN face detection with ncnn; if you need it, see here.
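
A minimal sketch of the kind of weight conversion that is needed (this is not the converter that was actually used; the function name and the std::vector interface are only illustrative): for a convolution weight blob in Caffe's (num_output, num_input, kh, kw) layout, each kh x kw kernel that MATLAB wrote out in column-major order is rewritten in row-major order so that ncnn reads it correctly.

#include <vector>
#include <algorithm>
#include <cstddef>

// Rewrite each kh x kw kernel of a Caffe convolution weight blob from
// column-major (MATLAB) element order to row-major (ncnn) element order.
// Blob layout is assumed to be (num_output, num_input, kh, kw).
void transpose_conv_kernels(std::vector<float>& weights,
                            int num_output, int num_input, int kh, int kw)
{
    std::vector<float> tmp(kh * kw);
    for (int o = 0; o < num_output; o++)
    {
        for (int i = 0; i < num_input; i++)
        {
            float* kernel = weights.data() + ((size_t)o * num_input + i) * kh * kw;
            // element (row y, col x) sits at x*kh + y in column-major order;
            // move it to y*kw + x for row-major order
            for (int y = 0; y < kh; y++)
                for (int x = 0; x < kw; x++)
                    tmp[y * kw + x] = kernel[x * kh + y];
            std::copy(tmp.begin(), tmp.end(), kernel);
        }
    }
}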


Experiment 1: code that computes the Caffe model's results

1. Data preprocessing:
1) Read the image face.jpg (1020*848, 3 channels) with OpenCV.
2) Convert the color channels from BGR to RGB.
3) Convert the image data type to float.
4) Resize the image to 128*106.
5) Subtract the mean 127.5 from the image and multiply by 0.0078125.

2. Load the model and feed it the preprocessed image.
3. Output the results of prob1 and conv4-2.
4. The code is as follows:

#include <boost/shared_ptr.hpp>
#include <vector>
#include <string>
#include <opencv2/opencv.hpp>
#include <caffe/caffe.hpp>
using std::cout;
using std::endl;

int main(){
    int ws = 128;
    int hs = 106;
    int l,j,k;
    boost::shared_ptr<caffe::Blob<float>> input_layer;
    boost::shared_ptr<caffe::Blob<float>> conv4_2;
    boost::shared_ptr<caffe::Blob<float>> prob1;
    std::vector<cv::Mat> input_channels;
    float *ptDataStart = NULL;

    std::string path_img = "face.jpg";
    cv::Mat img_in = cv::imread(path_img, CV_LOAD_IMAGE_COLOR);
    cv::Mat img_tmp;
    cv::cvtColor(img_in, img_tmp, CV_BGR2RGB);
    cv::Mat tImgFloat;
    img_tmp.convertTo(tImgFloat, CV_32FC3);
    cv::Mat img;
    cv::resize(tImgFloat, img, cv::Size(ws, hs));
    img = (img - 127.5) * 0.0078125;

    caffe::Caffe::set_mode(caffe::Caffe::CPU);
    FLAGS_minloglevel = 2;
    std::string g_sModelPath = "/home/wangk/models";
    caffe::Net<float> *m_ptPNet;
    m_ptPNet = new caffe::Net<float>(g_sModelPath + "/mtcnn/det1.prototxt", caffe::TEST);
    m_ptPNet->CopyTrainedLayersFrom(g_sModelPath + "/mtcnn/det1.caffemodel");

    input_layer = m_ptPNet->blob_by_name("data");
    conv4_2 = m_ptPNet->blob_by_name("conv4-2");
    prob1 = m_ptPNet->blob_by_name("prob1");

    // Blob::Reshape takes (num, channels, height, width), so height = ws (128) and
    // width = hs (106) here: the input blob is deliberately transposed
    input_layer->Reshape(1, img.channels(), ws, hs);
    input_channels.clear();
    float *input_data = input_layer->mutable_cpu_data();
    for (j = 0; j < input_layer->channels(); j++)
    {
        cv::Mat channel(input_layer->height(), input_layer->width(), CV_32FC1, input_data);
        input_channels.push_back(channel);
        input_data += input_layer->height() * input_layer->width();
    }

    // transpose the image before splitting it into planar channels, to match the
    // transposed input blob above (this is why Caffe's output w/h are swapped)
    cv::split(img.t(), input_channels);

    m_ptPNet->Forward();

    cout << "input_layer.w: " << input_layer->width() << endl; 
    cout << "input_layer.h: " << input_layer->height() << endl; 
    cout << "input_layer.c: " << input_layer->channels() << endl;

    cout << "prob1.w: " << prob1->width() << endl; 
    cout << "prob1.h: " << prob1->height() << endl; 
    cout << "prob1.c: " << prob1->channels() << endl;
    for (l = 0; l < prob1->channels(); l++){
        for (j = 0; j < prob1->height(); j++){
            for (k = 0; k < prob1->width(); k++){
                cout << prob1->data_at(0, l, j, k) << ",";
            }
            cout << endl;
        }
        cout << endl;
        cout << endl;
    }

    cout << "conv4_2.w: " << conv4_2->width() << endl; 
    cout << "conv4_2.h: " << conv4_2->height() << endl; 
    cout << "conv4_2.c: " << conv4_2->channels() << endl;
    for (l = 0; l < conv4_2->channels(); l++)
    {
        for (j = 0; j < conv4_2->height(); j++)
        {
            for (k = 0; k < conv4_2->width(); k++)
            {
                cout << conv4_2->data_at(0, l, j, k) << ",";
            }
            cout << endl;
        }
        cout << endl;
        cout << endl;
    }
    return 0;
}

Experiment 2: preprocess the data as above (with OpenCV) and feed it to the ncnn model

#include <iostream>
#include <string>
#include <opencv2/opencv.hpp>
using std::string;
using std::cout;
using std::endl;
#include "net.h"

int main(){
    int ws = 128;
    int hs = 106;
    string path_img = "face.jpg";
    cv::Mat img_in = cv::imread(path_img, CV_LOAD_IMAGE_COLOR);
    cv::Mat img_tmp;
    cv::cvtColor(img_in, img_tmp, CV_BGR2RGB);
    cv::Mat tImgFloat;
    img_tmp.convertTo(tImgFloat, CV_32FC3);
    cv::Mat img;
    cv::resize(tImgFloat, img, cv::Size(ws, hs));
    img = (img - 127.5) * 0.0078125;

    ncnn::Net pnet;
    string path = "/home/wangk/models";
    string path_param_str = path + "/mtcnn_c2/det1.param"; // PNet network definition converted by caffe2ncnn
    string path_model_str = path + "/mtcnn_c2/det1.bin";   // PNet model file converted by caffe2ncnn
    const char* path_param = path_param_str.c_str();
    const char* path_model = path_model_str.c_str();
    pnet.load_param(path_param);
    pnet.load_model(path_model);

    float* imageData = (float*) img.data;
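    // note: cv::Mat stores pixels interleaved (R,G,B,R,G,B,...), while ncnn::Mat is
    // planar (all of channel 0, then channel 1, ...); this constructor only wraps the
    // pointer without reordering, so the channels are not actually separated here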
    ncnn::Mat in = ncnn::Mat(ws, hs, img.channels(), imageData); 
    cout << "in.w: " << in.w << "\n" \
         << "in.h: " << in.h << "\n" \
         << "in.c: " << in.c << "\n" \
         << "in.cstep: " << in.cstep << "\n";
    ncnn::Extractor ex = pnet.create_extractor();
    ex.set_light_mode(true);
    ncnn::Mat conv4_2;
    ncnn::Mat prob1;

    ex.input("data",in);
    ex.extract("conv4-2", conv4_2);
    ex.extract("prob1", prob1);
    cout << "prob1.w: " << prob1.w << endl;
    cout << "prob1.h: " << prob1.h << endl; 
    cout << "prob1.c: " << prob1.c << endl; 
    cout << "prob1.cstep: " << prob1.cstep << endl;
    for (int k = 0; k < prob1.c; ++k){
        for (int i=0; i < prob1.h; ++i){
            for (int j=0; j<prob1.w; ++j){
                cout << *(prob1.data + prob1.cstep*k + prob1.w*i + j) << ",";
            }
            cout << endl;
        }
        cout << endl;
        cout << endl;
    }

    cout << "conv4_2.w: " << conv4_2.w << endl; // conv4_2.w: 505
    cout << "conv4_2.h: " << conv4_2.h << endl; // conv4_2.h: 419
    cout << "conv4_2.c: " << conv4_2.c << endl; // conv4_2.c: 4
    cout << "conv4_2.cstep: " << conv4_2.cstep << endl; 

    for (int i = 0; i < conv4_2.c; ++i){
        for (int j = 0; j < conv4_2.h; ++j){
            for (int k = 0; k < conv4_2.w; ++k){
                cout << *(conv4_2.data + conv4_2.cstep*i + conv4_2.w*j + k) << ",";
            }
            cout << endl;
        }
        cout << endl;
        cout << endl;
    }
    return 0;
}

Experiment 3: do the same preprocessing with ncnn itself and feed the result to the ncnn model

The code is as follows:

#include <iostream>
#include <string>
#include <opencv2/opencv.hpp>
using std::string;
using std::cout;
using std::endl;
#include "net.h"
int main(){
    int ws = 128;
    int hs = 106;
    string path_img = "face.jpg";
    cv::Mat img_in = cv::imread(path_img, CV_LOAD_IMAGE_COLOR);
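    // from_pixels_resize converts the interleaved 8-bit BGR pixels to planar float
    // channels, swaps BGR to RGB (PIXEL_BGR2RGB), and resizes to ws x hs in one call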
    ncnn::Mat in = ncnn::Mat::from_pixels_resize(img_in.data, ncnn::Mat::PIXEL_BGR2RGB, img_in.cols, img_in.rows, ws, hs);
    const float mean_vals[3] = { 127.5f, 127.5f, 127.5f };
    const float norm_vals[3] = { 0.0078125f, 0.0078125f, 0.0078125f };
    in.substract_mean_normalize(mean_vals, norm_vals);
    cout << "in.w: " << in.w << "\n" \
         << "in.h: " << in.h << "\n" \
         << "in.c: " << in.c << "\n" \
         << "in.cstep: " << in.cstep << "\n";

    ncnn::Net pnet;
    string path = "/home/wangk/models/mtcnn_c2/";
    string path_param_str = path + "det1.param";
    string path_model_str = path + "det1.bin";
    const char* path_param = path_param_str.c_str();
    const char* path_model = path_model_str.c_str();
    pnet.load_param(path_param);
    pnet.load_model(path_model);

    ncnn::Extractor ex = pnet.create_extractor();
    ex.set_light_mode(true);
    ncnn::Mat conv4_2;
    ncnn::Mat prob1;

    ex.input("data",in);
    ex.extract("conv4-2", conv4_2);
    ex.extract("prob1", prob1);

    cout << "prob1.w: " << prob1.w << endl; 
    cout << "prob1.h: " << prob1.h << endl; 
    cout << "prob1.c: " << prob1.c << endl;
    cout << "prob1.cstep: " << prob1.cstep << endl;
    for (int k = 0; k < prob1.c; ++k){
        for (int i=0; i < prob1.h; ++i){
            for (int j=0; j<prob1.w; ++j){
                cout << *(prob1.data + prob1.cstep*k + prob1.w*i + j) << ",";
            }
            cout << endl;
        }
        cout << endl;
        cout << endl;
    }

    cout << "conv4_2.w: " << conv4_2.w << endl;
    cout << "conv4_2.h: " << conv4_2.h << endl;
    cout << "conv4_2.c: " << conv4_2.c << endl;
    cout << "conv4_2.cstep: " << conv4_2.cstep << endl;
    for (int i = 0; i < conv4_2.c; ++i){
        for (int j = 0; j < conv4_2.h; ++j){
            for (int k = 0; k < conv4_2.w; ++k){
                cout << *(conv4_2.data + conv4_2.cstep*i + conv4_2.w*j + k) << ",";
            }
            cout << endl;
        }
        cout << endl;
        cout << endl;
    }
    return 0;
}

References:
[1] ncnn 组件使用指北 alexnet (ncnn wiki: component usage guide, AlexNet example)
[2] https://github.com/Tencent/ncnn/issues/50
