NCNN-Net

Path: src/net.h and src/net.cpp

These are among the most important files in the NCNN source tree.

Source code

class Net {
public:
    Net();
    virtual ~Net();

public:
    // configuration options
    Option opt;
	
    // register custom layers
    int register_custom_layer(const char* type, layer_creator_func creator, layer_destroyer_func destroyer = 0, void* userdata = 0);
    virtual int custom_layer_to_index(const char* type);
    int register_custom_layer(int index, layer_creator_func creator, layer_destroyer_func destroyer = 0, void* userdata = 0);
    
    // overloads for loading the network structure (param) and weights (model)
    int load_param(const DataReader& dr);
    int load_param_bin(const DataReader& dr);
    int load_model(const DataReader& dr);
    int load_param(FILE* fp);
    int load_param(const char* protopath);
    int load_param_mem(const char* mem);    
    int load_param_bin(FILE* fp);
    int load_param_bin(const char* protopath);  
    int load_model(FILE* fp);
    int load_model(const char* modelpath);  
    int load_param(const unsigned char* mem);
    int load_model(const unsigned char* mem);
    
    void clear();
    Extractor create_extractor() const;
    
    // get input/output blob indexes and names
    const std::vector<int>& input_indexes() const;
    const std::vector<int>& output_indexes() const;
    const std::vector<const char*>& input_names() const;
    const std::vector<const char*>& output_names() const;
    // get the network's blobs and layers
    const std::vector<Blob>& blobs() const;
    const std::vector<Layer*>& layers() const;
    std::vector<Blob>& mutable_blobs();
    std::vector<Layer*>& mutable_layers();   
    // look up blobs/layers by name; create custom or overwritten layers
    int find_blob_index_by_name(const char* name) const;
    int find_layer_index_by_name(const char* name) const;
    virtual Layer* create_custom_layer(const char* type);
    virtual Layer* create_overwrite_builtin_layer(const char* type);
    virtual Layer* create_custom_layer(int index);
    virtual Layer* create_overwrite_builtin_layer(int typeindex);
    
    NetPrivate* const d;
};
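
Note that Net follows the pimpl idiom: the public class only exposes the API, while all actual state (blobs, layers, custom-layer registries, allocators) lives in NetPrivate, reached through the const pointer d and shown further below.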

int Net::register_custom_layer(const char* type, layer_creator_func creator, layer_destroyer_func destroyer, void* userdata) {
    int typeindex = layer_to_index(type);  // look up the built-in index for this type; for a user-defined layer this returns -1
    if (typeindex != -1) {  // a non -1 index means a built-in layer of this name exists, so register an overwrite for it
        NCNN_LOGE("overwrite built-in layer type %s", type);

        for (size_t i = 0; i < d->overwrite_builtin_layer_registry.size(); i++) {
            if (d->overwrite_builtin_layer_registry[i].typeindex == typeindex) {
                // an overwrite for this index already exists; replace that entry
                NCNN_LOGE("overwrite existing overwritten built-in layer index %d", typeindex);

                d->overwrite_builtin_layer_registry[i].creator = creator;
                d->overwrite_builtin_layer_registry[i].destroyer = destroyer;
                d->overwrite_builtin_layer_registry[i].userdata = userdata;
                return 0;
            }
        }
        // no overwrite registered yet; append a new entry
        struct overwrite_builtin_layer_registry_entry entry = {typeindex, creator, destroyer, userdata};
        d->overwrite_builtin_layer_registry.push_back(entry);
        return 0;
    }

    int custom_index = custom_layer_to_index(type);
    if (custom_index == -1) {
        // not registered before: add the user-defined layer
        struct custom_layer_registry_entry entry = {type, creator, destroyer, userdata};
        d->custom_layer_registry.push_back(entry);
    } else {
        // overwrite the existing user-defined layer
        NCNN_LOGE("overwrite existing custom layer type %s", type);
        d->custom_layer_registry[custom_index].name = type;
        d->custom_layer_registry[custom_index].creator = creator;
        d->custom_layer_registry[custom_index].destroyer = destroyer;
        d->custom_layer_registry[custom_index].userdata = userdata;
    }

    return 0;
}

// the int-index overload is implemented the same way as above
int Net::register_custom_layer(int index, layer_creator_func creator, layer_destroyer_func destroyer, void* userdata);
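
As a hedged sketch of how registration is typically used (MyLayer is a hypothetical layer; DEFINE_LAYER_CREATOR is the helper macro from ncnn's layer.h that generates MyLayer_layer_creator):

#include "net.h"
#include "layer.h"

class MyLayer : public ncnn::Layer {
public:
    MyLayer() {
        one_blob_only = true;    // single input, single output
        support_inplace = true;  // may write results into its input
    }
    virtual int forward_inplace(ncnn::Mat& /*bottom_top_blob*/, const ncnn::Option& /*opt*/) const {
        return 0;  // identity op, for illustration only
    }
};

DEFINE_LAYER_CREATOR(MyLayer)

// registration must happen before load_param(), which resolves layer types while parsing:
//   ncnn::Net net;
//   net.register_custom_layer("MyLayer", MyLayer_layer_creator);
//   net.load_param("model.param");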

Before reading load_param, it helps to know the param file format: https://github.com/Tencent/ncnn/wiki/param-and-model-file-structure
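
For example, a minimal param file might look like this (the layer lines and their params are illustrative):

7767517
3 3
Input            data    0 1 data
Convolution      conv1   1 1 data conv1 0=64 1=3 3=2 6=1728
Softmax          prob    1 1 conv1 prob

The first line is the magic number, the second holds layer_count and blob_count, and each layer line reads [type] [name] [bottom_count] [top_count] [bottom blob names] [top blob names] [id=value layer params].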
int Net::load_param(const DataReader& dr) {
#define SCAN_VALUE(fmt, v)                \
    if (dr.scan(fmt, &v) != 1)            \
    {                                     \
        NCNN_LOGE("parse " #v " failed"); \
        return -1;                        \
    }
	
    int magic = 0;  // format magic / version number
    SCAN_VALUE("%d", magic)
    if (magic != 7767517) {
        NCNN_LOGE("param is too old, please regenerate");
        return -1;
    }

    // parse
    int layer_count = 0;  // number of layers
    int blob_count = 0;   // number of blobs (nodes)
    SCAN_VALUE("%d", layer_count)
    SCAN_VALUE("%d", blob_count)
    if (layer_count <= 0 || blob_count <= 0) {
        NCNN_LOGE("invalid layer_count or blob_count");
        return -1;
    }

    d->layers.resize((size_t)layer_count);  // resize
    d->blobs.resize((size_t)blob_count);

    ParamDict pd;        // used to read per-layer parameters
    int blob_index = 0;  // tracks the next free blob index
    for (int i = 0; i < layer_count; i++) {
        // read the layer type, layer name, bottom (input) blob count and top (output) blob count
        char layer_type[256];
        char layer_name[256];
        int bottom_count = 0;
        int top_count = 0;
        SCAN_VALUE("%255s", layer_type)
        SCAN_VALUE("%255s", layer_name)
        SCAN_VALUE("%d", bottom_count)
        SCAN_VALUE("%d", top_count)

        // create a layer object: overwritten built-in first, then built-in, then custom
        Layer* layer = create_overwrite_builtin_layer(layer_type);
        if (!layer) {
            layer = create_layer(layer_type);
        }
        if (!layer) {
            layer = create_custom_layer(layer_type);
        }
        if (!layer) {
            NCNN_LOGE("layer %s not exists or registered", layer_type);
            clear();
            return -1;
        }
        // set layer attributes
        layer->type = std::string(layer_type);
        layer->name = std::string(layer_name);

        layer->bottoms.resize(bottom_count);
        for (int j = 0; j < bottom_count; j++) {
            // read the input blob name
            char bottom_name[256];
            SCAN_VALUE("%255s", bottom_name)
		
            // check whether this blob is already recorded in the network's blob set
            int bottom_blob_index = find_blob_index_by_name(bottom_name);
            if (bottom_blob_index == -1) {
                // not recorded yet: add it, assign an index and set its name
                Blob& blob = d->blobs[blob_index];
                bottom_blob_index = blob_index;
                blob.name = std::string(bottom_name);
                blob_index++;
            }
            // record this layer as the blob's consumer; Blob stores a single consumer,
            // so if several layers read the same blob, only the last one parsed is kept
            Blob& blob = d->blobs[bottom_blob_index];
            blob.consumer = i;
            layer->bottoms[j] = bottom_blob_index;
        }
		
        layer->tops.resize(top_count);
        for (int j = 0; j < top_count; j++) {
            // create the output blob
            Blob& blob = d->blobs[blob_index];
            char blob_name[256];
            SCAN_VALUE("%255s", blob_name)
            blob.name = std::string(blob_name);
            blob.producer = i;
            layer->tops[j] = blob_index;
            blob_index++;
        }

        // load the layer-specific parameters
        int pdlr = pd.load_param(dr);
        if (pdlr != 0) {
            NCNN_LOGE("ParamDict load_param %d %s failed", i, layer->name.c_str());
            continue;
        }

        if (layer->support_int8_storage) {
            // no int8 gpu support yet
            opt.use_vulkan_compute = false;
        }

        // this calls Mat ParamDict::get(int id, const Mat& def); ncnn reserves param id 30
        // for shape hints: 4 ints (dims, w, h, c) per output blob, read in the loop below
        Mat shape_hints = pd.get(30, Mat());
        if (!shape_hints.empty()) {
            const int* psh = shape_hints;
            for (int j = 0; j < top_count; j++) {
                // set the shape of this output blob
                Blob& blob = d->blobs[layer->tops[j]];

                int dims = psh[0];
                if (dims == 1) {
                    blob.shape = Mat(psh[1], (void*)0, 4u, 1);
                }
                if (dims == 2) {
                    blob.shape = Mat(psh[1], psh[2], (void*)0, 4u, 1);
                }
                if (dims == 3) {
                    blob.shape = Mat(psh[1], psh[2], psh[3], (void*)0, 4u, 1);
                }

                psh += 4;
            }
        }

        // propagate the blob shapes to the layer's bottom/top shape lists
        layer->bottom_shapes.resize(bottom_count);
        for (int j = 0; j < bottom_count; j++)
        {
            layer->bottom_shapes[j] = d->blobs[layer->bottoms[j]].shape;
        }

        layer->top_shapes.resize(top_count);
        for (int j = 0; j < top_count; j++)
        {
            layer->top_shapes[j] = d->blobs[layer->tops[j]].shape;
        }

        // pull out layer specific feature disabled set
        layer->featmask = pd.get(31, 0);

        // the layer loads its own parameters from the ParamDict
        int lr = layer->load_param(pd);
        if (lr != 0) {
            NCNN_LOGE("layer load_param %d %s failed", i, layer->name.c_str());
            continue;
        }
        // store the layer into the network's layer vector
        d->layers[i] = layer;
    }
    // update the network's input/output indexes and names
    d->update_input_output_indexes();
    d->update_input_output_names();

#undef SCAN_VALUE
    return 0;
}

// load the model's trained weights
int Net::load_model(const DataReader& dr) {
    if (d->layers.empty()) {
        NCNN_LOGE("network graph not ready");
        return -1;
    }

    int layer_count = (int)d->layers.size();

    // load file
    int ret = 0;

    ModelBinFromDataReader mb(dr);
    for (int i = 0; i < layer_count; i++) {
        // each layer loads its own weights
        Layer* layer = d->layers[i];
        if (!layer) {
            NCNN_LOGE("load_model error at layer %d, parameter file has inconsistent content.", i);
            ret = -1;
            break;
        }
        // load the layer's trainable parameters, e.g. convolution weights
        int lret = layer->load_model(mb);
        if (lret != 0) {
            NCNN_LOGE("layer load_model %d %s failed", i, layer->name.c_str());
            ret = -1;
            break;
        }

        if (layer->support_int8_storage) {
            // no int8 gpu support yet
            opt.use_vulkan_compute = false;
        }
    }

    for (int i = 0; i < layer_count; i++) {
        Layer* layer = d->layers[i];
        Option opt1 = get_masked_option(opt, layer->featmask);
        int cret = layer->create_pipeline(opt1);
        if (cret != 0) {
            NCNN_LOGE("layer create_pipeline %d %s failed", i, layer->name.c_str());
            ret = -1;
            break;
        }
    }

    // set up the local pool allocators
    if (opt.use_local_pool_allocator) {
        if (opt.blob_allocator == 0) {
            if (!d->local_blob_allocator) {
                d->local_blob_allocator = new PoolAllocator;
                d->local_blob_allocator->set_size_compare_ratio(0.f);
            }
        }
        if (opt.workspace_allocator == 0) {
            if (!d->local_workspace_allocator) {
                d->local_workspace_allocator = new PoolAllocator;
                d->local_workspace_allocator->set_size_compare_ratio(0.f);
            }
        }
    }
    return ret;
}

The remaining load_param / load_model overloads are thin wrappers that funnel into the DataReader-based functions above, as sketched below.
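
A sketch of that wrapper pattern, assuming ncnn's DataReaderFromStdio from datareader.h (the real net.cpp differs mainly in NCNN_STDIO/NCNN_STRING guards):

int Net::load_param(FILE* fp) {
    DataReaderFromStdio dr(fp);  // wraps a C FILE* as a DataReader
    return load_param(dr);
}

int Net::load_param(const char* protopath) {
    FILE* fp = fopen(protopath, "rb");
    if (!fp) {
        NCNN_LOGE("fopen %s failed", protopath);
        return -1;
    }
    int ret = load_param(fp);
    fclose(fp);
    return ret;
}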

// create an Extractor
Extractor Net::create_extractor() const {
    return Extractor(this, d->blobs.size());
}
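
Each Extractor carries its own blob_mats vector sized to the network's blob count (see ExtractorPrivate below), so several extractors can be created from one Net and used independently; the Net itself is not modified during inference, which is why create_extractor() is const.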

Layer* Net::create_custom_layer(int index) {
    // check that a custom layer with this index exists
    const size_t custom_layer_registry_entry_count = d->custom_layer_registry.size();
    if (index < 0 || static_cast<unsigned int>(index) >= custom_layer_registry_entry_count)
        return 0;
    // invoke the corresponding creator function
    layer_creator_func layer_creator = d->custom_layer_registry[index].creator;
    if (!layer_creator)
        return 0;

    Layer* layer = layer_creator(d->custom_layer_registry[index].userdata);
    layer->typeindex = ncnn::LayerType::CustomBit | index;  // CustomBit marks this as a custom layer so the index cannot collide with built-in types
    return layer;
}
class NetPrivate {
public:
    NetPrivate(Option& _opt);
    Option& opt;
    
    friend class Extractor;
    // model inference
    int forward_layer(int layer_index, std::vector<Mat>& blob_mats, const Option& opt) const;
    int convert_layout(Mat& bottom_blob, const Layer* layer, const Option& opt) const;
    int do_forward_layer(const Layer* layer, std::vector<Mat>& blob_mats, const Option& opt) const;
    
    // update input/output bookkeeping
    void update_input_output_indexes();
    void update_input_output_names();
    
    // network blobs and layers
    std::vector<Blob> blobs;
    std::vector<Layer*> layers;
	
    // input/output blob indexes and names
    std::vector<int> input_blob_indexes;
    std::vector<int> output_blob_indexes;
    std::vector<const char*> input_blob_names;
    std::vector<const char*> output_blob_names;

    // custom layer registries
    std::vector<custom_layer_registry_entry> custom_layer_registry;
    std::vector<overwrite_builtin_layer_registry_entry> overwrite_builtin_layer_registry;
	
    // allocators
    PoolAllocator* local_blob_allocator;
    PoolAllocator* local_workspace_allocator;
};

int NetPrivate::forward_layer(int layer_index, std::vector<Mat>& blob_mats, const Option& opt) const {
    const Layer* layer = layers[layer_index];
    // load bottom blobs
    for (size_t i = 0; i < layer->bottoms.size(); i++) {
        int bottom_blob_index = layer->bottoms[i];
        if (blob_mats[bottom_blob_index].dims == 0) {
            // recurse: compute the producer of the missing bottom blob first
            int ret = forward_layer(blobs[bottom_blob_index].producer, blob_mats, opt);
            if (ret != 0)
                return ret;
        }
    }

    int ret = 0;
    if (layer->featmask) {
        ret = do_forward_layer(layer, blob_mats, get_masked_option(opt, layer->featmask));
    } else {
        // compute this layer's outputs
        ret = do_forward_layer(layer, blob_mats, opt);
    }

    if (ret != 0)
        return ret;

    return 0;
}
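
To make the recursion concrete: for a chain Input -> Conv -> Softmax, extracting Softmax's output blob calls forward_layer on Softmax, which finds its bottom blob still empty (dims == 0) and recurses into the blob's producer Conv, which in turn recurses into Input; the layers therefore execute in topological order (Input, Conv, Softmax) as the recursion unwinds.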

int NetPrivate::do_forward_layer(const Layer* layer, std::vector<Mat>& blob_mats, const Option& opt) const {
    if (layer->one_blob_only) {
        // single-input, single-output fast path
        int bottom_blob_index = layer->bottoms[0];
        int top_blob_index = layer->tops[0];

        Mat& bottom_blob_ref = blob_mats[bottom_blob_index];
        Mat bottom_blob;

        if (opt.lightmode)
        {
            // deep copy for inplace forward if data is shared
            if (layer->support_inplace && *bottom_blob_ref.refcount != 1)
            {
                bottom_blob = bottom_blob_ref.clone(opt.blob_allocator);
            }
        }
        if (bottom_blob.dims == 0)
        {
            bottom_blob = bottom_blob_ref;
        }

        convert_layout(bottom_blob, layer, opt);

        // forward
        if (opt.lightmode && layer->support_inplace)
        {
            Mat& bottom_top_blob = bottom_blob;
            int ret = layer->forward_inplace(bottom_top_blob, opt);
            if (ret != 0)
                return ret;

            // store top blob
            blob_mats[top_blob_index] = bottom_top_blob;
        } else {
            Mat top_blob;
            int ret = layer->forward(bottom_blob, top_blob, opt);
            if (ret != 0)
                return ret;

            // store top blob
            blob_mats[top_blob_index] = top_blob;
        }

        if (opt.lightmode)
        {
            // delete after taken in light mode
            blob_mats[bottom_blob_index].release();
        }
    } else {
        std::vector<Mat> bottom_blobs(layer->bottoms.size());
        for (size_t i = 0; i < layer->bottoms.size(); i++)
        {
            int bottom_blob_index = layer->bottoms[i];

            Mat& bottom_blob_ref = blob_mats[bottom_blob_index];
            bottom_blobs[i].release();

            if (opt.lightmode)
            {
                // deep copy for inplace forward if data is shared
                if (layer->support_inplace && *bottom_blob_ref.refcount != 1)
                {
                    bottom_blobs[i] = bottom_blob_ref.clone(opt.blob_allocator);
                }
            }
            if (bottom_blobs[i].dims == 0)
            {
                bottom_blobs[i] = bottom_blob_ref;
            }

            convert_layout(bottom_blobs[i], layer, opt);
        }

        // forward
        if (opt.lightmode && layer->support_inplace)
        {
            std::vector<Mat>& bottom_top_blobs = bottom_blobs;
            int ret = layer->forward_inplace(bottom_top_blobs, opt);
            if (ret != 0)
                return ret;

            // store top blobs
            for (size_t i = 0; i < layer->tops.size(); i++)
            {
                int top_blob_index = layer->tops[i];

                blob_mats[top_blob_index] = bottom_top_blobs[i];
            }
        }
        else
        {
            std::vector<Mat> top_blobs(layer->tops.size());
            int ret = layer->forward(bottom_blobs, top_blobs, opt);
            if (ret != 0)
                return ret;

            // store top blobs
            for (size_t i = 0; i < layer->tops.size(); i++)
            {
                int top_blob_index = layer->tops[i];

                blob_mats[top_blob_index] = top_blobs[i];
            }
        }

        if (opt.lightmode) 
        {
            for (size_t i = 0; i < layer->bottoms.size(); i++)
            {
                int bottom_blob_index = layer->bottoms[i];

                // delete after taken in light mode
                blob_mats[bottom_blob_index].release();
            }
        }
    }

    return 0;
}
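
Two details of light mode (opt.lightmode) are worth calling out: an inplace-capable layer only deep-copies its input when the Mat's refcount is greater than 1, i.e. when another consumer still shares the underlying data; and every bottom blob is released as soon as the layer has consumed it, which minimizes peak memory but means a released intermediate blob has to be recomputed if it is extracted later.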

// update the network's input/output blob indexes
void NetPrivate::update_input_output_indexes() {
    input_blob_indexes.clear();
    output_blob_indexes.clear();

    for (size_t i = 0; i < layers.size(); i++) {	
        if (layers[i]->typeindex == LayerType::Input) {
            int blob_index = layers[i]->tops[0];
            input_blob_indexes.push_back(blob_index);
        }
    }

    for (size_t i = 0; i < blobs.size(); i++) {
        if (blobs[i].producer != -1 && blobs[i].consumer == -1) {
            output_blob_indexes.push_back(i);
        }
    }
}

class NCNN_EXPORT Extractor {
public:
    virtual ~Extractor();
    Extractor(const Extractor&);
    Extractor& operator=(const Extractor&);

    void clear();

    void set_light_mode(bool enable);
    void set_num_threads(int num_threads);
    void set_blob_allocator(Allocator* allocator);
    void set_workspace_allocator(Allocator* allocator);

    int input(const char* blob_name, const Mat& in);
    int extract(const char* blob_name, Mat& feat, int type = 0);

    int input(int blob_index, const Mat& in);
    int extract(int blob_index, Mat& feat, int type = 0);
protected:
    friend Extractor Net::create_extractor() const;
    Extractor(const Net* net, size_t blob_count);
private:
    ExtractorPrivate* const d;
};

class ExtractorPrivate {
public:
    ExtractorPrivate(const Net* _net)
        : net(_net)
    {
    }
    const Net* net;
    std::vector<Mat> blob_mats;
    Option opt;
};

// set a network input blob
int Extractor::input(int blob_index, const Mat& in) {
    if (blob_index < 0 || blob_index >= (int)d->blob_mats.size())
        return -1;

    d->blob_mats[blob_index] = in;
	
    return 0;
}
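
Note that input() merely stores the Mat into blob_mats; no computation happens here. Inference is deferred until extract() is called, which then evaluates only the subgraph needed to produce the requested blob.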

// run inference and extract a blob
int Extractor::extract(int blob_index, Mat& feat, int type) {
    if (blob_index < 0 || blob_index >= (int)d->blob_mats.size())
        return -1;

    int old_blocktime = get_kmp_blocktime();
    set_kmp_blocktime(d->opt.openmp_blocktime);

    int old_flush_denormals = get_flush_denormals();
    set_flush_denormals(d->opt.flush_denormals);

    int ret = 0;

    if (d->blob_mats[blob_index].dims == 0)
    {
        int layer_index = d->net->blobs()[blob_index].producer;

        // use local allocator
        if (d->opt.use_local_pool_allocator)
        {
            if (!d->opt.blob_allocator)
            {
                d->opt.blob_allocator = d->net->d->local_blob_allocator;
            }
            if (!d->opt.workspace_allocator)
            {
                d->opt.workspace_allocator = d->net->d->local_workspace_allocator;
            }
        }
        // the key step: recursively run forward_layer along the producer chain
        ret = d->net->d->forward_layer(layer_index, d->blob_mats, d->opt);
    }
    // fetch the requested result
    feat = d->blob_mats[blob_index];

    if (d->opt.use_packing_layout && (type == 0) && feat.elempack != 1)
    {
        Mat bottom_blob_unpacked;
        convert_packing(feat, bottom_blob_unpacked, 1, d->opt);
        feat = bottom_blob_unpacked;
    }

    if (feat.elembits() == 8 && (type == 0))
    {
        Mat feat_fp32;
        cast_int8_to_float32(feat, feat_fp32, d->opt);
        feat = feat_fp32;
    }

    if (d->opt.use_local_pool_allocator && feat.allocator == d->net->d->local_blob_allocator)
    {
        // detach the returned mat from local pool allocator
        // so we could destroy net instance much earlier
        feat = feat.clone();
    }

    set_kmp_blocktime(old_blocktime);
    set_flush_denormals(old_flush_denormals);
    return ret;
}
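
As the conversion branches above show, with the default type = 0 the returned feature is normalized to an unpacked fp32 Mat (elempack undone, int8 cast to float32); passing a non-zero type skips these conversions and hands back the blob in its internal layout.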

Example

Only the main function is shown; includes and helper declarations are omitted.

int main() {
    std::string img_path = "D:\\Code\\Vscode\\modelDeploy\\img\\sheep.png";
    std::string param_path = "D:\\Code\\Vscode\\modelDeploy\\ncnn\\squeezenet\\squeezenet_v1.1.param";
    std::string bin_path = "D:\\Code\\Vscode\\modelDeploy\\ncnn\\squeezenet\\squeezenet_v1.1.bin";
    cv::Mat img = cv::imread(img_path);
    if (img.empty()) {
        std::cout << "cv::imread " << img_path << " failed." << std::endl;
        return -1;
    }

    int w = img.cols;
    int h = img.rows;
    ncnn::Mat in = ncnn::Mat::from_pixels_resize(img.data, ncnn::Mat::PIXEL_BGR, w, h, 227, 227);

    // const float mean_vals[3] = {104.f, 117.f, 123.f};
    // const float norm[1] = {0.0f};
    // in.substract_mean_normalize(mean_vals, norm);

    ncnn::Net squeezenet;
    squeezenet.load_param(param_path.c_str());
    squeezenet.load_model(bin_path.c_str());

    ncnn::Extractor ex = squeezenet.create_extractor();
    ex.set_light_mode(true);
    ex.set_num_threads(4);

    ex.input("data", in);
    ncnn::Mat output;
    ex.extract("prob", output);

    // PrettyPrint(output);
    std::vector<float> cls_scores(output.w);
    for (int i = 0; i < output.w; ++i) {
        cls_scores[i] = output[i];
    }
    PrintTopk(cls_scores, 3);
    return 0;
}
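
The PrintTopk helper is not part of the listing above; a minimal sketch (a hypothetical helper mirroring ncnn's squeezenet example) could be:

#include <algorithm>
#include <functional>
#include <cstdio>
#include <utility>
#include <vector>

static void PrintTopk(const std::vector<float>& cls_scores, int topk) {
    // pair each score with its class index
    std::vector<std::pair<float, int> > vec(cls_scores.size());
    for (size_t i = 0; i < cls_scores.size(); i++)
        vec[i] = std::make_pair(cls_scores[i], (int)i);

    // partially sort so the topk highest scores come first
    std::partial_sort(vec.begin(), vec.begin() + topk, vec.end(),
                      std::greater<std::pair<float, int> >());

    for (int i = 0; i < topk; i++)
        fprintf(stderr, "%d = %f\n", vec[i].second, vec[i].first);
}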