1. Model Conversion Preparation
First, make sure the PyTorch-to-ONNX conversion is already done; the previous article in this series, "Building a Liveness Detection System with the CelebA_Spoof Dataset: Model Validation and Testing", covers exporting the PyTorch model to ONNX format.
2. Converting ONNX to MNN
Use the MNN conversion tool MNNConvert to perform the format conversion; the steps for building the converter are described in the official MNN repository. MNN is a lightweight deep neural network engine that supports both inference and training, and runs on servers, PCs, mobile phones, and all kinds of embedded devices.
./MNNConvert -f ONNX --modelFile live_spoof.onnx --MNNModel live_spoof.mnn
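After conversion it is worth a quick sanity check that the generated live_spoof.mnn actually loads and reports the expected input shape. Below is a minimal, self-contained sketch using the MNN C++ API; the file name check_model.cpp and the hard-coded model path are illustrative assumptions, not part of the project above.

// check_model.cpp (illustrative sketch, not part of the project structure below)
#include <iostream>
#include <memory>
#include "MNN/Interpreter.hpp"

int main() {
    const char *model_path = "live_spoof.mnn"; // output of the MNNConvert command above

    std::shared_ptr<MNN::Interpreter> interpreter(
        MNN::Interpreter::createFromFile(model_path));
    if (!interpreter) {
        std::cerr << "failed to load " << model_path << std::endl;
        return -1;
    }

    // Create a single-threaded CPU session and print the input dimensions
    MNN::ScheduleConfig config;
    config.numThread = 1;
    MNN::Session *session = interpreter->createSession(config);
    MNN::Tensor *input = interpreter->getSessionInput(session, nullptr);
    std::cout << "input dims: " << input->batch() << "x" << input->channel()
              << "x" << input->height() << "x" << input->width() << std::endl;

    interpreter->releaseSession(session);
    return 0;
}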
3. Building the C++ Inference Project
Project structure
mnn_inference/
├── CMakeLists.txt
├── include/
│ ├── InferenceInit.h
│ └── LiveSpoofDetector.h
├── src/
│ ├── InferenceInit.cpp
│ ├── LiveSpoofDetector.cpp
│ ├── main.cpp
│ └── CMakeLists.txt
└── third_party/MNN/
CMakeLists.txt in the project root
cmake_minimum_required(VERSION 3.12)
project(MNNInference)
# Set the C++ standard
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
# Find OpenCV
find_package(OpenCV REQUIRED)
# Third-party library: MNN
set(MNN_DIR ${CMAKE_SOURCE_DIR}/third_party/MNN)
include_directories(${MNN_DIR}/include)
# Add subdirectory
add_subdirectory(src)
# Main executable
add_executable(mnn_inference_main
src/main.cpp
)
# Link libraries
target_link_libraries(mnn_inference_main
PRIVATE
inference_lib
${MNN_DIR}/lib/libMNN.so
${OpenCV_LIBS}
)
# Install rules
install(TARGETS mnn_inference_main
RUNTIME DESTINATION bin
)
CMakeLists.txt in the src directory
# Add the static library
add_library(inference_lib STATIC
InferenceInit.cpp
LiveSpoofDetector.cpp
)
# Include directories
target_include_directories(inference_lib
PUBLIC
${CMAKE_SOURCE_DIR}/include
${MNN_DIR}/include
${OpenCV_INCLUDE_DIRS}
)
# Compile options
target_compile_options(inference_lib
PRIVATE
-Wall
-O3
)
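The root CMakeLists.txt builds the executable from src/main.cpp, which the article does not show. A minimal sketch of what such an entry point could look like is given below; the mnncore::LiveSpoofDetector constructor and detect() method used here are assumptions for illustration only, since the detector's interface does not appear in the listings above.

// src/main.cpp (hypothetical sketch; the LiveSpoofDetector interface used here is assumed)
#include <iostream>
#include "opencv2/opencv.hpp"
#include "LiveSpoofDetector.h"

int main(int argc, char **argv) {
    if (argc < 3) {
        std::cerr << "usage: mnn_inference_main <model.mnn> <image>" << std::endl;
        return -1;
    }
    cv::Mat frame = cv::imread(argv[2]);
    if (frame.empty()) {
        std::cerr << "failed to read image: " << argv[2] << std::endl;
        return -1;
    }
    // Assumed interface: construct from the .mnn path, then score a BGR image;
    // a higher score means the face is more likely to be live.
    mnncore::LiveSpoofDetector detector(argv[1], /*num_threads=*/1);
    float live_score = detector.detect(frame);
    std::cout << "live score: " << live_score << std::endl;
    return 0;
}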
Core implementation code
The common logic for loading the MNN model and setting up the mnn_session (the shared pre-processing plumbing) is extracted into a base handler class, so that a different model can be swapped in later by supplying only its model-specific pre-processing (a subclass sketch is given after the implementation below).
// InferenceInit.h
#ifndef MNN_CORE_MNN_HANDLER_H
#define MNN_CORE_MNN_HANDLER_H
#include "MNN/Interpreter.hpp"
#include "MNN/MNNDefine.h"
#include "MNN/Tensor.hpp"
#include "MNN/ImageProcess.hpp"
#include <iostream>
#include "opencv2/opencv.hpp"
#include "mylog.h"

#define LITEMNN_DEBUG
namespace mnncore
{
class BasicMNNHandler
{
protected:
    std::shared_ptr<MNN::Interpreter> mnn_interpreter;
    MNN::Session *mnn_session = nullptr;
    MNN::Tensor *input_tensor = nullptr; // assume single input.
    MNN::ScheduleConfig schedule_config;
    std::shared_ptr<MNN::CV::ImageProcess> pretreat; // init at subclass
    const char *log_id = nullptr;
    const char *mnn_path = nullptr;
    const char *mnn_model_data = nullptr;
    //int mnn_model_size = 0;

protected:
    const int num_threads; // initialize at runtime.
    int input_batch;
    int input_channel;
    int input_height;
    int input_width;
    int dimension_type;
    int num_outputs = 1;

protected:
    explicit BasicMNNHandler(const std::string &_mnn_path, int _num_threads = 1);
    int initialize_handler();
    std::string turnHeadDataToString(std::string headData);
    virtual ~BasicMNNHandler();

    // un-copyable
protected:
    BasicMNNHandler(const BasicMNNHandler &) = delete;
    BasicMNNHandler(BasicMNNHandler &&) = delete;
    BasicMNNHandler &operator=(const BasicMNNHandler &) = delete;
    BasicMNNHandler &operator=(BasicMNNHandler &&) = delete;

protected:
    virtual void transform(const cv::Mat &mat) = 0; // model-specific pre-processing, implemented by subclasses
private:
    void print_debug_string();
};
} // namespace mnncore

#endif // MNN_CORE_MNN_HANDLER_H
// InferenceInit.cpp
#include "InferenceInit.h"

namespace mnncore
{
    BasicMNNHandler::BasicMNNHandler(
        const std::string &_mnn_path, int _num_threads) :
        log_id(_mnn_path.data()), mnn_path(_mnn_path.data()), num_threads(_num_threads)
    {
        //initialize_handler();
    }

    BasicMNNHandler::~BasicMNNHandler()
    {
        // release the session owned by this handler
        if (mnn_interpreter && mnn_session)
            mnn_interpreter->releaseSession(mnn_session);
    }

    int BasicMNNHandler::initialize_handler()
    {
        std::cout << "load Model from file: " << mnn_path << "\n";
        // 1. create interpreter from the .mnn file
        mnn_interpreter = std::shared_ptr<MNN::Interpreter>(MNN::Interpreter::createFromFile(mnn_path));
        myLog(ERROR_, "mnn_interpreter createFromFile done!");
        if (nullptr == mnn_interpreter) {
            std::cout << "load MNN model failed." << std::endl;
            return -1;
        }
        // 2. init schedule_config
        schedule_config.numThread = (int) num_threads;
        MNN::BackendConfig backend_config;
        backend_config.precision = MNN::BackendConfig::Precision_Low; // default Precision_High
        backend_config.memory = MNN::BackendConfig::Memory_Low;
        backend_config.power = MNN::BackendConfig::Power_Low;
        schedule_config.backendConfig = &backend_config;
        // 3. create session and query the (single) input tensor
        mnn_session = mnn_interpreter->createSession(schedule_config);
        input_tensor = mnn_interpreter->getSessionInput(mnn_session, nullptr);
        // 4. record input dims for the pre-processing step
        input_batch = input_tensor->batch();
        input_channel = input_tensor->channel();
        input_height = input_tensor->height();
        input_width = input_tensor->width();
        dimension_type = input_tensor->getDimensionType();
        return 0;
    }
} // namespace mnncore
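With the base handler in place, a concrete detector only needs to derive from BasicMNNHandler and supply its own pre-processing and output handling. The following is a minimal sketch of how LiveSpoofDetector could look; the BGR-to-RGB conversion, the 1/255 scaling, the resize-only transform(), and the single-float detect() output are illustrative assumptions, since the article's actual LiveSpoofDetector code is not shown here.

// LiveSpoofDetector (illustrative sketch; pre-processing constants and output handling are assumed)
#include "InferenceInit.h"

namespace mnncore
{
class LiveSpoofDetector : public BasicMNNHandler
{
public:
    explicit LiveSpoofDetector(const std::string &mnn_path, int num_threads = 1)
        : BasicMNNHandler(mnn_path, num_threads)
    {
        initialize_handler();
        // Assumed pre-processing: BGR input -> RGB, pixel values scaled to [0, 1]
        MNN::CV::ImageProcess::Config config;
        config.sourceFormat = MNN::CV::BGR;
        config.destFormat = MNN::CV::RGB;
        config.mean[0] = 0.f;   config.mean[1] = 0.f;   config.mean[2] = 0.f;
        config.normal[0] = 1.f / 255.f; config.normal[1] = 1.f / 255.f; config.normal[2] = 1.f / 255.f;
        pretreat = std::shared_ptr<MNN::CV::ImageProcess>(MNN::CV::ImageProcess::create(config));
    }

    // Run one inference and return the first float of the (assumed) single output tensor.
    float detect(const cv::Mat &frame)
    {
        transform(frame);
        mnn_interpreter->runSession(mnn_session);
        MNN::Tensor *output = mnn_interpreter->getSessionOutput(mnn_session, nullptr);
        MNN::Tensor host(output, output->getDimensionType()); // host copy of the output
        output->copyToHostTensor(&host);
        return host.host<float>()[0];
    }

protected:
    // Resize to the model's input resolution and feed the input tensor.
    void transform(const cv::Mat &mat) override
    {
        cv::Mat canvas;
        cv::resize(mat, canvas, cv::Size(input_width, input_height));
        pretreat->convert(canvas.data, input_width, input_height, (int) canvas.step[0], input_tensor);
    }
};
} // namespace mnncore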
