OpenVINO Minimal Demo: Super Resolution in C++

This article walks through implementing image super-resolution with the Intel OpenVINO toolkit: configuring the environment, reading the model, configuring inputs and outputs, loading the model onto a device, and then running inference and processing the output.


Environment configuration:
Include directories:

①C:\Program Files (x86)\Intel\openvino_2021.2.185\deployment_tools\inference_engine\include
②D:\opencv\opencv344\opencv\build\install\include\opencv2
③D:\opencv\opencv344\opencv\build\install\include\opencv
④D:\opencv\opencv344\opencv\build\install\include
⑤C:\Program Files (x86)\Intel\openvino_2021.2.185\deployment_tools\inference_engine\samples\cpp\common
Library directories:
①C:\Program Files (x86)\Intel\openvino_2021.2.185\deployment_tools\ngraph\lib
②C:\Program Files (x86)\Intel\openvino_2021.2.185\deployment_tools\inference_engine\lib\intel64\Debug
③D:\opencv\opencv344\opencv\build\install\x64\vc15\lib
④C:\Program Files (x86)\Intel\openvino_2021.2.185\deployment_tools\inference_engine\external\hddl\lib
Additional dependencies (.lib files to link):
hddlapi.lib
inference_engined.lib
opencv_aruco344d.lib
opencv_bgsegm344d.lib
opencv_bioinspired344d.lib
opencv_calib3d344d.lib
opencv_ccalib344d.lib
opencv_core344d.lib
opencv_datasets344d.lib
opencv_dnn344d.lib
opencv_dnn_objdetect344d.lib
opencv_dpm344d.lib
opencv_face344d.lib
opencv_features2d344d.lib
opencv_flann344d.lib
opencv_fuzzy344d.lib
opencv_hfs344d.lib
opencv_highgui344d.lib
opencv_imgcodecs344d.lib
opencv_imgproc344d.lib
opencv_img_hash344d.lib
opencv_line_descriptor344d.lib
opencv_ml344d.lib
opencv_objdetect344d.lib
opencv_optflow344d.lib
opencv_phase_unwrapping344d.lib
opencv_photo344d.lib
opencv_plot344d.lib
opencv_reg344d.lib
opencv_rgbd344d.lib
opencv_saliency344d.lib
opencv_shape344d.lib
opencv_stereo344d.lib
opencv_stitching344d.lib
opencv_structured_light344d.lib
opencv_superres344d.lib
opencv_surface_matching344d.lib
opencv_text344d.lib
opencv_tracking344d.lib
opencv_video344d.lib
opencv_videoio344d.lib
opencv_videostab344d.lib
opencv_xfeatures2d344d.lib
opencv_ximgproc344d.lib
opencv_xobjdetect344d.lib
opencv_xphoto344d.lib
ngraphd.lib
onnx_importerd.lib
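
Before building the full demo it is worth checking that the include paths and libraries above are wired up correctly. The following is a minimal sanity-check sketch (the file is not part of the demo) that only queries the Inference Engine version and the devices it can see; if it compiles, links, and runs, the project configuration is fine:

#include <iostream>
#include <string>
#include <inference_engine.hpp>

int main() {
	// Print the Inference Engine version bundled with this OpenVINO installation.
	const InferenceEngine::Version* ver = InferenceEngine::GetInferenceEngineVersion();
	std::cout << "Inference Engine: " << ver->description << " (build " << ver->buildNumber << ")" << std::endl;

	// List the devices this installation can use (e.g. CPU, GPU, MYRIAD).
	InferenceEngine::Core ie;
	for (const std::string& device : ie.GetAvailableDevices()) {
		std::cout << "Available device: " << device << std::endl;
	}
	return 0;
}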

Code:

// ForOpenvinoTest.cpp : This file contains the "main" function. Program execution begins and ends here.
//
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <vector>
#include <memory>
#include <string>
#include <samples/common.hpp>
#include <windows.h>
#include <wchar.h>
#include <inference_engine.hpp>
#include <samples/ocv_common.hpp>
#include <samples/classification_results.h>

using namespace InferenceEngine;

#if defined(ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32)
#define tcout std::wcout
#define file_name_t std::wstring
#define imread_t imreadW
#define ClassificationResult_t ClassificationResultW
#else
#define tcout std::cout
#define file_name_t std::string
#define imread_t cv::imread
#define ClassificationResult_t ClassificationResult
#endif

std::wstring StringToWString(const std::string& str)
{
	int num = MultiByteToWideChar(CP_UTF8, 0, str.c_str(), -1, NULL, 0);
	wchar_t *wide = new wchar_t[num];
	MultiByteToWideChar(CP_UTF8, 0, str.c_str(), -1, wide, num);
	std::wstring w_str(wide);
	delete[] wide;
	return w_str;
}
int main() {
	// --------------------------- 1. Load inference engine instance -------------------------------------
	Core ie;
	// --------------------------- 2. Read a model in OpenVINO Intermediate Representation (.xml and .bin files) or ONNX (.onnx file) format ---------
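	// ReadNetwork also loads the weights from the .bin file that sits next to the .xml with the same base name.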
	CNNNetwork network = ie.ReadNetwork("C:/Users/sjsys/Desktop/1/single-image-super-resolution-1032.xml");
	// --------------------------- 3. Configure input & output ---------------------------------------------
	// --------------------------- Prepare input blobs -----------------------------------------------------
	/** Collect images**/
	std::cout<< "Preparing input blobs" << std::endl;
	/** Taking information about all topology inputs **/
	ICNNNetwork::InputShapes inputShapes(network.getInputShapes());
	std::string lrInputBlobName = inputShapes.begin()->first;
	SizeVector lrShape = inputShapes[lrInputBlobName];
	// A model like single-image-super-resolution-???? may take bicubic interpolation of the input image as the
	// second input
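	// The smaller input takes the raw low-resolution image and the larger one takes a bicubic-upscaled
	// copy of it; the checks below work out which input is which by comparing spatial dimensions.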
	std::string bicInputBlobName;
	if (inputShapes.size() == 2) {
		bicInputBlobName = (++inputShapes.begin())->first;
		SizeVector bicShape = inputShapes[bicInputBlobName];
		if (bicShape.size() != 4) {
			throw std::logic_error("Number of dimensions for both inputs must be 4");
		}
		if (lrShape[2] >= bicShape[2] && lrShape[3] >= bicShape[3]) {
			lrInputBlobName.swap(bicInputBlobName);
			lrShape.swap(bicShape);
		}
		else if (!(lrShape[2] <= bicShape[2] && lrShape[3] <= bicShape[3])) {
			throw std::logic_error("Each spatial dimension of one input must surpass or be equal to a spatial"
				"dimension of another input");
		}
	}
	/** Collect images**/
	std::vector<cv::Mat> inputImages;
	std::string imageNames = "C:/Users/sjsys/Desktop/1/3.bmp";
	cv::Mat img = cv::imread(imageNames, cv::IMREAD_COLOR);
	inputImages.push_back(img);	
	if (inputImages.empty()) throw std::logic_error("Valid input images were not found!");

	/** Setting batch size using image count **/
	inputShapes[lrInputBlobName][0] = inputImages.size();
	if (!bicInputBlobName.empty()) {
		inputShapes[bicInputBlobName][0] = inputImages.size();
	}
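	// Propagate the new batch dimension through the network before it is loaded to the device.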
	network.reshape(inputShapes);
	std::cout << "Batch size is " << std::to_string(network.getBatchSize()) << std::endl;

	// --------------------------- Prepare output blobs ----------------------------------------------------
	std::cout << "Preparing output blobs" << std::endl;

	OutputsDataMap outputInfo(network.getOutputsInfo());
	// BlobMap outputBlobs;
	std::string firstOutputName;
	for (auto &item : outputInfo) {
		if (firstOutputName.empty()) {
			firstOutputName = item.first;
		}
		DataPtr outputData = item.second;
		if (!outputData) {
			throw std::logic_error("output data pointer is not valid");
		}
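		// Ask for FP32 output so the super-resolved pixel values can be read back directly as floats.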

		item.second->setPrecision(Precision::FP32);
	}

	// --------------------------- 4. Loading model to the device ------------------------------------------
	ExecutableNetwork executable_network = ie.LoadNetwork(network, "CPU");
	// --------------------------- 5. Create infer request -------------------------------------------------
	InferRequest infer_request = executable_network.CreateInferRequest();
	// --------------------------- 6. Prepare input --------------------------------------------------------
	Blob::Ptr lrInputBlob = infer_request.GetBlob(lrInputBlobName);
	for (size_t i = 0; i < inputImages.size(); ++i) {
		cv::Mat img = inputImages[i];
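		// matU8ToBlob (from samples/ocv_common.hpp) copies the BGR U8 image into the network's float input blob at batch position i.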
		matU8ToBlob<float_t>(img, lrInputBlob, i);

		if (!bicInputBlobName.empty()) {
			Blob::Ptr bicInputBlob = infer_request.GetBlob(bicInputBlobName);
			int w = bicInputBlob->getTensorDesc().getDims()[3];
			int h = bicInputBlob->getTensorDesc().getDims()[2];

			cv::Mat resized;
			cv::resize(img, resized, cv::Size(w, h), 0, 0, cv::INTER_CUBIC);
			matU8ToBlob<float_t>(resized, bicInputBlob, i);
		}
	}
	// --------------------------- 7. Do inference ---------------------------------------------------------
	infer_request.Infer();
	// --------------------------- 8. Process output -------------------------------------------------------
	Blob::Ptr output = infer_request.GetBlob(firstOutputName);
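	// Lock the output blob for read-only (rmap) access; the data is FP32 in NCHW layout.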
	LockedMemory<const void> outputBlobMapped = as<MemoryBlob>(output)->rmap();
	const auto outputData = outputBlobMapped.as<float*>();
	size_t numOfImages = output->getTensorDesc().getDims()[0];
	size_t numOfChannels = output->getTensorDesc().getDims()[1];
	size_t h = output->getTensorDesc().getDims()[2];
	size_t w = output->getTensorDesc().getDims()[3];
	size_t numOfPixels = w * h;
	std::cout << "Output size [N,C,H,W]: " << numOfImages << ", " << numOfChannels << ", " << h << ", " << w << std::endl;
	for (size_t i = 0; i < numOfImages; ++i) {
		std::vector<cv::Mat> imgPlanes;
		if (numOfChannels == 3) {
			imgPlanes = std::vector<cv::Mat>{
				  cv::Mat(h, w, CV_32FC1, &(outputData[i * numOfPixels * numOfChannels])),
				  cv::Mat(h, w, CV_32FC1, &(outputData[i * numOfPixels * numOfChannels + numOfPixels])),
				  cv::Mat(h, w, CV_32FC1, &(outputData[i * numOfPixels * numOfChannels + numOfPixels * 2])) };
		}
		else {
			imgPlanes = std::vector<cv::Mat>{ cv::Mat(h, w, CV_32FC1, &(outputData[i * numOfPixels * numOfChannels])) };
			cv::threshold(imgPlanes[0], imgPlanes[0], 0.5f, 1.0f, cv::THRESH_BINARY);
		}

		for (auto & img : imgPlanes)
			img.convertTo(img, CV_8UC1, 255);
		cv::Mat resultImg;
		cv::merge(imgPlanes, resultImg);	
		cv::imshow("result", resultImg);
		
	}
	
	std::cout << "程序完成啦!" << std::endl;
	cv::waitKey(0);
	return EXIT_SUCCESS;
}
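
The demo only shows the result in a window; to also save the upscaled image, a cv::imwrite call can be added right after cv::merge, for example (the output file name is just an example):

		// Optional: save the upscaled result in addition to displaying it (example file name).
		cv::imwrite("result.png", resultImg);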


