Modifying Caffe's classification.cpp to Support Batch Image Reading

This article describes how to modify Caffe's classification.cpp example so that it reads and classifies images in batches from a directory, instead of taking a single image on the command line. This makes the program far more convenient for large-scale evaluation, for example when sweeping a trained model over an entire folder of training or test images and recording the misclassified files.
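
In outline, the change keeps the stock Classifier class from classification.cpp and adds two pieces: a helper, EnumFiles, that collects every image file name under a directory, and a loop in main() that classifies each collected file and logs the misclassified ones. The condensed sketch below shows that flow; it assumes a Classifier instance named classifier has already been constructed from the model, weights, mean and label files, that EnumFiles is the helper from the full listing further down, and that the folder path is only a placeholder.

	std::vector<std::string> files;
	EnumFiles("L:\\some\\image\\folder", files);      // gather *.bmp / *.jpg / *.jpeg paths
	for (size_t i = 0; i < files.size(); ++i) {
		cv::Mat img = cv::imread(files[i], -1);       // -1: keep the original channel count
		if (img.empty()) continue;                    // skip files OpenCV cannot decode
		std::vector<Prediction> preds = classifier.Classify(img);
		std::cout << files[i] << " -> " << preds[0].first
			<< " (" << preds[0].second << ")" << std::endl;
	}

The full program below follows this pattern, but instead of printing every result it writes the names of misclassified files to a text log.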


#include <caffe/caffe.hpp>
#ifdef USE_OPENCV
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#endif  // USE_OPENCV
#include <algorithm>
#include <iosfwd>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <cstdio>       // fopen / fprintf for the result log
#include <fstream>      // std::ifstream for the label file
#include <iostream>
#include <io.h>
#include <direct.h>
#include <windows.h>    // FindFirstFile / FindNextFile for directory enumeration
#ifdef USE_OPENCV
using namespace caffe;  // NOLINT(build/namespaces)
using std::string;
using namespace cv;
/* Pair (label, confidence) representing a prediction. */
typedef std::pair<string, float> Prediction;
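
/* Apart from the batch additions (the EnumFiles helper and the loop in main()),
 * the Classifier class below is the stock Caffe classification.cpp example. */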

class Classifier {
public:
	Classifier(const string& model_file,
		const string& trained_file,
		const string& mean_file,
		const string& label_file);

	std::vector<Prediction> Classify(const cv::Mat& img, int N = 5);

private:
	void SetMean(const string& mean_file);

	std::vector<float> Predict(const cv::Mat& img);

	void WrapInputLayer(std::vector<cv::Mat>* input_channels);

	void Preprocess(const cv::Mat& img,
		std::vector<cv::Mat>* input_channels);

private:
	shared_ptr<Net<float> > net_;
	cv::Size input_geometry_;
	int num_channels_;
	cv::Mat mean_;
	std::vector<string> labels_;
};

Classifier::Classifier(const string& model_file,
	const string& trained_file,
	const string& mean_file,
	const string& label_file) {
#ifdef CPU_ONLY
	Caffe::set_mode(Caffe::CPU);
#else
	Caffe::set_mode(Caffe::GPU);
#endif

	/* Load the network. */
	net_.reset(new Net<float>(model_file, TEST));
	net_->CopyTrainedLayersFrom(trained_file);

	CHECK_EQ(net_->num_inputs(), 1) << "Network should have exactly one input.";
	CHECK_EQ(net_->num_outputs(), 1) << "Network should have exactly one output.";

	Blob<float>* input_layer = net_->input_blobs()[0];
	num_channels_ = input_layer->channels();
	CHECK(num_channels_ == 3 || num_channels_ == 1)
		<< "Input layer should have 1 or 3 channels.";
	input_geometry_ = cv::Size(input_layer->width(), input_layer->height());

	/* Load the binaryproto mean file. */
	SetMean(mean_file);

	/* Load labels. */
	std::ifstream labels(label_file.c_str());
	CHECK(labels) << "Unable to open labels file " << label_file;
	string line;
	while (std::getline(labels, line))
		labels_.push_back(string(line));

	Blob<float>* output_layer = net_->output_blobs()[0];
	CHECK_EQ(labels_.size(), output_layer->channels())
		<< "Number of labels is different from the output layer dimension.";
}

static bool PairCompare(const std::pair<float, int>& lhs,
	const std::pair<float, int>& rhs) {
	return lhs.first > rhs.first;
}

/* Return the indices of the top N values of vector v. */
static std::vector<int> Argmax(const std::vector<float>& v, int N) {
	std::vector<std::pair<float, int> > pairs;
	for (size_t i = 0; i < v.size(); ++i)
		pairs.push_back(std::make_pair(v[i], i));
	std::partial_sort(pairs.begin(), pairs.begin() + N, pairs.end(), PairCompare);

	std::vector<int> result;
	for (int i = 0; i < N; ++i)
		result.push_back(pairs[i].second);
	return result;
}

/* Return the top N predictions. */
std::vector<Prediction> Classifier::Classify(const cv::Mat& img, int N) {
	std::vector<float> output = Predict(img);

	N = std::min<int>(labels_.size(), N);
	std::vector<int> maxN = Argmax(output, N);
	std::vector<Prediction> predictions;
	for (int i = 0; i < N; ++i) {
		int idx = maxN[i];
		predictions.push_back(std::make_pair(labels_[idx], output[idx]));
	}

	return predictions;
}

/* Load the mean file in binaryproto format. */
void Classifier::SetMean(const string& mean_file) {
	BlobProto blob_proto;
	ReadProtoFromBinaryFileOrDie(mean_file.c_str(), &blob_proto);

	/* Convert from BlobProto to Blob<float> */
	Blob<float> mean_blob;
	mean_blob.FromProto(blob_proto);
	CHECK_EQ(mean_blob.channels(), num_channels_)
		<< "Number of channels of mean file doesn't match input layer.";

	/* The format of the mean file is planar 32-bit float BGR or grayscale. */
	std::vector<cv::Mat> channels;
	float* data = mean_blob.mutable_cpu_data();
	for (int i = 0; i < num_channels_; ++i) {
		/* Extract an individual channel. */
		cv::Mat channel(mean_blob.height(), mean_blob.width(), CV_32FC1, data);
		channels.push_back(channel);
		data += mean_blob.height() * mean_blob.width();
	}

	/* Merge the separate channels into a single image. */
	cv::Mat mean;
	cv::merge(channels, mean);

	/* Compute the global mean pixel value and create a mean image
	* filled with this value. */
	cv::Scalar channel_mean = cv::mean(mean);
	mean_ = cv::Mat(input_geometry_, mean.type(), channel_mean);
}

std::vector<float> Classifier::Predict(const cv::Mat& img) {
	Blob<float>* input_layer = net_->input_blobs()[0];
	input_layer->Reshape(1, num_channels_,
		input_geometry_.height, input_geometry_.width);
	/* Forward dimension change to all layers. */
	net_->Reshape();

	std::vector<cv::Mat> input_channels;
	WrapInputLayer(&input_channels);

	Preprocess(img, &input_channels);

	//double t = (double)getTickCount();
	net_->ForwardPrefilled();
	//t = ((double)getTickCount() - t) / getTickFrequency() * 1000;
	//std::cout << "Forward pass took " << t << " ms" << std::endl;

	/* Copy the output layer to a std::vector */
	Blob<float>* output_layer = net_->output_blobs()[0];
	const float* begin = output_layer->cpu_data();
	const float* end = begin + output_layer->channels();
	return std::vector<float>(begin, end);
}

/* Wrap the input layer of the network in separate cv::Mat objects
* (one per channel). This way we save one memcpy operation and we
* don't need to rely on cudaMemcpy2D. The last preprocessing
* operation will write the separate channels directly to the input
* layer. */
void Classifier::WrapInputLayer(std::vector<cv::Mat>* input_channels) {
	Blob<float>* input_layer = net_->input_blobs()[0];

	int width = input_layer->width();
	int height = input_layer->height();
	float* input_data = input_layer->mutable_cpu_data();
	for (int i = 0; i < input_layer->channels(); ++i) {
		cv::Mat channel(height, width, CV_32FC1, input_data);
		input_channels->push_back(channel);
		input_data += width * height;
	}
}

void Classifier::Preprocess(const cv::Mat& img,
	std::vector<cv::Mat>* input_channels) {
	/* Convert the input image to the input image format of the network. */
	cv::Mat sample;
	if (img.channels() == 3 && num_channels_ == 1)
		cv::cvtColor(img, sample, CV_BGR2GRAY);
	else if (img.channels() == 4 && num_channels_ == 1)
		cv::cvtColor(img, sample, CV_BGRA2GRAY);
	else if (img.channels() == 4 && num_channels_ == 3)
		cv::cvtColor(img, sample, CV_BGRA2BGR);
	else if (img.channels() == 1 && num_channels_ == 3)
		cv::cvtColor(img, sample, CV_GRAY2BGR);
	else
		sample = img;

	cv::Mat sample_resized;
	if (sample.size() != input_geometry_)
		cv::resize(sample, sample_resized, input_geometry_);
	else
		sample_resized = sample;

	cv::Mat sample_float;
	if (num_channels_ == 3)
		sample_resized.convertTo(sample_float, CV_32FC3);
	else
		sample_resized.convertTo(sample_float, CV_32FC1);

	cv::Mat sample_normalized;
	cv::subtract(sample_float, mean_, sample_normalized);

	/* This operation will write the separate BGR planes directly to the
	* input layer of the network because it is wrapped by the cv::Mat
	* objects in input_channels. */
	cv::split(sample_normalized, *input_channels);

	CHECK(reinterpret_cast<float*>(input_channels->at(0).data)
		== net_->input_blobs()[0]->cpu_data())
		<< "Input channels are not wrapping the input layer of the network.";
}

// Enumerate the image files (*.bmp / *.jpg / *.JPG / *.jpeg) under strDir for batch processing.
int EnumFiles(string strDir, vector<string>& vec)
{
	WIN32_FIND_DATA FindFileData;
	HANDLE hFind;

	string strDirTmp;
	string strFileName;
	string strsubstring;
	int iFileCnt = 0;

	strDirTmp = strDir + "\\*.*";

	hFind = FindFirstFile(strDirTmp.c_str(), &FindFileData);

	if (hFind == INVALID_HANDLE_VALUE)
	{
		return 0;
	}
	else
	{
		do
		{
			strFileName = string(FindFileData.cFileName);
			if (strFileName.length() > 4)
			{
				/* Keep only files whose last four characters match a known image extension. */
				strsubstring = strFileName.substr(strFileName.length() - 4, 4);
				if (strsubstring == ".bmp" || strsubstring == ".jpg"
					|| strsubstring == ".JPG" || strsubstring == "jpeg")
				{
					vec.push_back(strDir + "\\" + strFileName);
					iFileCnt++;
				}
			}
		} while (FindNextFile(hFind, &FindFileData) != 0);
	}

	// Finished enumerating; release the search handle.
	FindClose(hFind);
	// Return how many image files were collected into vec.
	return iFileCnt;
}

// Main entry point: set up the classifier and run it over every image in a folder.
int main(int argc, char** argv) {
	/*if (argc != 6) {
	std::cerr << "Usage: " << argv[0]
	<< " deploy.prototxt network.caffemodel"
	<< " mean.binaryproto labels.txt img.jpg" << std::endl;
	return 1;
	}*/

	//caffe::GlobalInit(&argc, &argv);


	///**************** Best-performing configuration **********************/
	//string model_file = "E:\\egg\\train-data\\data\\egg9\\train-val\\16-32-64-xavier\\deploy.prototxt";// argv[1];
	//string trained_file = "E:\\egg\\train-data\\data\\egg9\\train-val\\16-32-64-xavier\\测试集\\16-32-64_iter_200000.caffemodel";// argv[2];
	//string mean_file = "E:\\egg\\train-data\\data\\egg9\\ming_mean.binaryproto";//argv[3];
	//string label_file = "E:\\egg\\train-data\\1.txt";//argv[4];
	///**************************************/

	//string model_file = "E:\\egg\\deploy.prototxt";// argv[1];
	//string trained_file = "E:\\egg\\hui_iter_600.caffemodel";// argv[2];
	//string mean_file = "E:\\egg\\ming_mean.binaryproto";//argv[3];
	//string label_file = "E:\\egg\\1.txt";//argv[4];
	//string model_file = "E:\\egg\\train-new\\data\\egg\\train-val\\deploy.prototxt";// argv[1];
	//string trained_file = "E:\\egg\\train-new\\egg2_iter_500000.caffemodel";// argv[2];
	//string mean_file = "E:\\egg\\train-new\\data\\egg\\ming_mean.binaryproto";//argv[3];
	//string label_file = "E:\\egg\\train-data\\1.txt";//argv[4];

	///****************************train-org (copy)*********************************************/
	//string model_file = "D:\\XuYunyun\\XuNet\\mydeploy.prototxt";// argv[1];
	//string trained_file = "D:\\XuYunyun\\XuNet\\alextmodel_iter_8000.caffemodel";// argv[2];
	//string mean_file = "D:\\XuYunyun\\mymeanfile\\mean.binaryproto";//argv[3];
	//string label_file = "D:\\XuYunyun\\label.txt";//argv[4];
	/****************************train-org (copy)*********************************************/
	string model_file = "L:\\ZenglaiGao\\EggImages\\googlenet_dataset\\2nd-2019-1-10\\deploy.prototxt";// argv[1];
	string trained_file = "L:\\ZenglaiGao\\EggImages\\googlenet_dataset\\1st-2018-12-17\\model\\bvlc_googlenet_iter_104000.caffemodel";// argv[2];
	string mean_file = "L:\\ZenglaiGao\\EggImages\\googlenet_dataset\\2nd-2019-1-10\\ming_mean.binaryproto";//argv[3];
	string label_file = "L:\\ZenglaiGao\\EggImages\\googlenet_dataset\\2nd-2019-1-10\\1.txt";//argv[4];
	//string model_file = "E:\\caffe-windows\\data\\egg_center\\train-val\\deploy.prototxt";// argv[1];
	//string trained_file = "E:\\caffe-windows\\data\\egg_center\\egg_se_iter_5000.caffemodel";// argv[2];
	//string mean_file = "E:\\caffe-windows\\data\\egg_center\\mean.binaryproto";//argv[3];
	//string label_file = "E:\\caffe-windows\\data\\egg_center\\1.txt";//argv[4];
	/**************************************/

	/****************************train-org*********************************************/
	//string model_file = "E:\\egg\\train-org\\data\\egg\\train-val\\deploy.prototxt";// argv[1];
	//string trained_file = "E:\\egg\\train-org\\egg-xavier_iter_200000.caffemodel";// argv[2];
	//string mean_file = "E:\\egg\\train-org\\data\\egg\\ming_mean.binaryproto";//argv[3];
	//string label_file = "E:\\egg\\train-data\\1.txt";//argv[4];
	/**************************************/

	/****************************train-new*********************************************/
	//string model_file = "E:\\egg\\train-new\\data\\egg\\train-val\\deploy1.prototxt";// argv[1];
	//string trained_file = "E:\\egg\\train-new\\egg-xavier-xin_iter_10000.caffemodel";// argv[2];
	//string mean_file = "E:\\egg\\train-new\\data\\egg\\ming_mean.binaryproto";//argv[3];
	//string label_file = "E:\\egg\\train-data\\1.txt";//argv[4];
	/**************************************/
	
	Classifier classifier(model_file, trained_file, mean_file, label_file);

	//string file = "E:\\Liuhuasng\\模型文件\\pz\\1\\bad\\10150.jpg";//argv[5];

	//std::cout << "---------- Prediction for "
	//	<< file << " ----------" << std::endl;
	FILE *file_;
	/* file_ = fopen("E:\\egg\\train-new\\data\\egg\\train\\bad.txt", "wt");*/
	file_ = fopen("L:\\ZenglaiGao\\EggImages\\googlenet_dataset\\2nd-2019-1-10\\train\\1\\dirname.txt", "wt");
	vector<string> vec;
	/*EnumFiles(string("E:\\egg\\train-new\\data\\egg\\train\\bad"), vec);*/
	//EnumFiles(string("E:\\pz\\1\\bad"), vec);
	EnumFiles(string("L:\\ZenglaiGao\\EggImages\\googlenet_dataset\\2nd-2019-1-10\\train\\1"), vec);
	int isize = (int)vec.size();
	for (int ifileCnt = 0; ifileCnt < isize; ifileCnt++)
	{
		cv::Mat img = cv::imread(vec[ifileCnt].c_str(), -1);
		CHECK(!img.empty()) << "Unable to decode image " << vec[ifileCnt];
		std::vector<Prediction> predictions = classifier.Classify(img);

		/* Print the top N predictions. */
		for (size_t i = 0; i < predictions.size(); ++i) {
			Prediction p = predictions[i];
			//std::cout << std::fixed << std::setprecision(4) << p.second << " - \""
			//  << p.first << "\"" << std::endl;
		}
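		/* The images in this folder are expected to belong to class "1", so any top-1
		 * prediction that differs is treated as a misclassification and logged below. */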
		Prediction p = predictions[0];
		std::cout << p.first << std::endl;
		if (p.first != "1")
		{
			int npos = vec[ifileCnt].find_last_of("\\");
			string s(vec[ifileCnt].substr(npos - 4));

			fprintf(file_, "%s  %f  %s\r\n", p.first.c_str(), p.second, s.c_str());
		}



	}
	fclose(file_);

	//FILE *file_1;
	///* file_ = fopen("E:\\egg\\train-new\\data\\egg\\train\\bad.txt", "wt");*/
	//file_1 = fopen("E:\\caffe-windows\\data\\egg_center_64\\val\\good.txt", "wt");
	//vector<string> vec1;
	//char buf1[200];
	////int flag1;
	///*EnumFiles(string("E:\\egg\\train-new\\data\\egg\\train\\bad"), vec);*/
	//EnumFiles(string("E:\\caffe-windows\\data\\egg_center_64\\val\\good"), vec1);
	//for (int ifileCnt = 0; ifileCnt < vec1.size(); ifileCnt++)
	//{
	//	cv::Mat img = cv::imread(vec1[ifileCnt].c_str(), -1);
	//	//Mat dst;
	//	//  CHECK(!img.empty()) << "Unable to decode image " << file;
	//	std::vector<Prediction> predictions = classifier.Classify(img);

	//	/* Print the top N predictions. */
	//	for (size_t i = 0; i < predictions.size(); ++i) {
	//		Prediction p = predictions[i];
	//		//std::cout << std::fixed << std::setprecision(4) << p.second << " - \""
	//		//  << p.first << "\"" << std::endl;
	//	}
	//	Prediction p = predictions[0];
	//	std::cout << p.first << "    " << p.second << std::endl;
	//	if (p.first != "good")
	//	{
	//		//flag1++;
	//		int npos = vec[ifileCnt].find_last_of("\\");
	//		string s(vec[ifileCnt].substr(npos - 4));
	//		sprintf(buf1, "E:\\caffe-windows\\data\\egg_center_64\\val\\error\\%s_%f_%d.bmp", p.first.c_str(), p.second, flag1);
	//		imwrite(buf1, img);
	//		fprintf(file_1, "%s  %f  %s\r\n", p.first.c_str(), p.second, s.c_str());
	//	}


	//}
	//fclose(file_1);
}
#else
int main(int argc, char** argv) {
	LOG(FATAL) << "This example requires OpenCV; compile with USE_OPENCV.";
}
#endif  // USE_OPENCV
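
The enumeration helper above is the only part of the batch logic that is tied to the Win32 API (FindFirstFile / FindNextFile); everything else is plain Caffe and OpenCV. If a portable replacement is wanted, OpenCV 3.x provides cv::glob, which matches file names against a pattern on any platform. The sketch below is only an illustration of that alternative, not part of the original program; the function name EnumFilesPortable and the "*.jpg" pattern are placeholders, and other extensions would need their own cv::glob calls.

#include <opencv2/core.hpp>   // cv::glob is declared in the core module in OpenCV 3.x
#include <string>
#include <vector>

// Portable stand-in for EnumFiles(): collect every *.jpg directly under strDir.
int EnumFilesPortable(const std::string& strDir, std::vector<std::string>& vec) {
	std::vector<cv::String> found;
	cv::glob(strDir + "/*.jpg", found, false);   // false: do not recurse into subfolders
	for (size_t i = 0; i < found.size(); ++i)
		vec.push_back(found[i].c_str());
	return (int)vec.size();
}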

 
