Template Matching

本文介绍了一个使用OpenCV实现的模板匹配与图像识别系统。该系统通过读取多个参考图像并将其与摄像头捕获的实时图像进行比较,以识别特定的箭头指示和其他符号。系统首先对图像进行预处理,包括灰度转换、边缘检测和轮廓查找等步骤,然后通过透视变换将检测到的区域校正为矩形,并与参考图像进行比较。

摘要生成于 C知道 ,由 DeepSeek-R1 满血版支持, 前往体验 >


The template matching code is as follows.

#include <stdio.h>

#include <iostream>
#include <sstream>
#include <string>
#include <vector>

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/features2d.hpp>
#include <opencv2/legacy/legacy.hpp>
#include "opencv2/video/tracking.hpp"

using namespace cv;
using namespace std;



/// Convert an integer to its decimal string representation.
/// @param number value to convert (negative values keep their sign)
/// @return decimal text of @p number
/// Note: the original relied on <sstream> being pulled in transitively;
/// the include block now lists it explicitly.
std::string intToString(int number) {

	std::ostringstream oss;  // output-only stream is sufficient here
	oss << number;
	return oss.str();
}

// A reference symbol: a thresholded grayscale template image paired with
// the label that is drawn on screen when the template matches.
struct Symbol {
	Mat img;      // binarized template, loaded by readRefImages()
	string name;  // human-readable label, e.g. "Left 90"
};



/// Reorder four quadrilateral corners into clockwise order starting at the
/// top-left: TL, TR, BR, BL.
/// @param corners in/out: exactly four points; rewritten in sorted order.
///                Left unchanged if the points cannot be split into two
///                above and two below @p center.
/// @param center  centroid of the quadrilateral used to split top/bottom.
void sortCorners(std::vector<cv::Point2f>& corners, cv::Point2f center) {
	std::vector<cv::Point2f> top, bot;

	// Partition points by whether they lie above or below the centroid.
	for (unsigned int i = 0; i < corners.size(); i++) {
		if (corners[i].y < center.y)
			top.push_back(corners[i]);
		else
			bot.push_back(corners[i]);
	}

	// FIX: the original indexed top[1]/bot[1] unconditionally and read out of
	// bounds for degenerate quads (e.g. three points above the centroid).
	if (top.size() != 2 || bot.size() != 2)
		return;

	cv::Point2f tl = top[0].x > top[1].x ? top[1] : top[0];
	cv::Point2f tr = top[0].x > top[1].x ? top[0] : top[1];
	cv::Point2f bl = bot[0].x > bot[1].x ? bot[1] : bot[0];
	cv::Point2f br = bot[0].x > bot[1].x ? bot[0] : bot[1];

	corners.clear();
	corners.push_back(tl);
	corners.push_back(tr);
	corners.push_back(br);
	corners.push_back(bl);
}

/// Load the eight reference symbol images from the working directory,
/// binarize each one, and store it with its label in @p symbols[0..7].
/// @param symbols caller-owned array with room for at least 8 entries.
/// @return 0 on success, -1 if any image file is missing or unreadable.
int readRefImages(Symbol *symbols) {

	// File names and labels are index-aligned; the original repeated this
	// load/check/threshold stanza eight times verbatim.
	static const char* kFiles[] = { "arrowL.jpg", "arrowR.jpg", "arrowT.jpg",
			"arrowB.jpg", "arrowL45.jpg", "arrowR45.jpg", "arrowStop.jpg",
			"arrowGo.jpg" };
	static const char* kNames[] = { "Left 90", "Right 90", "Turn Around",
			"Ball", "Left 45", "Right 45", "Stop", "Go" };
	const int kCount = 8;

	for (int i = 0; i < kCount; i++) {
		symbols[i].img = imread(kFiles[i], CV_LOAD_IMAGE_GRAYSCALE);
		if (!symbols[i].img.data)
			return -1;  // file missing or not a readable image
		// Same binarization (thresh=100, THRESH_BINARY) applied to the live
		// frames later, so templates and candidates are comparable.
		threshold(symbols[i].img, symbols[i].img, 100, 255, 0);
		symbols[i].name = kNames[i];
	}

	return 0;

}



// Lower hysteresis threshold for Canny, driven by the "Min Threshold:"
// trackbar in window "A"; zero-initialized at program start.
int lowThreshold;

// Trackbar callback: intentionally empty. HighGUI writes the new value into
// lowThreshold directly, and main() reads it on every frame.
void CannyThreshold(int, void*) {
}

int main(int argc, char** argv) {
	// check http://docs.opencv.org/doc/tutorials/features2d/table_of_content_features2d/table_of_content_features2d.html
	// for OpenCV general detection/matching framework details

	VideoCapture cap;

	cap.open(0);

	namedWindow("A", CV_WINDOW_AUTOSIZE);
	namedWindow("B", CV_WINDOW_AUTOSIZE);
	namedWindow("C", CV_WINDOW_AUTOSIZE);

	Mat camera;
	Mat greyImg;

	Symbol symbols[10];
	if (readRefImages(symbols) == -1) {
		printf("Error reading reference symbols\n");
		return -1;
	}

	createTrackbar("Min Threshold:", "A", &lowThreshold, 100, CannyThreshold);

	double t = (double) getTickCount();
	// do something ...
	//t = ((double)getTickCount() - t)/getTickFrequency();
	//cout << "Times passed in seconds: " << t << endl;

	cap >> camera;

	Mat new_image;

	while (1) {

		t = (double) getTickCount() - t;

		cap >> camera;

		cvtColor(camera, greyImg, CV_RGB2GRAY);

		//threshold(greyImg, greyImg, 100, 255, 0);

		Mat canny_output;
		vector<vector<Point> > contours;
		vector<Vec4i> hierarchy;

		GaussianBlur(greyImg, greyImg, Size(9, 9), 2, 2);

		/// Detect edges using canny
		Canny(greyImg, canny_output, lowThreshold, lowThreshold * 3, 3);

		//	imshow("B",canny_output);
		/// Find contours
		findContours(canny_output, contours, hierarchy, CV_RETR_TREE,
				CV_CHAIN_APPROX_SIMPLE, Point(0, 0));

		vector<Point> approxRect;

		for (size_t i = 0; i < contours.size(); i++) {
			approxPolyDP(contours[i], approxRect,
					arcLength(Mat(contours[i]), true) * 0.05, true);
			if (approxRect.size() == 4) {
				float area = contourArea(contours[i]);

				if (area > 10000) {
					std::vector<cv::Point2f> corners;

					vector<Point>::iterator vertex;
					vertex = approxRect.begin();
					//vertex++;
					circle(camera, *vertex, 2, Scalar(0, 0, 255), -1, 8, 0);
					corners.push_back(*vertex);
					vertex++;
					circle(camera, *vertex, 2, Scalar(0, 0, 255), -1, 8, 0);
					corners.push_back(*vertex);
					vertex++;
					circle(camera, *vertex, 2, Scalar(0, 0, 255), -1, 8, 0);
					corners.push_back(*vertex);
					vertex++;
					circle(camera, *vertex, 2, Scalar(0, 0, 255), -1, 8, 0);
					corners.push_back(*vertex);

					Moments mu;
					mu = moments(contours[i], false);
					Point2f center(mu.m10 / mu.m00, mu.m01 / mu.m00);

					sortCorners(corners, center);

					// Define the destination image
					Mat correctedImg = ::Mat::zeros(195, 271, CV_8UC3);

					// Corners of the destination image
					std::vector<cv::Point2f> quad_pts;
					quad_pts.push_back(Point2f(0, 0));
					quad_pts.push_back(Point2f(correctedImg.cols, 0));
					quad_pts.push_back(
							Point2f(correctedImg.cols, correctedImg.rows));
					quad_pts.push_back(Point2f(0, correctedImg.rows));

					// Get transformation matrix
					Mat transmtx = getPerspectiveTransform(corners, quad_pts);

					// Apply perspective transformation
					warpPerspective(camera, correctedImg, transmtx,
							correctedImg.size());

					Mat correctedImgBin;

					cvtColor(correctedImg, correctedImgBin, CV_RGB2GRAY);

					//equalizeHist(correctedImgBin, correctedImgBin);

					correctedImgBin.copyTo(new_image);

					threshold(correctedImgBin, correctedImgBin, 140, 255, 0);

					imshow("B", correctedImgBin);

					double minVal,maxVal,medVal;


					minMaxLoc(new_image, &minVal, &maxVal);

					medVal=(maxVal-minVal)/2;

					threshold(new_image, new_image, medVal, 255, 0);

					imshow("C", new_image);


					Mat diffImg;

					int match, minDiff, diff;
					minDiff = 12000;
					match = -1;

					for (int i = 0; i < 8; i++) {
						//diffImg = symbols[i].img-correctedImgBin;

						bitwise_xor(new_image, symbols[i].img, diffImg,
								noArray());

						diff = countNonZero(diffImg);
						if (diff < minDiff) {
							minDiff = diff;
							match = i;
						}

						if (i == 0) {
							//	imshow("B",diffImg);

						}
					}

					//imshow("B", correctedImg);

					if (match != -1) {
						putText(camera, symbols[match].name, Point(320, 30), 1,
								2, Scalar(0, 255, 0), 2);
					}

					//break;

				}

			}
		}

		imshow("A", camera);
		waitKey(20);
	}
}


### 模板匹配算法及其在计算机视觉中的应用 #### 定义与基本概念 模板匹配是一种用于检测图像中特定模式的技术,在给定一幅大图(目标图像)的情况下,通过比较其中的小区域与预定义的模板来寻找最相似的部分。该过程通常涉及计算两个窗口之间的差异度量,如均方误差或相关系数。 #### 算法原理 为了提高旋转和平移情况下的鲁棒性,可以通过创建针对不同角度和尺度变化的多个模板模型来进行增强[^1]。具体来说: - **平移不变性**:对于简单的位移变换,可以直接滑动窗口遍历整张图片并逐像素对比; - **缩放处理**:当存在大小改变时,则需预先准备一系列按比例调整后的样本集; - **旋转变形适应**:考虑到物体可能发生的倾斜状况,同样要构建多方位版本作为参照物。 ```python import cv2 import numpy as np def match_template(image_path, template_path): img = cv2.imread(image_path, 0) temp = cv2.imread(template_path, 0) result = cv2.matchTemplate(img, temp, cv2.TM_CCOEFF_NORMED) min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result) top_left = max_loc h, w = temp.shape[:2] bottom_right = (top_left[0] + w, top_left[1] + h) return { 'position': {'start': top_left, 'end': bottom_right}, 'confidence': max_val, 'result_image': result } ``` 此段Python代码展示了如何利用OpenCV库执行基础形式的二维卷积操作完成特征定位任务。 #### 应用场景 随着人工智能技术的发展,基于深度学习框架下的神经网络逐渐成为主流解决方案之一。然而传统意义上的模板匹配依旧广泛应用于如下领域内: - 工业自动化生产线上的质量检验环节; - 文档扫描仪自动裁剪边界功能实现; - 生物医学成像分析软件辅助医生诊断病情; - 增强现实设备快速锁定真实世界坐标系位置等。
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值