First a comparison of the formulas, then a comparison of the actual results.
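For reference, the similarity measures compared below are defined in the OpenCV documentation as follows (T is the template, I the image, R the result map, and the sums run over the template coordinates x', y'):

TM_SQDIFF:  $R(x,y) = \sum_{x',y'} \left( T(x',y') - I(x+x',\,y+y') \right)^2$

TM_CCORR:   $R(x,y) = \sum_{x',y'} T(x',y') \cdot I(x+x',\,y+y')$

TM_CCOEFF:  $R(x,y) = \sum_{x',y'} T'(x',y') \cdot I'(x+x',\,y+y')$, where $T'$ and $I'$ are the template and the image window with their respective means subtracted.

Each _NORMED variant divides the corresponding sum by $\sqrt{\sum_{x',y'} T(x',y')^2 \cdot \sum_{x',y'} I(x+x',\,y+y')^2}$ (using $T'$ and $I'$ for TM_CCOEFF_NORMED), so a perfect match scores 1 for the correlation measures and 0 for TM_SQDIFF_NORMED.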
# Test code below
import cv2
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
%matplotlib inline
from skimage import io, data, color, exposure, feature, util, measure, transform
import skimage as sk

URL = "https://previews.123rf.com/images/lenetsnikolai/lenetsnikolai1703/lenetsnikolai170300514/75202133-surgical-instruments-and-tools-including-scalpels-forceps-and-tweezers-arranged-on-a-table-for-a-sur.jpg"
raw_image = sk.io.imread(URL)
print(raw_image.shape)
# Important: convert to np.float32, otherwise cv2 will not accept the array
image = sk.transform.resize(raw_image, (433, 650, 3), mode='reflect').astype(np.float32)

r, c, w, h = 130, 300, 125, 215
# Important: convert to np.float32, otherwise cv2 will not accept the array
tmpl = get_box_region(image, c, r, w, h).astype(np.float32)
# tmpl = image[r:r+h, c:c+w]

# cv2.rectangle(img, pt1, pt2, color, thickness); the image here is RGB (loaded via skimage)
# and float in [0, 1], so (1, 0, 0) draws a red box
draw = cv2.rectangle(image.copy(), (c, r), (c + w, r + h), (1, 0, 0), 10)
deleted = image.copy()
deleted[r:r+h, c:c+w] = 0
display_1_row([tmpl, image, draw, deleted],
              ['tmpl', 'image', 'draw', 'deleted'])
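get_box_region and display_1_row are small helper functions defined earlier in this notebook and not repeated here. A minimal sketch of what they are assumed to do, reusing the plt imported above: get_box_region crops the (c, r, w, h) box (equivalent to the commented-out slice) and display_1_row shows a list of images side by side with titles.

def get_box_region(img, c, r, w, h):
    # Crop the w x h box whose top-left corner is at column c, row r
    return img[r:r + h, c:c + w]

def display_1_row(images, titles):
    # Show the images in one row; use a gray colormap for 2-D result maps
    fig, axes = plt.subplots(1, len(images), figsize=(4 * len(images), 4))
    for ax, im, title in zip(axes, images, titles):
        ax.imshow(im, cmap=None if im.ndim == 3 else 'gray')
        ax.set_title(title)
        ax.axis('off')
    plt.show()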
Above, one of the forceps is taken as the template, and that template region is then cut out (set to zero) of the original image.
Below, the sample code from the official OpenCV documentation is used to compare how the different matching methods behave.
methods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR',
           'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED']

for meth in methods:
    img = deleted.copy()
    method = eval(meth)

    # Apply template matching
    res = cv2.matchTemplate(img, tmpl, method)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

    # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take the minimum
    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
    else:
        top_left = max_loc
    bottom_right = (top_left[0] + w, top_left[1] + h)

    cv2.rectangle(img, top_left, bottom_right, (1, 0, 0), 5)
    display_1_row([res, img], [meth, 'image'])
If the template region is not cut out, the results are similar to those on the official site, except that plain CCORR still does not work well:
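The CCORR failure follows from its formula above: it is a plain sum of products, so a bright, featureless region can outscore the true match. A tiny hypothetical sketch of this effect (the arrays below are made up purely for illustration, not taken from the image above):

import cv2
import numpy as np

# A small template with structure, and an image containing both the true match
# and a bright flat patch
tmpl_demo = np.array([[0.1, 0.9],
                      [0.9, 0.1]], dtype=np.float32)
img_demo = np.zeros((4, 6), dtype=np.float32)
img_demo[1:3, 1:3] = tmpl_demo   # true match with top-left corner at (x=1, y=1)
img_demo[1:3, 4:6] = 1.0         # bright featureless patch at (x=4, y=1)

_, _, _, loc_ccorr = cv2.minMaxLoc(cv2.matchTemplate(img_demo, tmpl_demo, cv2.TM_CCORR))
_, _, _, loc_normed = cv2.minMaxLoc(cv2.matchTemplate(img_demo, tmpl_demo, cv2.TM_CCORR_NORMED))
print(loc_ccorr)    # (4, 1): plain CCORR picks the bright patch
print(loc_normed)   # (1, 1): the normalized version finds the true match

Normalization divides out the energy of the image window, which is why the _NORMED variants (and the mean-subtracted CCOEFF measures) are usually preferred over plain CCORR.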
Conclusion: cut holes out of the image with caution.