目录
4、定义一个函数,用于对四个点进行排序,以获得正确的矩形顶点顺序。
12、对筛选出的文字轮廓进行排序和分组,然后识别文字并计算准确率。
14、 显示原始图像和变换后的图像,并等待用户按键关闭窗口。
一、如何实现识别答题卡?
该程序对答题卡图像进行处理,自动识别每道题涂写的选项,并与正确答案比较以计算得分。流程包含一系列图像处理操作:灰度化、高斯模糊、Canny 边缘检测、轮廓查找与筛选、四点透视变换、Otsu 二值化,以及对选项轮廓的排序、分组和逐题判分。整个代码的目的是实现答题卡的自动识别和准确率评估,适用于答题卡扫描与自动阅卷等应用场景。
二、代码实现:
1、数据:
"test2.png"
2、导入库
import cv2
import numpy as np
3、定义一个字典,存储正确答案的索引。
# Answer key: maps question index -> 0-based index of the correct option bubble.
ANSWER_KEY={0:1,1:4,2:0,3:3,4:1}
4、定义一个函数,用于对四个点进行排序,以获得正确的矩形顶点顺序。
def order_points(pts):
    """Order four corner points as top-left, top-right, bottom-right, bottom-left."""
    ordered = np.zeros((4, 2), dtype="float32")
    coord_sum = pts.sum(axis=1)        # x + y: smallest at top-left, largest at bottom-right
    coord_diff = np.diff(pts, axis=1)  # y - x: smallest at top-right, largest at bottom-left
    ordered[0] = pts[np.argmin(coord_sum)]   # top-left
    ordered[1] = pts[np.argmin(coord_diff)]  # top-right
    ordered[2] = pts[np.argmax(coord_sum)]   # bottom-right
    ordered[3] = pts[np.argmax(coord_diff)]  # bottom-left
    return ordered
5、定义一个函数,用于进行四点透视变换,以矫正图像。
def four_point_transform(image, pts):
    """Warp the quadrilateral described by pts to a straight-on rectangular view."""
    rect = order_points(pts)
    (tl, tr, br, bl) = rect

    # Output width: the longer of the two horizontal edges.
    bottom_width = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
    top_width = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
    maxWidth = max(int(bottom_width), int(top_width))

    # Output height: the longer of the two vertical edges.
    right_height = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
    left_height = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
    maxHeight = max(int(right_height), int(left_height))

    # Destination corners, in the same TL, TR, BR, BL order as rect.
    dst = np.array([[0, 0],
                    [maxWidth - 1, 0],
                    [maxWidth - 1, maxHeight - 1],
                    [0, maxHeight - 1]],
                   dtype="float32")

    # Compute the homography and apply it.
    M = cv2.getPerspectiveTransform(rect, dst)
    return cv2.warpPerspective(image, M, (maxWidth, maxHeight))
6、定义一个函数,用于对轮廓进行排序。
def sort_contours(cnts, method="left-to-right"):
    """Sort contours by bounding-box position; returns (sorted_cnts, sorted_boxes)."""
    # Sort on y for vertical orderings, x for horizontal ones.
    axis = 1 if method in ("top-to-bottom", "bottom-to-top") else 0
    descending = method in ("right-to-left", "bottom-to-top")
    boxes = [cv2.boundingRect(c) for c in cnts]
    pairs = sorted(zip(cnts, boxes), key=lambda pair: pair[1][axis], reverse=descending)
    cnts, boxes = zip(*pairs)
    return cnts, boxes
7、定义一个函数,用于显示图像。
def cv_show(name,img):
    """Display an image in a named window and block until any key is pressed."""
    cv2.imshow(name, img)
    cv2.waitKey(0)
8、转化并处理图像,寻找轮廓
# Load and preprocess the sheet: grayscale -> blur -> edges -> external contours.
image = cv2.imread('test2.png')
contours_img = image.copy()  # fix: was used below without ever being defined
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)  # suppress noise before edge detection
edged = cv2.Canny(blurred, 50, 100)
# fix: cv2.findContours returns (contours, hierarchy) on OpenCV 4.x and
# (image, contours, hierarchy) on 3.x; the raw tuple was used as the contour
# list. Keep only the contours, whichever API version is installed.
res = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = res[0] if len(res) == 2 else res[1]
cv2.drawContours(contours_img, cnts, -1, (0, 0, 255), 3)
cv_show('contours_img', contours_img)
docCnt = None
9、对轮廓按面积进行排序,筛选出面积最大的轮廓。
# 根据轮廓大小进行排序,准备透视变换
# Sort contours largest-first so the sheet outline is tried early, then keep
# the first contour whose polygonal approximation has exactly 4 vertices.
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
for contour in cnts:
    perimeter = cv2.arcLength(contour, True)
    quad = cv2.approxPolyDP(contour, 0.02 * perimeter, True)  # contour approximation
    if len(quad) == 4:
        docCnt = quad
        break
10、透视变换并处理
# Perspective-correct the sheet, then binarize it.
warped_t = four_point_transform(image, docCnt.reshape(4, 2))
cv_show('warped', warped_t)
warped = cv2.cvtColor(warped_t, cv2.COLOR_BGR2GRAY)
# fix: the line "convert the warped image to grayscale" was bare prose with no
# leading '#', which is a syntax error; it is now a comment.
# Otsu picks the threshold automatically; BINARY_INV makes filled marks white.
thresh = cv2.threshold(warped, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
cv_show('thresh', thresh)
11、对二值化图像进行轮廓查找和筛选,找出可能的文字轮廓。
# 找到每一个圆圈轮廓
# Find every bubble outline on the binarized sheet.
# fix: indexing [1] returns the hierarchy (not the contours) on OpenCV 4.x;
# handle both the 3.x and 4.x return shapes.
res = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = res[0] if len(res) == 2 else res[1]
warped_Contours = cv2.drawContours(warped_t, cnts, -1, (0, 255, 0), 1)
cv_show('warped_Contours', warped_Contours)
questionCnts = []
for c in cnts:  # keep only contours whose size and aspect ratio fit a bubble
    (x, y, w, h) = cv2.boundingRect(c)
    ar = w / float(h)
    # Bubbles are roughly square and at least 20x20 px; tune for other sheets.
    if w >= 20 and h >= 20 and ar >= 0.9 and ar <= 1.1:
        questionCnts.append(c)
12、对筛选出的文字轮廓进行排序和分组,然后识别文字并计算准确率。
# 每排有5个选项
# Grade: sort bubbles into row order, take rows of 5 options, and for each row
# pick the bubble with the most foreground pixels as the marked answer.
# fix: the bubbles must be sorted top-to-bottom before slicing rows of 5, and
# the score counter must be initialized before `correct += 1` is reached.
questionCnts = sort_contours(questionCnts, method="top-to-bottom")[0]
correct = 0
for (q, i) in enumerate(np.arange(0, len(questionCnts), 5)):
    cnts = sort_contours(questionCnts[i:i + 5])[0]  # options left-to-right
    bubbled = None
    # Examine each of the 5 options in this row.
    for (j, c) in enumerate(cnts):
        # Mask out everything except the current bubble.
        mask = np.zeros(thresh.shape, dtype="uint8")
        cv2.drawContours(mask, [c], -1, 255, -1)  # thickness -1 = filled
        cv_show('mask', mask)
        # AND with the mask keeps only the pixels inside this bubble.
        thresh_mask_and = cv2.bitwise_and(thresh, thresh, mask=mask)
        cv_show('thresh_mask_and', thresh_mask_and)
        # A filled-in bubble has far more non-zero pixels than an empty one.
        total = cv2.countNonZero(thresh_mask_and)
        if bubbled is None or total > bubbled[0]:
            bubbled = (total, j)  # remember the densest option so far
    # Green if the marked option matches the answer key, red otherwise.
    color = (0, 0, 255)
    k = ANSWER_KEY[q]
    if k == bubbled[1]:
        color = (0, 255, 0)
        correct += 1
    # NOTE(review): `warped` is single-channel here, so both colours draw as
    # black; drawing on the colour image `warped_t` would preserve red/green.
    cv2.drawContours(warped, [cnts[k]], -1, color, 3)
    cv_show('warpeding', warped)
14、 显示原始图像和变换后的图像,并等待用户按键关闭窗口。
# Final score as a percentage, overlaid on the warped sheet.
# fix: derive the question count from ANSWER_KEY instead of hard-coding 5.
score = (correct / float(len(ANSWER_KEY))) * 100
print("[INFO] score: {:.2f}%".format(score))
cv2.putText(warped, "{:.2f}%".format(score), (10, 30),
            cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 255), 2)
cv2.imshow("Original", image)
cv2.imshow("Exam", warped)
cv2.waitKey(0)  # wait for a key press before the windows close
15、代码汇总
import cv2
import numpy as np
# Answer key: maps question index -> 0-based index of the correct option bubble.
ANSWER_KEY={0:1,1:4,2:0,3:3,4:1}
def order_points(pts):
    """Return pts ordered as [top-left, top-right, bottom-right, bottom-left]."""
    sums = pts.sum(axis=1)        # x + y
    diffs = np.diff(pts, axis=1)  # y - x
    rect = np.zeros((4, 2), dtype='float32')
    # Smallest sum -> TL, smallest diff -> TR, largest sum -> BR, largest diff -> BL.
    corner_indices = (np.argmin(sums), np.argmin(diffs), np.argmax(sums), np.argmax(diffs))
    for slot, idx in enumerate(corner_indices):
        rect[slot] = pts[idx]
    return rect
def four_point_transform(image, pts):
    """Perspective-warp the quadrilateral given by pts into an upright rectangle."""
    rect = order_points(pts)
    (tl, tr, br, bl) = rect

    def edge_len(p, q):
        # Euclidean distance between two corner points.
        return np.sqrt(((p[0] - q[0]) ** 2) + ((p[1] - q[1]) ** 2))

    # Output size: the longest opposing edge in each direction.
    maxWidth = max(int(edge_len(br, bl)), int(edge_len(tr, tl)))
    maxHeight = max(int(edge_len(tr, br)), int(edge_len(tl, bl)))
    # Destination corners in the same TL, TR, BR, BL order.
    dst = np.array([[0, 0],
                    [maxWidth - 1, 0],
                    [maxWidth - 1, maxHeight - 1],
                    [0, maxHeight - 1]], dtype="float32")
    M = cv2.getPerspectiveTransform(rect, dst)
    return cv2.warpPerspective(image, M, (maxWidth, maxHeight))
def sort_contours(cnts, method='left-to-right'):
    """Sort contours by bounding-box x or y; returns (sorted_cnts, sorted_boxes)."""
    vertical = method in ('top-to-bottom', 'bottom-to-top')
    axis = 1 if vertical else 0  # compare y for vertical methods, x otherwise
    reverse = method in ('right-to-left', 'bottom-to-top')
    boxes = [cv2.boundingRect(c) for c in cnts]
    ordered = sorted(zip(cnts, boxes), key=lambda pair: pair[1][axis], reverse=reverse)
    cnts, boxes = zip(*ordered)
    return cnts, boxes
def cv_show(name,img):
    """Display an image in a named window and block until any key is pressed."""
    cv2.imshow(name, img)
    cv2.waitKey(0)
# --- Load the sheet and find its outline -------------------------------------
image = cv2.imread(r'test2.png')
contours_img = image.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)  # suppress noise before edge detection
cv_show('blurred', blurred)
edged = cv2.Canny(blurred, 75, 200)
cv_show('edged', edged)
# fix: [1] is the hierarchy (not the contours) on OpenCV 4.x, where
# findContours returns (contours, hierarchy); support both 3.x and 4.x.
res = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = res[0] if len(res) == 2 else res[1]
cv2.drawContours(contours_img, cnts, -1, (0, 0, 255), 3)
cv_show('contours_img', contours_img)
docCnt = None
# The largest contour that approximates to a quadrilateral is taken to be
# the answer sheet itself.
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
for c in cnts:
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.02 * peri, True)
    if len(approx) == 4:
        docCnt = approx
        break
# --- Rectify, binarize, and locate the answer bubbles -------------------------
warped_t = four_point_transform(image, docCnt.reshape(4, 2))
cv_show('warped', warped_t)
warped = cv2.cvtColor(warped_t, cv2.COLOR_BGR2GRAY)
# Otsu threshold, inverted so filled-in marks become white foreground.
thresh = cv2.threshold(warped, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
cv_show('thresh', thresh)
thresh_Contours = thresh.copy()
# fix: [1] is the hierarchy on OpenCV 4.x; support both 3.x and 4.x returns.
res = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = res[0] if len(res) == 2 else res[1]
# fix: draw on a copy of the colour image; drawing (0,0,255) on the grayscale
# `warped` rendered black and corrupted the image reused later for display.
warped_Contours = cv2.drawContours(warped_t.copy(), cnts, -1, (0, 0, 255), 3)
cv_show('warped_Contours', warped_Contours)
questionCnts = []
for c in cnts:
    (x, y, w, h) = cv2.boundingRect(c)
    ar = w / float(h)
    # Keep roughly square regions at least 20x20 px: the answer bubbles.
    if w >= 20 and h >= 20 and ar >= 0.9 and ar <= 1.1:
        questionCnts.append(c)
# Row order (top-to-bottom) so each group of 5 below is one question.
questionCnts = sort_contours(questionCnts, method="top-to-bottom")[0]
# --- Grade each row of 5 bubbles and show the result --------------------------
correct = 0
for (q, i) in enumerate(np.arange(0, len(questionCnts), 5)):
    cnts = sort_contours(questionCnts[i:i + 5])[0]  # options left-to-right
    bubbled = None
    for (j, c) in enumerate(cnts):
        # Isolate one bubble with a filled mask, then count its lit pixels.
        mask = np.zeros(thresh.shape, dtype='uint8')
        cv2.drawContours(mask, [c], -1, 255, -1)  # thickness -1 = filled
        cv_show('mask', mask)
        thresh_mask_and = cv2.bitwise_and(thresh, thresh, mask=mask)
        cv_show('thresh_mask_and', thresh_mask_and)
        total = cv2.countNonZero(thresh_mask_and)
        # The option with the most foreground pixels is the marked one.
        if bubbled is None or total > bubbled[0]:
            bubbled = (total, j)
    color = (0, 0, 255)  # red = wrong
    k = ANSWER_KEY[q]
    if k == bubbled[1]:
        color = (0, 255, 0)  # green = right
        correct += 1
    # fix: draw on the colour image; on the grayscale `warped` both colours
    # rendered as black, making right and wrong answers indistinguishable.
    cv2.drawContours(warped_t, [cnts[k]], -1, color, 3)
# fix: derive the question count from ANSWER_KEY instead of hard-coding 5.
score = (correct / float(len(ANSWER_KEY))) * 100
print("[INFO] score: {:.2f}%".format(score))
cv2.putText(warped_t, "{:.2f}%".format(score), (10, 30),
            cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 255), 2)
cv2.imshow("Original", image)
cv2.imshow("Exam", warped_t)
cv2.waitKey(0)  # wait for a key press before the windows close