Overall steps:
- Image preprocessing.
- Perspective transform to straighten the viewing angle.
- Recognition with tesseract.
import numpy as np
import cv2
Image preprocessing
# Read the image
image = cv2.imread('./images/page.jpg')
# Ratio between the original height and the working height of 500
# (used later to map the detected corners back to full resolution)
ratio = image.shape[0] / 500.0
# Keep a copy of the image at its original resolution
orig = image.copy()
# Wrap cv2.resize so that it scales proportionally
# Pass in either width or height, not both
def resize(image, width=None, height=None, inter=cv2.INTER_AREA):
    # Target dimensions after resizing
    dim = None
    # Current height and width of the image
    (h, w) = image.shape[:2]
    # Neither width nor height given: return the image unchanged
    if width is None and height is None:
        return image
    # Only height given
    if width is None:
        r = height / float(h)  # scale factor
        dim = (int(w * r), height)
    # Only width given
    else:
        r = width / float(w)
        dim = (width, int(h * r))
    # Scale proportionally and return
    resized = cv2.resize(image, dim, interpolation=inter)
    return resized
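For reference, the same wrapper also scales by width alone; a purely illustrative call (the 300 px target is arbitrary):
# Illustrative only: scale the original image down to a width of 300 px
small = resize(orig, width=300)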
# Resize the working copy to a height of 500
image = resize(orig, height=500)
image.shape
(500, 375, 3)
# Helper to display an image in a window
def cv_show(name, image):
    cv2.imshow(name, image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    # extra waitKey lets the window actually close on some platforms
    cv2.waitKey(1)
# Image preprocessing
# Convert to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Gaussian blur to suppress noise
gray = cv2.GaussianBlur(gray, (5, 5), 0)
# Canny edge detection
edged = cv2.Canny(gray, 75, 200)
cv_show('edged', edged)
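The Canny thresholds (75, 200) are hand-picked for this sample page. If other images need tuning, one common heuristic (not part of the original code) is to derive both thresholds from the median pixel value; a minimal sketch, where the ±33% band is an assumed convention:
def auto_canny(gray, sigma=0.33):
    # Derive the lower/upper thresholds from the median intensity
    v = np.median(gray)
    lower = int(max(0, (1.0 - sigma) * v))
    upper = int(min(255, (1.0 + sigma) * v))
    return cv2.Canny(gray, lower, upper)

# edged = auto_canny(gray)  # drop-in replacement for the fixed thresholds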
# Detect the contours in the edge map
cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[0]
# Sort the contours by area, largest first
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
cnts
# Draw all detected contours
image_contours = cv2.drawContours(image.copy(), cnts, -1, (0, 0, 255), 2)
cv_show('image_contours', image_contours)
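Note that indexing the cv2.findContours result with [0] assumes OpenCV 4.x, where the call returns (contours, hierarchy); under OpenCV 3.x it returns (image, contours, hierarchy) and the contours sit at index 1. A version-agnostic way to unpack it:
res = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
cnts = res[0] if len(res) == 2 else res[1]  # works on both OpenCV 3.x and 4.x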
# Walk the contours (largest first) and keep the first one that is a quadrilateral
for c in cnts:
    # Contour perimeter
    perimeter = cv2.arcLength(c, True)
    # Approximate the contour with a simpler polygon
    approx = cv2.approxPolyDP(c, 0.02 * perimeter, True)
    if len(approx) == 4:
        screen_cnt = approx  # screen_cnt is the quadrilateral outline of the page
        break
image_contours = cv2.drawContours(image.copy(), [screen_cnt], -1, (0, 0, 255), 2)
cv_show('image_contours', image_contours)
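If no contour approximates to exactly four points (for example when the page edges are broken in the edge map), screen_cnt is never assigned and the drawing step above raises a NameError. A minimal guard, purely illustrative:
screen_cnt = None
for c in cnts:
    approx = cv2.approxPolyDP(c, 0.02 * cv2.arcLength(c, True), True)
    if len(approx) == 4:
        screen_cnt = approx
        break
if screen_cnt is None:
    raise RuntimeError('No 4-point page outline found; try adjusting the Canny thresholds')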
Perspective transform
# Sort the four corner points into a consistent clockwise order:
# top-left, top-right, bottom-right, bottom-left
def order_points(pts):
    # 4x2 array that will hold the ordered corner coordinates
    rect = np.zeros((4, 2), dtype='float32')
    # s holds the sum x + y of each corner
    s = pts.sum(axis=1)
    # The top-left corner has the smallest x + y; the bottom-right has the largest
    rect[0] = pts[np.argmin(s)]
    rect[2] = pts[np.argmax(s)]
    # diff holds the difference y - x of each corner
    diff = np.diff(pts, axis=1)
    # The top-right corner has the smallest y - x,
    # the bottom-left corner has the largest
    rect[1] = pts[np.argmin(diff)]
    rect[3] = pts[np.argmax(diff)]
    return rect
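A quick sanity check with made-up corner coordinates (the numbers below are illustrative only):
sample = np.array([[50, 300], [60, 40], [320, 35], [330, 310]], dtype='float32')
order_points(sample)
# array([[ 60.,  40.],    <- top-left
#        [320.,  35.],    <- top-right
#        [330., 310.],    <- bottom-right
#        [ 50., 300.]], dtype=float32)    <- bottom-left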
# Wrap the perspective transform in a function
def four_point_transform(image, pts):
    # Order the four input corners
    rect = order_points(pts)
    (tl, tr, br, bl) = rect
    # Widths of the bottom and top edges (Euclidean distance between corners)
    widthA = np.sqrt((br[0] - bl[0]) ** 2 + (br[1] - bl[1]) ** 2)
    widthB = np.sqrt((tr[0] - tl[0]) ** 2 + (tr[1] - tl[1]) ** 2)
    max_width = max(int(widthA), int(widthB))
    # Heights of the right and left edges
    heightA = np.sqrt((tr[0] - br[0]) ** 2 + (tr[1] - br[1]) ** 2)
    heightB = np.sqrt((tl[0] - bl[0]) ** 2 + (tl[1] - bl[1]) ** 2)
    max_height = max(int(heightA), int(heightB))
    # Destination corners of the straightened image
    dst = np.array([
        [0, 0],
        [max_width - 1, 0],
        [max_width - 1, max_height - 1],
        [0, max_height - 1]], dtype='float32')
    # Compute the 3x3 perspective transform matrix
    M = cv2.getPerspectiveTransform(rect, dst)
    # Apply it, specifying the size of the output image
    warped = cv2.warpPerspective(image, M, (max_width, max_height))
    return warped
# Apply the perspective transform to the full-resolution image,
# scaling the detected corners back up by the original ratio
warped = four_point_transform(orig, screen_cnt.reshape(4, 2) * ratio)
warped.shape
(2624, 1962, 3)
cv_show('warped', warped)
# Convert the straightened image to grayscale
warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
# Binarize with a fixed threshold
ref = cv2.threshold(warped, 100, 255, cv2.THRESH_BINARY)[1]
cv_show('ref', ref)
# Write the processed image to disk
cv2.imwrite('./scan.jpg', ref)
True
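The fixed threshold of 100 works for this evenly lit sample. For pages with uneven lighting, Otsu's method or adaptive thresholding are common alternatives (not used in the run above); the block size and constant below are assumed starting values:
# Let Otsu's method pick the global threshold automatically
ref_otsu = cv2.threshold(warped, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
# Or threshold each neighbourhood separately when lighting varies across the page
ref_adaptive = cv2.adaptiveThreshold(warped, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                     cv2.THRESH_BINARY, 11, 10)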
Recognition with tesseract
# Imports
import pytesseract
from PIL import Image
# pytesseract is given a Pillow (PIL) image here: the saved scan is reloaded
# with PIL.Image rather than reusing the OpenCV array
text = pytesseract.image_to_string(Image.open('./scan.jpg'))
print(text)
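If the output is noisy, pytesseract also accepts a language and a Tesseract config string; the values below are illustrative and depend on the installed language packs:
# '--psm 6' treats the scan as a single uniform block of text;
# 'eng' assumes the English language pack is installed
text = pytesseract.image_to_string(Image.open('./scan.jpg'), lang='eng', config='--psm 6')
print(text)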