任务目标:实现实时的全景环视图像拼接的视频输出
实现思路:使用固定的四个摄像头获取四个面的图像,由于摄像机固定所以拼接参数固定。所以使用标定参数来进行后续图像的实时拼接显示来满足目标的实时性。
代码可以优化的地方:没有增加图像亮度预处理的功能,对于亮度不均匀的场景会出现明显的明暗不均匀
效果预览:
待拼接图片:(原文此处为示例图片,文本版未包含)
拼接效果:(原文此处为示例图片,文本版未包含)
懒得整理了就这样吧,代码找了老久,不保证这个是我当时成功运行的代码
import cv2
import time
import os
import numpy as np
import cv2
import numpy as np
# Image display helper (blocks until a key is pressed).
def cv_show(name, img):
    """Show *img* in a window titled *name*, wait for a key, then close.

    NOTE: the original signature was ``cv_show(img, name)`` and called
    ``cv2.imshow(img, name)`` — but cv2.imshow expects (winname, mat), and
    every call site in this file passes the title first, so the parameter
    names were simply swapped.  Renamed to match actual usage; positional
    behavior is unchanged.

    :param name: window title string.
    :param img: BGR image (numpy array) to display.
    """
    cv2.imshow(name, img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    return
# Image warp ("surface") function: squeeze the outer corners inward.
def bianhuan(img):
    '''
    Warp *img* (expected 640x480) into a hexagon-like shape: the image is
    split into left/right halves and each half's outer corners are moved
    vertically toward the middle, which pre-distorts the side cameras for
    stitching.

    :param img: input BGR image.
    :return: the two warped halves re-joined with hconcat.
    '''

    def _warp_half(half, dst_pts):
        # Map the nominal 320x480 quad of this half onto dst_pts.
        src_pts = np.float32([[0, 0], [320, 0], [320, 480], [0, 480]])
        M = cv2.getPerspectiveTransform(src_pts, np.float32(dst_pts))
        return cv2.warpPerspective(half, M, (half.shape[1], half.shape[0]))

    width = int(img.shape[1])
    # The left slice keeps one extra column (width//2 + 1), overlapping the
    # right slice by one pixel column — matches the original behaviour.
    zuo_img = img[:, :width // 2 + 1]   # left half
    you_img = img[:, width // 2:]       # right half
    # Left half: outer (left) corners move inward by 100 px.
    warped_zuo = _warp_half(zuo_img, [[0, 100], [320, 0], [320, 480], [0, 380]])
    # Right half: outer (right) corners move inward by 100 px.
    warped_you = _warp_half(you_img, [[0, 0], [320, 100], [320, 380], [0, 480]])
    return cv2.hconcat([warped_zuo, warped_you])
# Same hexagon warp as bianhuan(), but with a stronger squeeze (145 px)
# on the left half — used for the front camera.
def bianhuan1(img):
    '''
    Warp *img* (expected 640x480) into a hexagon-like shape: split into
    left/right halves, move each half's outer corners vertically inward.
    The left half uses a 145 px offset (vs. 100 px in bianhuan()).

    :param img: input BGR image.
    :return: the two warped halves re-joined with hconcat.
    '''

    def _warp_half(half, dst_pts):
        # Map the nominal 320x480 quad of this half onto dst_pts.
        src_pts = np.float32([[0, 0], [320, 0], [320, 480], [0, 480]])
        M = cv2.getPerspectiveTransform(src_pts, np.float32(dst_pts))
        return cv2.warpPerspective(half, M, (half.shape[1], half.shape[0]))

    width = int(img.shape[1])
    # Left slice keeps one extra overlap column, as in bianhuan().
    zuo_img = img[:, :width // 2 + 1]   # left half
    you_img = img[:, width // 2:]       # right half
    # Left half: outer corners move inward by 145 px.
    warped_zuo = _warp_half(zuo_img, [[0, 145], [320, 0], [320, 480], [0, 345]])
    # Right half: outer corners move inward by 100 px.
    warped_you = _warp_half(you_img, [[0, 0], [320, 100], [320, 380], [0, 480]])
    return cv2.hconcat([warped_zuo, warped_you])
# Same hexagon warp, with a 140 px squeeze on the right half — used for
# the left camera.
def bianhuan4(img):
    '''
    Warp *img* (expected 640x480) into a hexagon-like shape: split into
    left/right halves, move each half's outer corners vertically inward.
    The right half uses a 140 px offset (vs. 100 px in bianhuan()).

    :param img: input BGR image.
    :return: the two warped halves re-joined with hconcat.
    '''

    def _warp_half(half, dst_pts):
        # Map the nominal 320x480 quad of this half onto dst_pts.
        src_pts = np.float32([[0, 0], [320, 0], [320, 480], [0, 480]])
        M = cv2.getPerspectiveTransform(src_pts, np.float32(dst_pts))
        return cv2.warpPerspective(half, M, (half.shape[1], half.shape[0]))

    width = int(img.shape[1])
    # Left slice keeps one extra overlap column, as in bianhuan().
    zuo_img = img[:, :width // 2 + 1]   # left half
    you_img = img[:, width // 2:]       # right half
    # Left half: outer corners move inward by 100 px.
    warped_zuo = _warp_half(zuo_img, [[0, 100], [320, 0], [320, 480], [0, 380]])
    # Right half: outer corners move inward by 140 px.
    warped_you = _warp_half(you_img, [[0, 0], [320, 140], [320, 340], [0, 480]])
    return cv2.hconcat([warped_zuo, warped_you])
# Feature-based homography between two overlapping images.
# Returns (M, flag): flag True -> feature stitching possible, False -> fall
# back to direct concatenation (and M is the scalar 0).
def sift_M(img1, img2):
    """Estimate the homography mapping img1 onto img2 via SIFT + FLANN.

    :param img1: query image (BGR or grayscale).
    :param img2: train image.
    :return: (M, ok) — M is the 3x3 RANSAC homography when at least 6 good
             matches survive Lowe's ratio test; otherwise M == 0 and
             ok is False so the caller can stitch directly.
    """
    sift = cv2.SIFT_create()
    ok = True
    # Detect keypoints and compute descriptors for both images.
    kp1, describe1 = sift.detectAndCompute(img1, None)
    kp2, describe2 = sift.detectAndCompute(img2, None)
    # FLANN (KD-tree) matcher returning the 2 nearest neighbours per feature.
    FLANN_INDEX_KDTREE = 0
    indexParams = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    searchParams = dict(checks=50)
    flann = cv2.FlannBasedMatcher(indexParams, searchParams)
    matches = flann.knnMatch(describe1, describe2, k=2)
    # Lowe's ratio test: keep a match only when the best neighbour is
    # clearly better (< 0.65x distance) than the second best.
    good = []
    for pair in matches:
        # knnMatch may return fewer than 2 neighbours for some features;
        # the original `for m, n in matches` unpack would raise ValueError.
        if len(pair) < 2:
            continue
        m, n = pair
        if m.distance < 0.65 * n.distance:
            good.append(m)
    # (The original also built a drawMatches visualization here every call;
    # it was never displayed or saved, so the dead work was removed.)
    # RANSAC homography needs a handful of inliers; below that, bail out.
    if len(good) > 5:
        src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        tge_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
        M, mask = cv2.findHomography(src_pts, tge_pts, cv2.RANSAC, 5)
    else:
        print('特征匹配点不足,进行直接拼接')
        ok = False
        M = 0
    return M, ok
# Find the six corner points of the stitched panorama's non-black region:
# group1 = the three top points, group2 = the three bottom points.
def six_dian(img):
    """Return the panorama outline's corner points, split top/bottom.

    The stitched result sits on a black background; thresholding any
    non-black pixel and approximating the outer contour as a polygon yields
    the outline vertices.  Points are grouped by the horizontal midline
    (y < 240 -> top group) and each group is sorted left-to-right.

    :param img: stitched BGR panorama on a black background (height ~480).
    :return: (group1, group2) — lists of contour points (shape (1, 2) each).
    """
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Any non-black pixel counts as foreground.
    _, binary_img = cv2.threshold(img_gray, 1, 255, cv2.THRESH_BINARY)
    binary_img = cv2.medianBlur(binary_img, 5)
    contours, _ = cv2.findContours(binary_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # The original indexed contours[0], but cv2.findContours gives no
    # ordering guarantee; explicitly take the largest contour (the panorama
    # outline) so stray specks cannot be picked by accident.
    outline = max(contours, key=cv2.contourArea)
    approx = cv2.approxPolyDP(outline, epsilon=0.01 * cv2.arcLength(outline, True), closed=True)
    group1 = []  # points above the midline (y < 240)
    group2 = []  # points on/below the midline
    for point in approx:
        x, y = point[0]
        if y < 240:
            group1.append(point)
        else:
            group2.append(point)
    # Within each group, sort by x coordinate (left to right).
    group1.sort(key=lambda p: p[0][0])
    group2.sort(key=lambda p: p[0][0])
    return group1, group2
def sigmoid(x, shift=5):
    """Shifted logistic function: ``1 / (1 + exp(-(x - shift)))``.

    Despite the name this is NOT the standard sigmoid — the curve is
    shifted right so that its midpoint sits at ``x == shift`` (default 5,
    preserving the original hard-coded offset): ``sigmoid(5) == 0.5``.
    Used by ronghe() to build the seam blend-weight ramp.

    Parameters:
        x (float or numpy.ndarray): input value(s).
        shift (float): horizontal offset of the curve midpoint.

    Returns:
        float or numpy.ndarray: value(s) in the open interval (0, 1).
    """
    return 1 / (1 + np.exp(-x + shift))
# Seam blending
def ronghe(img_crop_right,img_b_left_half):
    '''
    Weighted-average blending of the partially overlapping trailing columns
    of two strips (roughly the last 49 columns), applied iteratively so the
    blend weight ramps smoothly across the seam.

    :param img_crop_right: right part of the original image (left side of the stitched pair)
    :param img_b_left_half: left half of the warped neighbouring image
    :return: img_crop_right with its trailing columns blended (same width)
    '''
    # Iterate i = 49 .. 1.  Each pass blends the last i columns of the
    # current strip with the same columns of the neighbour:
    #   alpha = sigmoid(i/5) falls from ~0.99 (i=49) to ~0.01 (i=1),
    # so columns closer to the seam accumulate more of the neighbour image.
    # The repeated passes over shrinking windows compound into a gradient.
    for i in range(49,0,-1):
        alpha = sigmoid(i/5)
        betla = 1-alpha
        # Copy the current (already partially blended) strip before slicing.
        img_crop_right_1=img_crop_right.copy()
        img3_crop = img_crop_right_1[:, -i:]
        wraped_half_crop = img_b_left_half[:, -i:]
        merged_img = cv2.addWeighted(img3_crop, alpha, wraped_half_crop, betla, 0)
        # cv_show('merged_img', merged_img)
        # Replace the last i columns with the blended result; the strip's
        # total width is unchanged after the hconcat below.
        img_crop_right = img_crop_right[:, :-i]
        # cv_show('img_crop_right', img_crop_right)
        img_crop_right = cv2.hconcat([img_crop_right, merged_img])
    return img_crop_right
# ==========================================================================
# Offline calibration phase.
# Load one still image per camera (qian=front, you=right, hou=back,
# zuo=left), warp each into the hexagon shape, and compute every stitching
# homography (M34, M34_2, M41, M341, M23, M12) plus the final rectifying
# matrix M.  The real-time loop below reuses these fixed matrices, which is
# what makes the live stitching fast.
# ==========================================================================
print(cv2.__version__)
'''1图片导入并变换分割'''
# Load the four calibration stills.
img1 = cv2.imread('qian2.jpg')
img2 = cv2.imread('you2.jpg')
img3=cv2.imread('hou1.jpg')
img4= cv2.imread('zuo2.jpg')
# NOTE(review): imread returns None if a file is missing — not checked here.
# Warp each image into the hexagon shape used for stitching.
img1 = bianhuan1(img1)
# cv_show('img2', img2)
img2 = bianhuan(img2)
img3=bianhuan(img3)
# cv_show('img4', img4)
img4=bianhuan4(img4)
# cv_show('img4', img4)
# Split each warped 640-wide image into left (cols 0:320) and right
# (cols 321:) halves; column 320 is dropped.
img4_crop_right=img4[:,321:]
img1_crop_left=img1[:,:320]
img1_crop_right=img1[:,321:]
img2_crop_left=img2[:,:320]
img2_crop_right=img2[:,321:]
img3_crop_left=img3[:,:320]
img3_crop_right=img3[:,321:]
img4_crop_left=img4[:,:320]
'''2 34拼接'''
# --- Stitch back(3) + left(4) --------------------------------------------
black1=np.zeros((480, 320, 3), dtype=np.uint8)  # black padding strip
M34,a = sift_M(img3_crop_right,img4_crop_left)  # homography 4 -> 3
#np.linalg.inv(M34)
# NOTE(review): the success flag `a` is never checked anywhere below; if
# matching fails M is the scalar 0 and np.linalg.inv() will raise.
warpimg = cv2.warpPerspective(img4_crop_left,np.linalg.inv(M34),(img4_crop_right.shape[1]*2, img4_crop_left.shape[0])) # warp 4 into 3's frame
#cv_show('warpimg',warpimg)
img_b_left_half = warpimg[:, :320]
img_b_right_half = warpimg[:, 321:]
# Blend the overlap (left side of the stitched pair).
img3_crop_right=ronghe(img3_crop_right,img_b_left_half)  # blend 3 with the warped 4
merged_img34 = cv2.hconcat([img3_crop_right, img_b_right_half])  # append the unblended remainder of warpimg
#cv_show('merged_img34',merged_img34)
img4_crop_left=cv2.hconcat([black1,img4_crop_left])  # pad to the reference size used to rectify merged_img34
M34_2,a = sift_M(merged_img34, img4_crop_left)  # rectification matrix for merged_img34
warpimg34 = cv2.warpPerspective(merged_img34, M34_2,
(merged_img34.shape[1]+ img1_crop_left.shape[1], img4_crop_left.shape[0]))  # rectify merged_img34
# cv_show('warpimg34',warpimg34)
warped_houzuo=warpimg34[:,:-321]  # crop off the black margin introduced by the warp
# cv_show('warped_houzuo',warped_houzuo)
'''3 拼接左前 '''
# --- Stitch left(4) + front(1) -------------------------------------------
M41,a = sift_M(img4_crop_right, img1_crop_left)  # homography 4 -> 1
warpimg = cv2.warpPerspective(img1_crop_left, np.linalg.inv(M41),
(img4_crop_right.shape[1] + img1_crop_left.shape[1], img4_crop_left.shape[0]))
#cv_show('warpimg',warpimg)
img_b_left_half = warpimg[:, :320]
img_b_right_half = warpimg[:, 321:]
img4_crop_right=ronghe(img4_crop_right,img_b_left_half)
# merged_img41 = cv2.addWeighted(img4_crop_roi, 1, img_b_left_half, 1,0)
warped_zuoqian = cv2.hconcat([img4_crop_right, img_b_right_half])
#cv_show('merged_img41',merged_img41)
img_houzuoqian=cv2.hconcat([warped_houzuo,warped_zuoqian])  # back+left+front-left composite
# cv_show('img_houzuoqian',img_houzuoqian)
print(img_houzuoqian.shape)
'''4 将全景左半图与前(左半图)校准对其'''
# --- Align the whole left half of the panorama to the front camera -------
black2=np.zeros((480, 957, 3), dtype=np.uint8)  # padding so sizes match for alignment
# img4_crop_right=img4_crop_right[:,:300]
img1_crop_left=cv2.hconcat([black2,img1_crop_left])
# cv_show('img1_crop_left',img1_crop_left)
M341,a = sift_M(img_houzuoqian, img1_crop_left)
warpimg = cv2.warpPerspective(img_houzuoqian, M341,
(img_houzuoqian.shape[1], img4_crop_left.shape[0]))
# warped_zuo=warpimg[:,500:-30]
warped_zuo=warpimg[:,:-1]
# cv_show('warped_zuo',warped_zuo)
print('左半部分尺寸',warped_zuo.shape)
'''youhou'''
# --- Stitch right(2) + back(3) -------------------------------------------
M23 ,a= sift_M(img2_crop_right, img3_crop_left)
warpimg = cv2.warpPerspective(img3_crop_left, np.linalg.inv(M23),
(img3_crop_right.shape[1] + img3_crop_left.shape[1], img3_crop_left.shape[0]))
#cv_show('warped',warped)
# cv_show('warpimg',warpimg)
warped_left_half = warpimg[:, :320]
warped_right_half = warpimg[:, 321:]
img2_crop_right=ronghe(img2_crop_right,warped_left_half)
you_hou_merged_img = cv2.hconcat([img2_crop_right, warped_right_half])
'''拼接前,右部分'''
# --- Stitch front-right onto the right+back composite --------------------
you_hou_merged_img=cv2.hconcat([img2_crop_left,you_hou_merged_img])
M12 ,a= sift_M(img1_crop_right, you_hou_merged_img)
warped = cv2.warpPerspective(you_hou_merged_img, np.linalg.inv(M12),
(you_hou_merged_img.shape[1] + img2_crop_left.shape[1], img1_crop_right.shape[0]))
warped_left_half = warped[:, :320]
warped_right_half = warped[:, 321:]
# (height/width below are never read — kept to preserve the script verbatim.)
height = 480
width = 320
img1_crop_right=ronghe(img1_crop_right,warped_left_half)
warpimg_you = cv2.hconcat([img1_crop_right, warped_right_half])
# warpimg_you=warpimg_you[:,:-70]
print('右半部分尺寸',warpimg_you.shape)
#cv_show('warpimg_you',warpimg_you)
''''''
# --- Join left and right halves and compute the final rectification ------
res=cv2.hconcat([warped_zuo,warpimg_you])
print('res',res.shape)
# # res[:,747-320:748+320]=img1
#
# Find the hexagon corners of the stitched result and map them to a
# 2024x480 rectangle; M below is reused every frame in the live loop.
group1,group2=six_dian(res)
a = [ group1[0], [[2024,0]], [[2024,480]],group2[0]]
b = [[0,0], [2024,0], [2024,480], [0,480]]
src_pts = np.float32(a)
tge_pts = np.float32(b)
M = cv2.getPerspectiveTransform(src_pts, tge_pts)
'''获得整体左半部分'''
res = cv2.warpPerspective(res, M, (2525, warpimg.shape[0]))
# ==========================================================================
# Real-time capture & stitching loop.
# The two front/right cameras are listed here and re-opened every frame;
# the back/left cameras are opened once below.  All homographies (M34,
# M34_2, M41, M341, M23, M12, M) come from the calibration phase above.
# ==========================================================================
cameras = [
    {'device': '/dev/video13'},
    {'device': '/dev/video15'}
]
cap_usb_hou = cv2.VideoCapture('/dev/video9')   # back camera (kept open)
cap_usb_zuo = cv2.VideoCapture('/dev/video11')  # left camera (kept open)
running = True  # flag controlling the main loop
start_time = time.time()
while running:
    frames_hub = []
    # Cameras are mounted upside down, hence flip(-1) on every frame.
    ret, img_hou = cap_usb_hou.read()
    img_hou = cv2.flip(img_hou, flipCode=-1)
    # NOTE(review): `ret` from the back camera is overwritten by the next
    # read and never checked; a failed read leaves img_hou as None.
    ret, img_zuo = cap_usb_zuo.read()
    img_zuo = cv2.flip(img_zuo, flipCode=-1)
    # Stop after 5 minutes of runtime.
    if time.time() - start_time > 300:
        break
    for i, camera in enumerate(cameras):
        # NOTE(review): re-opening a VideoCapture per frame is slow;
        # opening these two devices once outside the loop would help FPS.
        cap = cv2.VideoCapture(camera['device'])
        if not cap.isOpened():
            print(f"无法打开摄像头 {camera['device']}")
            continue
        ret, frame = cap.read()
        # if i ==0:
        # ret9, frame_usb = cap_usb9.read()
        # else:
        # ret11, frame_usb = cap_usb11.read()
        frames_hub.append(frame)
        cap.release()
        if not ret:
            print(f"读取 {camera['device']} 帧失败")
            break
    # Only stitch when both listed cameras delivered a frame.
    if len(frames_hub) == 2:
        img_qian=frames_hub[1]   # front
        img_qian = cv2.flip(img_qian, flipCode=-1)
        img_you=frames_hub[0]    # right
        img_you = cv2.flip(img_you, flipCode=-1)
        # Hexagon-warp all four live frames, then split into halves —
        # mirroring the calibration phase exactly.
        img1 = bianhuan1(img_qian)
        img2 = bianhuan(img_you)
        img3 = bianhuan(img_hou)
        img4 = bianhuan4(img_zuo)
        img4_crop_right=img4[:,321:]
        img1_crop_left=img1[:,:320]
        img1_crop_right=img1[:,321:]
        img2_crop_left=img2[:,:320]
        img2_crop_right=img2[:,321:]
        img3_crop_left=img3[:,:320]
        img3_crop_right=img3[:,321:]
        img4_crop_left=img4[:,:320]
        '''对 2,3进行拼接'''
        # Stitch back(3)+left(4) using the precomputed M34.
        warpimg = cv2.warpPerspective(img4_crop_left, np.linalg.inv(M34),
        (img4_crop_right.shape[1] * 2, img4_crop_left.shape[0]))  # warp 4 into 3's frame
        # cv_show('warpimg',warpimg)
        img_b_left_half = warpimg[:, :320]
        img_b_right_half = warpimg[:, 321:]
        # Blend the overlap (left side of the stitched pair).
        img3_crop_right = ronghe(img3_crop_right, img_b_left_half)  # blend 3 with the warped 4
        merged_img34 = cv2.hconcat([img3_crop_right, img_b_right_half])  # append the unblended remainder
        # cv_show('merged_img34',merged_img34)
        img4_crop_left = cv2.hconcat([black1, img4_crop_left])  # pad to the rectification reference size
        warpimg34 = cv2.warpPerspective(merged_img34, M34_2,
        (merged_img34.shape[1] + img1_crop_left.shape[1],
        img4_crop_left.shape[0]))  # rectify merged_img34
        # cv_show('warpimg34',warpimg34)
        warped_houzuo = warpimg34[:, :-321]  # crop the black margin from the warp
        # Stitch left(4)+front(1) using M41.
        warpimg = cv2.warpPerspective(img1_crop_left, np.linalg.inv(M41),
        (img4_crop_right.shape[1] + img1_crop_left.shape[1], img4_crop_left.shape[0]))
        # cv_show('warpimg',warpimg)
        img_b_left_half = warpimg[:, :320]
        img_b_right_half = warpimg[:, 321:]
        img4_crop_right = ronghe(img4_crop_right, img_b_left_half)
        # merged_img41 = cv2.addWeighted(img4_crop_roi, 1, img_b_left_half, 1,0)
        warped_zuoqian = cv2.hconcat([img4_crop_right, img_b_right_half])
        # cv_show('merged_img41',merged_img41)
        img_houzuoqian = cv2.hconcat([warped_houzuo, warped_zuoqian])
        img1_crop_left = cv2.hconcat([black2, img1_crop_left])
        # cv_show('img1_crop_left',img1_crop_left)
        # Align the whole left half using M341.
        warpimg = cv2.warpPerspective(img_houzuoqian, M341,
        (img_houzuoqian.shape[1], img4_crop_left.shape[0]))
        # warped_zuo=warpimg[:,500:-30]
        warped_zuo = warpimg[:, :-1]
        # cv_show('warped_zuo',warped_zuo)
        print('左半部分尺寸', warped_zuo.shape)
        '''youhou'''
        # Stitch right(2)+back(3) using M23.
        warpimg = cv2.warpPerspective(img3_crop_left, np.linalg.inv(M23),
        (img3_crop_right.shape[1] + img3_crop_left.shape[1], img3_crop_left.shape[0]))
        # cv_show('warped',warped)
        # cv_show('warpimg',warpimg)
        warped_left_half = warpimg[:, :320]
        warped_right_half = warpimg[:, 321:]
        img2_crop_right = ronghe(img2_crop_right, warped_left_half)
        you_hou_merged_img = cv2.hconcat([img2_crop_right, warped_right_half])
        '''拼接前,右部分'''
        # Stitch front-right onto the right+back composite using M12.
        you_hou_merged_img = cv2.hconcat([img2_crop_left, you_hou_merged_img])
        warped = cv2.warpPerspective(you_hou_merged_img, np.linalg.inv(M12),
        (you_hou_merged_img.shape[1] + img2_crop_left.shape[1], img1_crop_right.shape[0]))
        warped_left_half = warped[:, :320]
        warped_right_half = warped[:, 321:]
        # (height/width below are never read — kept to preserve the script verbatim.)
        height = 480
        width = 320
        img1_crop_right = ronghe(img1_crop_right, warped_left_half)
        warpimg_you = cv2.hconcat([img1_crop_right, warped_right_half])
        # warpimg_you=warpimg_you[:,:-70]
        print('右半部分尺寸', warpimg_you.shape)
        # cv_show('warpimg_you',warpimg_you)
        # Join the two halves and apply the final rectification M.
        res = cv2.hconcat([warped_zuo, warpimg_you])
        # cv_show('res', res)
        print('res', res.shape)
        # # res[:,747-320:748+320]=img1
        #
        '''获得整体左半部分'''
        res = cv2.warpPerspective(res, M, (2525, warpimg.shape[0]))
        # Downscale, trim the border, and rotate 90° for display.
        resized_img = cv2.resize(res, (1200, 228))
        resized_img = resized_img[35:-35, :-70]
        res = np.rot90(resized_img, 1)
        # # 使用cv2.resize进行缩放
        # res = cv2.resize(res, (640, 1200), interpolation=cv2.INTER_AREA)
        cv2.imshow('res', res)
        cv2.waitKey(1)
# Release the persistent captures and close all windows on exit.
cap_usb_hou.release()
cap_usb_zuo.release()
cv2.destroyAllWindows()