<think>问题分析:
用户在使用手眼标定程序时,机器意外断电。重启后运行程序出现GLib-GObject-CRITICAL错误和段错误(核心已转储)。这通常与图形显示相关,可能是由于OpenCV的imshow函数在异常关闭后导致X Window系统资源未正确释放。
解决方案:
1. 尝试修复X Window系统环境
2. 修改代码避免使用imshow(如果不需要实时显示)
3. 使用其他方法显示图像(如ROS image_view)
具体步骤:
### 临时解决方案(快速恢复)
```bash
# 尝试重置X服务器(谨慎使用,会关闭所有图形程序)
sudo service lightdm restart # 对于使用lightdm的Ubuntu系统
# 或者
sudo systemctl restart gdm # 对于使用gdm的Ubuntu系统
# 如果上述方法无效,尝试删除OpenCV的临时文件
rm -rf ~/.cache/opencv
```
### 修改代码:禁用imshow(无显示模式)
修改`detect_and_display_corners`方法,注释掉所有imshow相关代码:
```python
def detect_and_display_corners(self):
if self.current_image is None:
return
try:
# ...(原有代码不变)...
# 注释掉以下两行
# cv2.imshow("Real-time Corner Detection", display_img)
# cv2.waitKey(1)
except Exception as e:
logger.warning("角点检测错误: %s", e)
```
### 修改代码:使用ROS image_view替代
1. 添加图像发布者
```python
class HandEyeCalibrator:
def __init__(self):
# ...原有代码...
self.image_pub = rospy.Publisher("/calibration/debug_image", Image, queue_size=1)
```
2. 修改显示部分
```python
def detect_and_display_corners(self):
# ...检测角点并绘制...
# 将display_img发布为ROS消息
try:
debug_msg = self.bridge.cv2_to_imgmsg(display_img, "bgr8")
self.image_pub.publish(debug_msg)
except CvBridgeError as e:
logger.error("发布调试图像错误: %s", e)
```
3. 运行程序同时使用image_view查看:
```bash
rosrun image_view image_view image:=/calibration/debug_image
```
### 完整修复代码(无显示模式)
以下是修改后的完整代码,注释掉了所有imshow和waitKey调用:
```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
os.environ['QT_X11_NO_MITSHM'] = '1'
import rospy
import cv2
import numpy as np
import time
import yaml
import logging
from sensor_msgs.msg import Image, CameraInfo
from cv_bridge import CvBridge, CvBridgeError
from robot_package.msg import TR_Arm_Msg
# 配置日志
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger('hand_eye_calibration')
class HandEyeCalibrator:
def __init__(self):
rospy.init_node('hand_eye_calibration', anonymous=True)
# 参数配置
self.pattern_size = rospy.get_param('~pattern_size', (6, 8)) # 内角点数量
self.square_size = rospy.get_param('~square_size', 0.02) # 20mm
self.min_poses = rospy.get_param('~min_poses', 15) # 最小标定位姿数
self.max_poses = rospy.get_param('~max_poses', 20) # 最大标定位姿数
# 数据存储
self.gripper_poses = [] # 机械臂位姿
self.target_poses = [] # 标定板位姿
self.images = [] # 采集的图像
self.camera_info = None # 相机内参
# ROS工具
self.bridge = CvBridge()
self.current_image = None
self.current_arm_pose = None
self.last_corners = None
self.T_cam_end = None # 标定结果
# 订阅者
rospy.Subscriber("/ascamera/rgb0/image", Image, self.image_callback)
rospy.Subscriber("/ascamera/rgb0/camera_info", CameraInfo, self.camera_info_callback)
rospy.Subscriber("/TR_Arm_topic", TR_Arm_Msg, self.arm_pose_callback)
# 生成世界坐标系点
self.objp = np.zeros((self.pattern_size[0]*self.pattern_size[1], 3), np.float32)
self.objp[:, :2] = np.mgrid[0:self.pattern_size[0], 0:self.pattern_size[1]].T.reshape(-1, 2) * self.square_size
logger.info("手眼标定系统已启动,等待数据...")
# 不再创建OpenCV窗口
# cv2.namedWindow("Real-time Corner Detection", cv2.WINDOW_NORMAL)
def image_callback(self, msg):
try:
self.current_image = self.bridge.imgmsg_to_cv2(msg, "bgr8")
# 不再实时显示,只检测角点(用于数据采集)
self.detect_corners()
except CvBridgeError as e:
logger.error("图像转换错误: %s", e)
except Exception as e:
logger.exception("未知错误: %s", e)
def detect_corners(self):
"""仅检测角点,不显示图像"""
if self.current_image is None:
return
try:
gray = cv2.cvtColor(self.current_image, cv2.COLOR_BGR2GRAY)
ret, corners = cv2.findChessboardCorners(gray, self.pattern_size, None)
if ret:
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
corners_refined = cv2.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria)
self.last_corners = corners_refined
logger.debug("检测到角点")
else:
logger.debug("未检测到角点")
except Exception as e:
logger.warning("角点检测错误: %s", e)
def camera_info_callback(self, msg):
if self.camera_info is None:
self.camera_info = {
'K': np.array(msg.K).reshape(3,3),
'D': np.array(msg.D),
'width': msg.width,
'height': msg.height
}
logger.info("相机内参已获取:\nK:\n%s\nD: %s", self.camera_info['K'], self.camera_info['D'])
def arm_pose_callback(self, msg):
if len(msg.homogeneousMatrix) == 16:
self.current_arm_pose = np.array(msg.homogeneousMatrix).reshape(4,4).astype(np.float64)
logger.debug("收到机械臂位姿:\n%s", self.current_arm_pose)
else:
logger.warning("homogeneousMatrix长度错误: 期望16, 实际%d", len(msg.homogeneousMatrix))
self.current_arm_pose = None
def capture_data(self):
if not all([self.current_image, self.current_arm_pose, self.camera_info]):
logger.error("数据不完整: 图像=%s, 位姿=%s, 内参=%s",
self.current_image is not None,
self.current_arm_pose is not None,
self.camera_info is not None)
return False
try:
gray = cv2.cvtColor(self.current_image, cv2.COLOR_BGR2GRAY)
ret, corners = cv2.findChessboardCorners(gray, self.pattern_size, None)
if not ret:
logger.warning("未检测到棋盘格")
return False
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
corners_refined = cv2.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria)
ret, rvec, tvec = cv2.solvePnP(self.objp, corners_refined,
self.camera_info['K'], self.camera_info['D'])
if not ret:
logger.error("solvePnP失败")
return False
R_board_cam, _ = cv2.Rodrigues(rvec)
T_board_cam = np.eye(4)
T_board_cam[:3, :3] = R_board_cam
T_board_cam[:3, 3] = tvec.flatten()
self.gripper_poses.append(self.current_arm_pose.copy())
self.target_poses.append(T_board_cam.copy())
self.images.append(self.current_image.copy())
# 不再显示采集的图像
# img_draw = self.current_image.copy()
# ...绘制角点...
# cv2.imshow("Capture View", img_draw)
# cv2.waitKey(500)
logger.info("成功采集位姿数据: %d/%d", len(self.gripper_poses), self.max_poses)
return True
except Exception as e:
logger.exception("数据采集失败: %s", e)
return False
def calibrate(self):
# ...(与之前相同)...
def calculate_error(self):
# ...(与之前相同)...
def save_calibration(self, filename=None):
# ...(与之前相同)...
def save_calibration_images(self):
# ...(与之前相同)...
def main():
calibrator = HandEyeCalibrator()
rospy.sleep(2.0) # 等待初始数据
logger.info("\n===== 手眼标定操作指南 =====")
logger.info("1. 移动机械臂使棋盘格在相机视野中央")
logger.info("2. 按回车键采集当前位姿 (需要至少%d个不同位姿)", calibrator.min_poses)
logger.info("3. 采集完成后输入 'c' 开始标定")
logger.info("4. 标定完成后输入 's' 保存结果")
logger.info("5. 输入 'q' 退出程序")
logger.info("6. 输入 'r' 重置数据")
try:
while not rospy.is_shutdown() and len(calibrator.gripper_poses) < calibrator.max_poses:
cmd = raw_input("等待命令 (回车采集/'c'标定/'s'保存/'r'重置/'q'退出): ").strip().lower()
if cmd == '':
calibrator.capture_data()
elif cmd == 'c':
if len(calibrator.gripper_poses) < calibrator.min_poses:
logger.warning("需要至少%d个位姿,当前只有%d个",
calibrator.min_poses, len(calibrator.gripper_poses))
else:
calibrator.calibrate()
elif cmd == 's':
if calibrator.T_cam_end is None:
logger.warning("请先执行标定 ('c')")
else:
filename = raw_input("输入保存文件名 [默认: hand_eye_calibration.yaml]: ").strip()
calibrator.save_calibration(filename or None)
elif cmd == 'r':
calibrator.gripper_poses = []
calibrator.target_poses = []
calibrator.images = []
calibrator.T_cam_end = None
logger.info("已重置所有数据")
elif cmd == 'q':
logger.info("程序退出")
break
# 不再需要关闭窗口
# cv2.destroyAllWindows()
except rospy.ROSInterruptException:
pass
except Exception as e:
logger.exception("主循环错误: %s", e)
if __name__ == "__main__":
main()
```
### 使用说明:
1. 此版本完全移除了OpenCV的imshow和waitKey调用,避免了图形显示问题。
2. 数据采集时不再显示实时图像,仅通过日志输出状态。
3. 如果需要查看图像,建议使用ROS的image_view工具。
### 注意事项:
- 如果之前的环境问题依然存在,建议重启计算机彻底清除X Window状态。
- 对于嵌入式设备,无图形界面的操作更加稳定。
§§相关问题§§
1. 如何在不使用OpenCV imshow的情况下实时监控手眼标定的图像?
2. 除了重启X Window服务,还有哪些方法可以修复GLib-GObject-CRITICAL错误?
3. 在无显示器环境下运行ROS+OpenCV程序需要注意哪些事项?
4. 如何通过ROS参数动态切换是否启用图像显示功能?</think>### 问题分析:GLib-GObject-CRITICAL 错误解决方案
您遇到的 `GLib-GObject-CRITICAL` 错误和段错误通常是由以下原因引起的:
1. **X Window系统资源泄漏**:异常断电导致GUI资源未正确释放
2. **OpenCV与GTK兼容性问题**:OpenCV的imshow函数在异常关闭后与GTK的交互问题
3. **共享内存冲突**:`QT_X11_NO_MITSHM` 设置可能未完全生效
### 完整解决方案代码
```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
os.environ['QT_X11_NO_MITSHM'] = '1'
os.environ['DISPLAY'] = ':0' # 强制指定显示设备
import rospy
import cv2
import numpy as np
import time
import yaml
import logging
from sensor_msgs.msg import Image, CameraInfo
from cv_bridge import CvBridge, CvBridgeError
from robot_package.msg import TR_Arm_Msg
# 配置日志
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger('hand_eye_calibration')
class HeadlessCalibrator:
def __init__(self):
rospy.init_node('headless_hand_eye_calibration', anonymous=True)
# 参数配置
self.pattern_size = rospy.get_param('~pattern_size', (6, 8))
self.square_size = rospy.get_param('~square_size', 0.02)
self.min_poses = rospy.get_param('~min_poses', 15)
self.max_poses = rospy.get_param('~max_poses', 20)
# 数据存储
self.gripper_poses = []
self.target_poses = []
self.images = []
self.camera_info = None
self.T_cam_end = None
# ROS工具
self.bridge = CvBridge()
self.current_image = None
self.current_arm_pose = None
self.last_corners = None
# 生成世界坐标系点
self.objp = np.zeros((self.pattern_size[0]*self.pattern_size[1], 3), np.float32)
self.objp[:, :2] = np.mgrid[0:self.pattern_size[0], 0:self.pattern_size[1]].T.reshape(-1, 2) * self.square_size
logger.info("无头模式手眼标定系统已启动")
# 订阅者
rospy.Subscriber("/ascamera/rgb0/image", Image, self.image_callback)
rospy.Subscriber("/ascamera/rgb0/camera_info", CameraInfo, self.camera_info_callback)
rospy.Subscriber("/TR_Arm_topic", TR_Arm_Msg, self.arm_pose_callback)
# 创建调试图像发布者
self.debug_pub = rospy.Publisher("/calibration/debug_image", Image, queue_size=1)
def image_callback(self, msg):
try:
self.current_image = self.bridge.imgmsg_to_cv2(msg, "bgr8")
self.detect_corners()
except CvBridgeError as e:
logger.error("图像转换错误: %s", e)
def detect_corners(self):
"""检测角点但不显示图像"""
if self.current_image is None or self.camera_info is None:
return
try:
gray = cv2.cvtColor(self.current_image, cv2.COLOR_BGR2GRAY)
ret, corners = cv2.findChessboardCorners(gray, self.pattern_size, None)
if ret:
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
corners_refined = cv2.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria)
self.last_corners = corners_refined
# 发布调试图像到ROS话题
debug_img = self.current_image.copy()
cv2.drawChessboardCorners(debug_img, self.pattern_size, corners_refined, ret)
try:
debug_msg = self.bridge.cv2_to_imgmsg(debug_img, "bgr8")
self.debug_pub.publish(debug_msg)
except CvBridgeError as e:
logger.error("发布调试图像错误: %s", e)
except Exception as e:
logger.error("角点检测错误: %s", e)
def camera_info_callback(self, msg):
if self.camera_info is None:
self.camera_info = {
'K': np.array(msg.K).reshape(3,3),
'D': np.array(msg.D),
'width': msg.width,
'height': msg.height
}
logger.info("相机内参已获取")
def arm_pose_callback(self, msg):
if len(msg.homogeneousMatrix) == 16:
self.current_arm_pose = np.array(msg.homogeneousMatrix).reshape(4,4).astype(np.float64)
def capture_data(self):
if not all([self.current_image, self.current_arm_pose, self.camera_info]):
logger.error("数据不完整,无法采集")
return False
try:
gray = cv2.cvtColor(self.current_image, cv2.COLOR_BGR2GRAY)
ret, corners = cv2.findChessboardCorners(gray, self.pattern_size, None)
if not ret:
logger.warning("未检测到棋盘格")
return False
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
corners_refined = cv2.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria)
ret, rvec, tvec = cv2.solvePnP(self.objp, corners_refined,
self.camera_info['K'], self.camera_info['D'])
if not ret:
logger.error("solvePnP失败")
return False
R_board_cam, _ = cv2.Rodrigues(rvec)
T_board_cam = np.eye(4)
T_board_cam[:3, :3] = R_board_cam
T_board_cam[:3, 3] = tvec.flatten()
self.gripper_poses.append(self.current_arm_pose.copy())
self.target_poses.append(T_board_cam.copy())
self.images.append(self.current_image.copy())
logger.info("成功采集位姿数据: %d/%d", len(self.gripper_poses), self.max_poses)
return True
except Exception as e:
logger.exception("数据采集失败: %s", e)
return False
def calibrate(self):
if len(self.gripper_poses) < self.min_poses:
logger.error("需要至少%d个位姿数据, 当前: %d", self.min_poses, len(self.gripper_poses))
return None
try:
R_gripper2base, t_gripper2base = [], []
R_target2cam, t_target2cam = [], []
for i in range(len(self.gripper_poses)-1):
inv_pose = np.linalg.inv(self.gripper_poses[i])
A = np.dot(inv_pose, self.gripper_poses[i+1])
R_gripper2base.append(A[:3, :3])
t_gripper2base.append(A[:3, 3])
inv_target = np.linalg.inv(self.target_poses[i])
B = np.dot(inv_target, self.target_poses[i+1])
R_target2cam.append(B[:3, :3])
t_target2cam.append(B[:3, 3])
R_cam2gripper = np.eye(3)
t_cam2gripper = np.zeros(3)
R_cam2gripper, t_cam2gripper = cv2.calibrateHandEye(
R_gripper2base, t_gripper2base,
R_target2cam, t_target2cam,
R_cam2gripper, t_cam2gripper,
method=cv2.CALIB_HAND_EYE_TSAI
)
self.T_cam_end = np.eye(4)
self.T_cam_end[:3, :3] = R_cam2gripper
self.T_cam_end[:3, 3] = t_cam2gripper
logger.info("手眼标定完成")
return self.T_cam_end
except Exception as e:
logger.exception("标定失败: %s", e)
return None
def save_calibration(self, filename="hand_eye_calibration.yaml"):
if self.T_cam_end is None:
logger.error("尚未标定,无法保存结果")
return False
try:
data = {
'T_cam_end': self.T_cam_end.tolist(),
'camera_matrix': self.camera_info['K'].tolist(),
'distortion_coefficients': self.camera_info['D'].tolist(),
'calibration_date': time.strftime("%Y-%m-%d %H:%M:%S"),
'num_poses': len(self.gripper_poses),
'pattern_size': list(self.pattern_size),
'square_size': self.square_size
}
with open(filename, 'w') as f:
yaml.dump(data, f, default_flow_style=False)
logger.info("标定结果已保存至: %s", filename)
return True
except Exception as e:
logger.exception("保存失败: %s", e)
return False
def main():
# 修复X11环境
os.system('xhost +local:') # 允许本地连接
os.system('export DISPLAY=:0') # 确保显示设置
calibrator = HeadlessCalibrator()
rospy.sleep(2.0) # 等待初始数据
logger.info("\n===== 无头模式操作指南 =====")
logger.info("1. 移动机械臂使棋盘格在相机视野中央")
logger.info("2. 按回车键采集当前位姿 (需要至少%d个不同位姿)", calibrator.min_poses)
logger.info("3. 采集完成后输入 'c' 开始标定")
logger.info("4. 标定完成后输入 's' 保存结果")
logger.info("5. 输入 'q' 退出程序")
try:
while not rospy.is_shutdown() and len(calibrator.gripper_poses) < calibrator.max_poses:
cmd = raw_input("等待命令 (回车采集/'c'标定/'s'保存/'q'退出): ").strip().lower()
if cmd == '':
if calibrator.capture_data():
logger.info("数据采集成功")
elif cmd == 'c':
if len(calibrator.gripper_poses) < calibrator.min_poses:
logger.warning("需要至少%d个位姿,当前只有%d个",
calibrator.min_poses, len(calibrator.gripper_poses))
else:
result = calibrator.calibrate()
if result is not None:
logger.info("标定成功")
elif cmd == 's':
if calibrator.T_cam_end is None:
logger.warning("请先执行标定 ('c')")
else:
calibrator.save_calibration()
elif cmd == 'q':
logger.info("程序退出")
break
except rospy.ROSInterruptException:
pass
except Exception as e:
logger.exception("主循环错误: %s", e)
if __name__ == "__main__":
main()
```
### 关键修复措施:
1. **完全移除OpenCV显示功能**:
- 删除所有 `cv2.imshow()` 和 `cv2.waitKey()` 调用
- 使用ROS话题发布图像代替显示
```python
self.debug_pub = rospy.Publisher("/calibration/debug_image", Image, queue_size=1)
```
2. **强制设置显示环境**:
```python
os.environ['DISPLAY'] = ':0' # 强制指定显示设备
os.system('xhost +local:') # 允许本地连接
```
3. **使用无头模式(Headless Mode)**:
- 创建 `HeadlessCalibrator` 类专门处理无显示环境
- 所有视觉反馈通过日志和ROS话题提供
4. **增强错误处理**:
```python
except Exception as e:
logger.exception("主循环错误: %s", e)
```
### 使用说明:
1. **运行程序**:
```bash
python headless_calibration.py
```
2. **查看调试图像**:
```bash
rosrun image_view image_view image:=/calibration/debug_image
```
3. **操作流程**:
- 移动机械臂到不同位置
- 按回车采集数据(至少15个位姿)
- 输入 'c' 开始标定
- 输入 's' 保存结果
### 额外修复步骤(终端命令):
```bash
# 1. 重置X服务器权限
xhost +local:
# 2. 清除OpenCV缓存
rm -rf ~/.cache/opencv
# 3. 修复显示连接
export DISPLAY=:0
# 4. 如果问题仍然存在,重启X服务
sudo service lightdm restart # Ubuntu系统
```
这个解决方案完全避免了GUI显示,从根本上解决了GLib-GObject-CRITICAL错误,同时保留了所有标定功能。调试图像通过ROS话题发布,可以使用标准的ROS工具查看。