Fixing the "triggered a breakpoint" error when using findContours in Visual Studio

This post resolves a crash ("...exe has triggered a breakpoint") that appears when calling findContours in Debug mode. The fix is to make the linked OpenCV import library (and its load order) match the build configuration. It also verifies that findContours accepts Mat outputs.


I have recently been working on a contour-detection project that uses findContours to extract the contours of a binary image. In Debug mode the program kept aborting with the message "...exe has triggered a breakpoint." The material I found online mostly blamed dynamic-library linking, but none of it solved my problem. I was about to fall back to the old C-style cvFindContours, though that felt unsatisfying given that I am on OpenCV 3.0, so I decided to get to the bottom of it.

Going back over the OpenCV project settings with those hints in mind, I found the cause. In the linker's Additional Dependencies I had added both the release and the debug import library (written here as world_ts300.lib and world_ts300d.lib; for OpenCV 3.0 these would normally be opencv_world300.lib and opencv_world300d.lib). In other words, both the Release and the Debug libraries were linked while the project was built and run in Debug mode. For many functions this seems to make no difference, but for findContours it does, presumably because the debug and release runtimes manage the std::vector outputs differently, so mixing them corrupts memory. Moving the debug library (world_ts300d) ahead of the release one (world_ts300) fixed the crash; simply deleting one of the two also works, as long as the one you keep matches the configuration you build, Debug or Release.

The general lesson is to keep the configuration clean: each project configuration should link only the libraries built for it, rather than "the more the better" as some online posts suggest. This is just my take on the problem, not a universal truth; my level is limited, so please forgive any mistakes.
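Beyond editing the linker settings by hand, the same one-library-per-configuration rule can be enforced in code with MSVC's #pragma comment(lib, ...) directive. This is only a sketch: it assumes the standard OpenCV 3.0 import-library names opencv_world300.lib and opencv_world300d.lib, so substitute whatever libraries your project actually uses.

// Link only the OpenCV import library that matches the current build configuration.
#ifdef _DEBUG
#pragma comment(lib, "opencv_world300d.lib")   // Debug build: debug library only
#else
#pragma comment(lib, "opencv_world300.lib")    // Release build: release library only
#endif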

The corrected project configuration (linker settings):

Part of the code:
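The code itself was shown as a screenshot and is not preserved here. As a minimal sketch of the kind of program being discussed (the file name input.png and the Canny thresholds are my own assumptions), the Debug-mode call that used to trigger the breakpoint looks roughly like this:

#include <opencv2/opencv.hpp>
#include <vector>
using namespace cv;
using namespace std;

int main()
{
    // Assumed input file name; replace with your own image.
    Mat src = imread("input.png", IMREAD_GRAYSCALE);
    if (src.empty()) return -1;

    // Binarize the image with Canny, then extract its contours.
    Mat edges;
    Canny(src, edges, 100, 200);

    vector<vector<Point>> contours;
    vector<Vec4i> hierarchy;
    findContours(edges, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE);

    // Draw the extracted contours for inspection.
    Mat result = Mat::zeros(edges.size(), CV_8UC3);
    drawContours(result, contours, -1, Scalar(0, 255, 0), 1);
    imshow("contours", result);
    waitKey(0);
    return 0;
}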

Some commenters suggested declaring the outputs as vector<Mat> contours; and Mat hierarchy;. That did not fix the crash either, although the code does run. Looking at the findContours source, the vector it expects is the ordinary std::vector, so the claim circulating online that the two are somehow different types is wrong. It also confirms that findContours does support Mat outputs.
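Because the contour and hierarchy parameters are declared as OutputArrayOfArrays and OutputArray, the Mat-based variant the commenters proposed also compiles and runs. A short fragment, reusing the edges image from the sketch above:

// The same call with Mat-based outputs; it runs, but it does not
// make the Debug-mode breakpoint go away.
vector<Mat> contoursAsMat;
Mat hierarchyMat;
findContours(edges, contoursAsMat, hierarchyMat, RETR_TREE, CHAIN_APPROX_SIMPLE);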

Program output after the fix (Canny edge image):



Contour output, with some pruning applied (see the sketch below):
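The post does not show how the pruning was done; one plausible approach, assuming the goal is to discard small spurious contours, is to filter by contourArea before drawing. This fragment reuses the contours and result variables from the sketch above, and the area threshold is an assumption to be tuned for your images:

// Keep only contours whose area exceeds a chosen threshold.
const double minArea = 50.0;
vector<vector<Point>> kept;
for (size_t i = 0; i < contours.size(); ++i)
{
    if (contourArea(contours[i]) >= minArea)
        kept.push_back(contours[i]);
}
drawContours(result, kept, -1, Scalar(0, 0, 255), 2);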


That is how I resolved the issue. Because opinions online vary widely, I wanted to give a relatively more complete account of the fix.
