A numeric comparison was attempted on "$(_DeviceSdkVersion)", which evaluates to "" instead of a number, in condition "$(_DeviceSdkVersion) >= 21".

This post describes a deployment error encountered while developing with Xamarin.Android and how to resolve it: pointing Genymotion's ADB configuration at the correct Android SDK path fixes the failed deployment.


I have recently been working with Xamarin.Android and using the Genymotion emulator, because the emulator that ships with Visual Studio 2015 is too slow.

Deployment failed with a "deployment error". The Output window showed that adb.exe was failing to run. (The build obtains the connected device's API level through adb, so when adb fails, _DeviceSdkVersion evaluates to an empty string and the numeric comparison above is what blows up.)
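Before changing anything, it can help to confirm from a command prompt which adb.exe is actually being picked up and whether the attached emulator reports a numeric API level (_DeviceSdkVersion is presumably derived from the device's ro.build.version.sdk property). A minimal check, assuming adb.exe is on the PATH:

    where adb
    adb version
    adb devices
    adb shell getprop ro.build.version.sdk

If "where adb" resolves to Genymotion's bundled copy rather than the SDK's platform-tools folder, or if "adb devices" does not list the Genymotion virtual device, deployment will fail exactly as described above.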

The cause is that Genymotion uses its own bundled ADB tools by default; switching it over to the Android SDK's ADB is all that is needed.
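The usual mechanism behind this is that an adb client whose version differs from the running adb server kills and restarts the server, so Genymotion's bundled adb and the SDK's adb can keep knocking each other over. After applying the change in the steps below, restart the adb server once so that only the SDK's copy is running (assuming the SDK's platform-tools folder is on the PATH):

    adb kill-server
    adb start-server
    adb devices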

The steps to fix it are as follows:


In Genymotion, open Settings > ADB, select "Use custom Android SDK tools", and set the Android SDK path to the SDK that the Android SDK Manager downloaded and installed (the same SDK Visual Studio builds against).
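The exact SDK location varies by machine; on a typical Visual Studio 2015 setup it is something like C:\Program Files (x86)\Android\android-sdk (an example path, not a requirement). Whatever path you pick, make sure it actually contains platform-tools\adb.exe, for example:

    dir "C:\Program Files (x86)\Android\android-sdk\platform-tools\adb.exe"

After saving the setting and restarting the adb server, redeploy from Visual Studio; the "_DeviceSdkVersion evaluates to ''" error should no longer appear.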

else: print(f"任务 {i+1} 失败") # 任务间延迟,让系统冷却 if i < total_tasks - 1: print("\n等待5秒,准备下一个任务...") time.sleep(5) def setup_logging(): """配置日志系统""" log_dir = "logs" if not os.path.exists(log_dir): os.makedirs(log_dir) timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") log_file = os.path.join(log_dir, f"video_processor_{timestamp}.log") logging.basicConfig( level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', handlers=[ logging.FileHandler(log_file), logging.StreamHandler() ] ) logger = logging.getLogger() logger.info(f"日志系统初始化完成, 日志文件: {log_file}") return logger, log_file def install_termux_dependencies(): """安装Termux所需的依赖""" print("正在安装Termux依赖...") commands = [ "pkg update && pkg upgrade -y", "pkg install python libjpeg-turbo libvulkan vulkan-loader-android ffmpeg -y", "pkg install vulkan-tools vulkan-validation-layers -y", "pkg install ocl-icd opencl-headers -y" ] for cmd in commands: print(f"执行: {cmd}") result = subprocess.run(cmd, shell=True) if result.returncode != 0: print(f"命令执行失败: {cmd}") return False print("Termux依赖安装完成") return True def verify_gpu_support(): """验证GPU支持情况""" print("\n验证GPU支持:") # 验证MediaCodec支持 print("\n1. MediaCodec支持:") result = subprocess.run(["ffmpeg", "-hwaccels"], capture_output=True, text=True) if "mediacodec" in result.stdout: print(" ✓ 支持MediaCodec硬件加速") else: print(" ✗ 不支持MediaCodec硬件加速") # 验证Vulkan支持 print("\n2. Vulkan支持:") try: result = subprocess.run(["vulkaninfo"], capture_output=True, text=True) if "deviceName" in result.stdout: print(" ✓ 支持Vulkan API") else: print(" ✗ 不支持Vulkan API") except FileNotFoundError: print(" ✗ vulkaninfo未安装,无法验证Vulkan支持") # 验证OpenCL支持 print("\n3. OpenCL支持:") try: result = subprocess.run(["clinfo"], capture_output=True, text=True) if "Platform Name" in result.stdout: print(" ✓ 支持OpenCL") else: print(" ✗ 不支持OpenCL") except FileNotFoundError: print(" ✗ clinfo未安装,无法验证OpenCL支持") print("\n验证完成") def setup_termux_gpu_acceleration(): """设置Termux GPU加速环境""" print("="*50) print("Termux GPU加速视频处理设置") print("="*50) # 安装基础依赖 if not install_termux_dependencies(): print("依赖安装失败,无法继续设置") return # 验证GPU支持 verify_gpu_support() # 下载并编译CLBlast print("\n编译安装CLBlast...") commands = [ "pkg install git cmake make -y", "git clone https://github.com/CNugteren/CLBlast", "cd CLBlast && mkdir build && cd build", "cmake .. -DCMAKE_INSTALL_PREFIX=$PREFIX", "make install" ] for cmd in commands: print(f"执行: {cmd}") result = subprocess.run(cmd, shell=True) if result.returncode != 0: print(f"命令执行失败: {cmd}") return print("\nGPU加速环境设置完成!") print("现在可以使用以下命令进行硬件加速视频处理:") print("ffmpeg -hwaccel mediacodec -i input.mp4 -c:v h264_mediacodec output.mp4") # 创建示例批处理脚本 with open("gpu_batch_process.sh", "w") as f: f.write("""#!/bin/bash # GPU加速批处理脚本 for f in *.mp4; do echo "处理: $f" ffmpeg -hwaccel mediacodec -i "$f" -c:v h264_mediacodec "gpu_$f" done echo "所有视频处理完成!" 
""") print("\n已创建批处理脚本: gpu_batch_process.sh") print("使用命令运行: bash gpu_batch_process.sh") def main(): # 设置日志 logger, log_file = setup_logging() # 创建参数解析器 parser = argparse.ArgumentParser(description="专业视频帧替换工具", formatter_class=argparse.RawTextHelpFormatter) parser.add_argument("--config", help="配置文件路径", default="") parser.add_argument("--save-config", help="保存默认配置到文件", action="store_true") parser.add_argument("--background", help="后台运行模式", action="store_true") parser.add_argument("--batch", help="批量处理模式,指定批量配置文件", default="") parser.add_argument("--preview-main", type=int, help="预览主视频指定帧", default=-1) parser.add_argument("--preview-sub", type=int, help="预览副视频指定帧", default=-1) parser.add_argument("--output-dir", help="批量处理输出目录", default="batch_output") parser.add_argument("--enable-gpu", help="启用GPU加速处理", action="store_true") parser.add_argument("--enable-mem-monitor", help="启用内存监控", action="store_true") parser.add_argument("--setup-termux", help="设置Termux GPU加速环境", action="store_true") args = parser.parse_args() # Termux GPU加速设置 if args.setup_termux: setup_termux_gpu_acceleration() return # 保存默认配置 if args.save_config: config_file = args.config if args.config else "video_config.json" default_config = create_default_config() save_config(default_config, config_file) print(f"默认配置已保存到: {config_file}") return # 批量处理模式 if args.batch: if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) batch_process(args.batch, args.output_dir) return # 加载配置 config = None if args.config: config = load_config(args.config) # 如果没有提供配置或加载失败,使用默认配置 if not config: print("使用默认配置") config = create_default_config() # 命令行参数覆盖配置 if args.enable_gpu: config['use_gpu_processing'] = True if args.enable_mem_monitor: config['enable_memory_monitor'] = True # 预览功能 if args.preview_main >= 0: preview_frame(config, args.preview_main, is_main=True) return if args.preview_sub >= 0: preview_frame(config, args.preview_sub, is_main=False) return # 后台模式处理 if args.background: print("后台模式运行中...") logger.info("后台模式启动") # 重定向标准输出到日志 sys.stdout = open(log_file, 'a') sys.stderr = sys.stdout # 显示硬件加速信息 detect_hardware_acceleration() # 显示配置 logger.info("\n当前配置:") logger.info(f"主视频: {config['main_video']}") logger.info(f"副视频: {config['sub_video']}") logger.info(f"输出文件: {config['output_path']}") logger.info(f"主视频分段方式: {config['main_segment_type']}, 长度: {config['segment_a']}") logger.info(f"替换帧范围: b1={config['b1']}, b2={config['b2']}") logger.info(f"副视频分段方式: {config['sub_segment_type']}, 长度: {config['segment_c']}") logger.info(f"副视频起始帧: d={config['d']}") logger.info(f"副视频不足时: {config['sub_option']}") logger.info(f"输出分辨率: {config['output_resolution'][0]}x{config['output_resolution'][1]}") logger.info(f"硬件加速: {config.get('hardware_acceleration', 'auto')}") logger.info(f"解码线程数: {config.get('decoding_threads', 0)}") logger.info(f"使用GPU处理: {config.get('use_gpu_processing', False)}") logger.info(f"CUDA流数量: {config.get('cuda_streams', 0)}") logger.info(f"队列大小: {config.get('queue_size', 30)}") logger.info(f"启用内存监控: {config.get('enable_memory_monitor', False)}") logger.info(f"移动端优化: {config.get('mobile_optimized', True)}") print("\n当前配置:") print(f"主视频: {config['main_video']}") print(f"副视频: {config['sub_video']}") print(f"输出文件: {config['output_path']}") print(f"主视频分段方式: {config['main_segment_type']}, 长度: {config['segment_a']}") print(f"替换帧范围: b1={config['b1']}, b2={config['b2']}") print(f"副视频分段方式: {config['sub_segment_type']}, 长度: {config['segment_c']}") print(f"副视频起始帧: d={config['d']}") print(f"副视频不足时: {config['sub_option']}") print(f"输出分辨率: 
{config['output_resolution'][0]}x{config['output_resolution'][1]}") print(f"硬件加速: {config.get('hardware_acceleration', 'auto')}") print(f"解码线程数: {config.get('decoding_threads', 0)}") print(f"使用GPU处理: {config.get('use_gpu_processing', False)}") print(f"CUDA流数量: {config.get('cuda_streams', 0)}") print(f"队列大小: {config.get('queue_size', 30)}") print(f"启用内存监控: {config.get('enable_memory_monitor', False)}") print(f"移动端优化: {config.get('mobile_optimized', True)}\n") # 验证配置 if not validate_config(config): logger.error("配置验证失败") return # 显示视频信息 main_info = get_video_info(config['main_video']) if main_info: logger.info("主视频信息:") logger.info(f" 尺寸: {main_info['width']}x{main_info['height']}") logger.info(f" 帧率: {main_info['fps']:.1f} fps") logger.info(f" 总帧数: {main_info['frame_count']}") logger.info(f" 时长: {main_info['duration']:.1f}秒") print("主视频信息:") print(f" 尺寸: {main_info['width']}x{main_info['height']}") print(f" 帧率: {main_info['fps']:.1f} fps") print(f" 总帧数: {main_info['frame_count']}") print(f" 时长: {main_info['duration']:.1f}秒") sub_info = get_video_info(config['sub_video']) if sub_info: logger.info("\n副视频信息:") logger.info(f" 尺寸: {sub_info['width']}x{sub_info['height']}") logger.info(f" 帧率: {sub_info['fps']:.1f} fps") logger.info(f" 总帧数: {sub_info['frame_count']}") logger.info(f" 时长: {sub_info['duration']:.1f}秒") print("\n副视频信息:") print(f" 尺寸: {sub_info['width']}x{sub_info['height']}") print(f" 帧率: {sub_info['fps']:.1f} fps") print(f" 总帧数: {sub_info['frame_count']}") print(f" 时长: {sub_info['duration']:.1f}秒") # 确认开始处理 if not args.background: print("\n按 Enter 开始处理,或输入 'c' 取消...") user_input = input().strip().lower() if user_input == 'c': logger.info("用户取消处理") print("处理已取消") return # 创建并运行处理器 logger.info("开始视频处理") processor = VideoProcessor(config) processor.run() # 保存配置 timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") config_file = f"video_config_{timestamp}.json" save_config(config, config_file) logger.info(f"配置已保存: {config_file}") if __name__ == "__main__": main() 分析此代码所需依赖
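Dependency analysis for the script above: judging from the imports it actually uses (cv2, psutil, plus standard-library modules such as argparse, concurrent.futures, queue, json, logging, subprocess, traceback, tracemalloc and datetime), only two third-party packages need to be installed from PyPI; numpy is pulled in automatically as an OpenCV dependency. The snippet below is a minimal, hedged check of that assumption. The package names are the usual PyPI names; exact versions are not specified by the original code, and GPU/CUDA features additionally require an OpenCV build compiled with CUDA support.

# -*- coding: utf-8 -*-
# Hypothetical dependency check for the video-processing script above.
# Third-party packages are inferred from its imports; everything else is standard library.
import importlib

THIRD_PARTY = {
    "cv2": "opencv-python  (a CUDA-enabled build is needed for the GPU code paths)",
    "psutil": "psutil",
    "numpy": "numpy  (installed automatically as an OpenCV dependency)",
}

if __name__ == "__main__":
    for module_name, pypi_hint in THIRD_PARTY.items():
        try:
            importlib.import_module(module_name)
            print(f"OK      : {module_name:<8} -> {pypi_hint}")
        except ImportError:
            print(f"MISSING : {module_name:<8} -> pip install {pypi_hint.split()[0]}")

The remaining modules the script imports (os, sys, time, json, queue, logging, argparse, subprocess, traceback, tracemalloc, concurrent.futures, datetime) ship with the Python standard library and require no installation; the optional ffmpeg/vulkaninfo/clinfo checks are external command-line tools, not Python packages.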
import os import random import tkinter as tk from tkinter import filedialog, messagebox, ttk import shutil import tempfile import hashlib import time import pefile import zlib import sys import platform import psutil from Crypto.Cipher import AES # 仅保留但不用于代码段加密 from Crypto.Util.Padding import pad, unpad # 仅保留但不用于代码段加密 class ExeProtectorApp: def __init__(self, root): self.root = root self.root.title("EXE文件保护工具 v4.2") self.root.geometry("750x680") self.root.resizable(True, True) # 设置中文字体 self.style = ttk.Style() self.style.configure("TLabel", font=("SimHei", 10)) self.style.configure("TButton", font=("SimHei", 10)) self.style.configure("TProgressbar", thickness=20) # 创建主框架 self.main_frame = ttk.Frame(root, padding="20") self.main_frame.pack(fill=tk.BOTH, expand=True) # 文件选择部分 ttk.Label(self.main_frame, text="选择EXE文件:").grid(row=0, column=0, sticky=tk.W, pady=5) self.file_path_var = tk.StringVar() ttk.Entry(self.main_frame, textvariable=self.file_path_var, width=50).grid(row=0, column=1, padx=5, pady=5) ttk.Button(self.main_frame, text="浏览...", command=self.browse_file).grid(row=0, column=2, padx=5, pady=5) # 输出目录选择 ttk.Label(self.main_frame, text="输出目录:").grid(row=1, column=0, sticky=tk.W, pady=5) self.output_dir_var = tk.StringVar() ttk.Entry(self.main_frame, textvariable=self.output_dir_var, width=50).grid(row=1, column=1, padx=5, pady=5) ttk.Button(self.main_frame, text="浏览...", command=self.browse_output_dir).grid(row=1, column=2, padx=5, pady=5) # 选项设置 options_frame = ttk.LabelFrame(self.main_frame, text="选项", padding="10") options_frame.grid(row=2, column=0, columnspan=3, sticky=(tk.W, tk.E), pady=10) # 随机字节增加量 ttk.Label(options_frame, text="随机字节增加范围 (KB):").grid(row=0, column=0, sticky=tk.W, pady=5) self.min_size_var = tk.IntVar(value=100) ttk.Entry(options_frame, textvariable=self.min_size_var, width=10).grid(row=0, column=1, padx=5, pady=5) ttk.Label(options_frame, text="至").grid(row=0, column=2, padx=5, pady=5) self.max_size_var = tk.IntVar(value=1000) ttk.Entry(options_frame, textvariable=self.max_size_var, width=10).grid(row=0, column=3, padx=5, pady=5) # 随机性强度 ttk.Label(options_frame, text="随机性强度:").grid(row=0, column=4, sticky=tk.W, pady=5) self.random_strength = tk.StringVar(value="medium") strength_options = ttk.Combobox(options_frame, textvariable=self.random_strength, state="readonly", width=12) strength_options['values'] = ("低", "中", "高") strength_options.grid(row=0, column=5, padx=5, pady=5) # 程序类型模拟 ttk.Label(options_frame, text="模拟程序类型:").grid(row=1, column=0, sticky=tk.W, pady=5) self.app_type = tk.StringVar(value="generic") app_types = ttk.Combobox(options_frame, textvariable=self.app_type, state="readonly", width=15) app_types['values'] = ("通用程序", "游戏程序", "办公软件", "系统工具", "开发工具") app_types.grid(row=1, column=1, padx=5, pady=5) # 处理方法 self.process_method = tk.StringVar(value="safe") ttk.Radiobutton(options_frame, text="安全模式", variable=self.process_method, value="safe").grid(row=1, column=2, sticky=tk.W, pady=5) ttk.Radiobutton(options_frame, text="增强模式", variable=self.process_method, value="enhanced").grid(row=1, column=3, sticky=tk.W, pady=5) ttk.Radiobutton(options_frame, text="标准保护", variable=self.process_method, value="standard").grid(row=1, column=4, sticky=tk.W, pady=5) ttk.Radiobutton(options_frame, text="高级保护", variable=self.process_method, value="advanced").grid(row=1, column=5, sticky=tk.W, pady=5) # 高级选项 advanced_frame = ttk.LabelFrame(self.main_frame, text="保护选项", padding="10") advanced_frame.grid(row=3, column=0, columnspan=3, sticky=(tk.W, tk.E), pady=10) 
self.obfuscate_resources = tk.BooleanVar(value=True) ttk.Checkbutton(advanced_frame, text="混淆资源文件", variable=self.obfuscate_resources).grid(row=0, column=0, sticky=tk.W, pady=5) self.encrypt_sections = tk.BooleanVar(value=True) ttk.Checkbutton(advanced_frame, text="轻度代码变换", variable=self.encrypt_sections).grid(row=0, column=1, sticky=tk.W, pady=5) self.add_dummy_sections = tk.BooleanVar(value=True) ttk.Checkbutton(advanced_frame, text="添加随机数据块", variable=self.add_dummy_sections).grid(row=1, column=0, sticky=tk.W, pady=5) self.randomize_imports = tk.BooleanVar(value=True) ttk.Checkbutton(advanced_frame, text="随机化导入表顺序", variable=self.randomize_imports).grid(row=1, column=1, sticky=tk.W, pady=5) # 终极选项 ultra_frame = ttk.LabelFrame(self.main_frame, text="高级优化", padding="10") ultra_frame.grid(row=4, column=0, columnspan=3, sticky=(tk.W, tk.E), pady=10) self.anti_vm = tk.BooleanVar(value=False) ttk.Checkbutton(ultra_frame, text="兼容虚拟机环境", variable=self.anti_vm).grid(row=0, column=0, sticky=tk.W, pady=5) self.anti_debug = tk.BooleanVar(value=False) ttk.Checkbutton(ultra_frame, text="调试模式兼容", variable=self.anti_debug).grid(row=0, column=1, sticky=tk.W, pady=5) self.random_pe_layout = tk.BooleanVar(value=True) ttk.Checkbutton(ultra_frame, text="随机PE结构布局", variable=self.random_pe_layout).grid(row=1, column=0, sticky=tk.W, pady=5) self.variable_section_count = tk.BooleanVar(value=True) ttk.Checkbutton(ultra_frame, text="随机区段数量", variable=self.variable_section_count).grid(row=1, column=1, sticky=tk.W, pady=5) # 处理按钮 ttk.Button(self.main_frame, text="保护文件", command=self.process_file).grid(row=5, column=0, columnspan=3, pady=20) # 状态和进度条 self.status_var = tk.StringVar(value="就绪") ttk.Label(self.main_frame, textvariable=self.status_var).grid(row=6, column=0, columnspan=2, sticky=tk.W, pady=5) self.progress_var = tk.DoubleVar(value=0) self.progress_bar = ttk.Progressbar(self.main_frame, variable=self.progress_var, length=100) self.progress_bar.grid(row=6, column=2, sticky=(tk.W, tk.E), pady=5) # 默认输出目录 self.output_dir_var.set(os.path.join(os.getcwd(), "protected_exes")) # 绑定窗口关闭事件 self.root.protocol("WM_DELETE_WINDOW", self.on_closing) # 初始化随机种子 self.initialize_random_seed() # 初始化随机种子,使用多种来源确保高随机性 def initialize_random_seed(self): # 使用多种系统信息和随机源作为种子材料,增强随机性 seed_material = ( time.time_ns().to_bytes(8, 'big') + os.getpid().to_bytes(4, 'big') + os.urandom(32) + # 增加随机字节数量 str(psutil.virtual_memory().available).encode() + str(psutil.cpu_percent(interval=0.1)).encode() + platform.node().encode() + str(random.getstate()).encode() ) # 使用SHA-512获取更复杂的哈希值作为种子 seed = int.from_bytes(hashlib.sha512(seed_material).digest(), 'big') random.seed(seed) # 额外增加随机状态初始化 random.getstate() # 浏览文件 def browse_file(self): file_path = filedialog.askopenfilename( filetypes=[("可执行文件", "*.exe"), ("所有文件", "*.*")] ) if file_path: self.file_path_var.set(file_path) # 浏览输出目录 def browse_output_dir(self): dir_path = filedialog.askdirectory() if dir_path: self.output_dir_var.set(dir_path) # 处理文件 def process_file(self): exe_path = self.file_path_var.get() output_dir = self.output_dir_var.get() if not exe_path: messagebox.showerror("错误", "请选择一个EXE文件") return if not os.path.exists(exe_path): messagebox.showerror("错误", "选择的文件不存在") return if not output_dir: messagebox.showerror("错误", "请选择输出目录") return if not os.path.exists(output_dir): try: os.makedirs(output_dir) except: messagebox.showerror("错误", "无法创建输出目录") return # 获取文件名和扩展名 file_name, file_ext = os.path.splitext(os.path.basename(exe_path)) # 添加随机字符串到输出文件名,确保每次不同 random_suffix = 
hashlib.sha256(str(time.time_ns()).encode() + os.urandom(16)).hexdigest()[:12] output_path = os.path.join(output_dir, f"{file_name}_protected_{random_suffix}{file_ext}") try: # 更新状态 self.status_var.set("正在处理文件...") self.progress_var.set(0) self.root.update() # 计算随机增加的字节大小 min_size = self.min_size_var.get() max_size = self.max_size_var.get() if min_size < 0 or max_size < 0 or min_size > max_size: messagebox.showerror("错误", "请设置有效的字节增加范围") return # 根据随机性强度调整随机范围 strength_factor = 1.0 if self.random_strength.get() == "高": strength_factor = 1.5 elif self.random_strength.get() == "低": strength_factor = 0.5 adjusted_min = int(min_size * strength_factor) adjusted_max = int(max_size * strength_factor) random_size_kb = random.randint(adjusted_min, adjusted_max) random_size_bytes = random_size_kb * 1024 # 复制原始文件 shutil.copy2(exe_path, output_path) # 计算原始文件哈希值 original_hash = self.calculate_file_hash(exe_path) # 更新进度 self.progress_var.set(5) self.root.update() # 根据选择的模式处理文件 if self.process_method.get() == "safe": self.safe_modify_exe_file(output_path, random_size_bytes) elif self.process_method.get() == "enhanced": self.enhanced_modify_exe_file(output_path, random_size_bytes) elif self.process_method.get() == "standard": self.standard_protection(output_path, random_size_bytes) else: self.advanced_protection(output_path, random_size_bytes) # 后续哈希计算、进度更新等 modified_hash = self.calculate_file_hash(output_path) self.progress_var.set(95) self.root.update() if self.verify_exe_file(output_path): self.status_var.set("文件处理完成") self.progress_var.set(100) messagebox.showinfo( "成功", f"文件保护成功!\n" f"原始文件大小: {os.path.getsize(exe_path) // 1024} KB\n" f"处理后文件大小: {os.path.getsize(output_path) // 1024} KB\n" f"增加了: {random_size_kb} KB\n\n" f"原始文件哈希 (MD5): {original_hash}\n" f"处理后文件哈希 (MD5): {modified_hash}\n\n" f"文件已保存至: {output_path}" ) else: self.status_var.set("文件验证失败") self.progress_var.set(100) messagebox.showwarning("警告", "处理后的文件可能需要在特定环境运行") except Exception as e: self.status_var.set("处理过程中出错") messagebox.showerror("错误", f"处理文件时出错: {str(e)}") finally: self.progress_var.set(0) # 每次处理后重新初始化随机种子,确保下一次处理的随机性不同 self.initialize_random_seed() # 计算文件哈希 def calculate_file_hash(self, file_path): hash_md5 = hashlib.md5() with open(file_path, "rb") as f: for chunk in iter(lambda: f.read(4096), b""): hash_md5.update(chunk) return hash_md5.hexdigest() # 安全模式:仅添加正常数据 def safe_modify_exe_file(self, file_path, additional_bytes): with open(file_path, 'ab') as f: # 根据选择的应用类型生成对应的数据 app_type = self.app_type.get() data = self.generate_application_specific_data(additional_bytes, app_type) f.write(data) # 增强模式:优化PE结构 def enhanced_modify_exe_file(self, file_path, additional_bytes): try: pe = pefile.PE(file_path) # 更新时间戳,使用更大的随机偏移 pe.FILE_HEADER.TimeDateStamp = int(time.time()) + random.randint(-86400, 86400) # 随机偏移1天内 # 随机化更多非关键的PE头字段 if self.random_pe_layout.get(): pe.FILE_HEADER.PointerToSymbolTable = random.getrandbits(32) pe.FILE_HEADER.NumberOfSymbols = random.randint(0, 2000) # 添加更多随机化字段 pe.OPTIONAL_HEADER.MajorLinkerVersion = random.randint(1, 25) pe.OPTIONAL_HEADER.MinorLinkerVersion = random.randint(0, 99) pe.OPTIONAL_HEADER.MajorImageVersion = random.randint(1, 20) pe.OPTIONAL_HEADER.MinorImageVersion = random.randint(0, 99) # 添加正常附加数据 self.safe_modify_exe_file(file_path, additional_bytes) pe.write(file_path) pe.close() except Exception as e: print(f"增强模式执行: {e}") self.safe_modify_exe_file(file_path, additional_bytes) # 标准保护:添加合理区段 def standard_protection(self, file_path, additional_bytes): try: pe = pefile.PE(file_path) # 
随机决定添加的区段数量(1-4个),增加变化性 section_count = 1 if self.variable_section_count.get(): section_count = random.randint(1, 4) # 添加多个随机区段 for _ in range(section_count): # 创建新区段 new_section = pefile.SectionStructure(pe.__IMAGE_SECTION_HEADER_format__) # 生成随机但合理的区段名 new_section.Name = self.generate_sane_section_name() # 区段大小随机(1-16KB),范围更大 section_size = random.randint(0x1000, 0x4000) new_section.Misc_VirtualSize = section_size # 地址对齐,添加更大的随机偏移 base_virtual_address = (pe.sections[-1].VirtualAddress + pe.sections[-1].Misc_VirtualSize + 0x1000 - 1) & ~0xFFF new_section.VirtualAddress = base_virtual_address + random.randint(0, 0x2000) base_raw_data = (pe.sections[-1].PointerToRawData + pe.sections[-1].SizeOfRawData + 0x1000 - 1) & ~0xFFF new_section.PointerToRawData = base_raw_data + random.randint(0, 0x2000) new_section.SizeOfRawData = section_size # 随机选择合理的区段属性,增加更多可能性 section_flags = [ 0xC0000040, 0x40000040, 0x20000040, 0x80000040, 0x00000040, 0xE0000040, 0x00000080, 0x40000080 ] new_section.Characteristics = random.choice(section_flags) # 生成与程序类型匹配的区段数据 app_type = self.app_type.get() new_data = self.generate_application_specific_data(section_size, app_type) pe.set_bytes_at_offset(new_section.PointerToRawData, new_data) # 添加新区段到PE结构 pe.sections.append(new_section) pe.FILE_HEADER.NumberOfSections += 1 pe.OPTIONAL_HEADER.SizeOfImage = (new_section.VirtualAddress + new_section.Misc_VirtualSize + 0x1000 - 1) & ~0xFFF # 轻度代码变换 if self.encrypt_sections.get(): self.apply_mild_code_transformations(pe) # 随机化导入表顺序(如果启用) if self.randomize_imports.get() and hasattr(pe, 'DIRECTORY_ENTRY_IMPORT'): # 多次随机打乱以增加随机性 for _ in range(random.randint(1, 3)): random.shuffle(pe.DIRECTORY_ENTRY_IMPORT) # 添加文件末尾数据 self.safe_modify_exe_file(file_path, additional_bytes) # 更新时间戳,添加随机偏移 pe.FILE_HEADER.TimeDateStamp = int(time.time()) + random.randint(-86400, 86400) # 随机偏移1天内 pe.write(file_path) pe.close() except Exception as e: print(f"标准保护执行: {e}") self.enhanced_modify_exe_file(file_path, additional_bytes) # 高级保护:进一步增加随机性 def advanced_protection(self, file_path, additional_bytes): try: pe = pefile.PE(file_path) # 随机决定添加的区段数量(2-5个),增加更多变化 section_count = 2 if self.variable_section_count.get(): section_count = random.randint(2, 5) # 添加多个随机区段 for _ in range(section_count): new_section = pefile.SectionStructure(pe.__IMAGE_SECTION_HEADER_format__) new_section.Name = self.generate_sane_section_name() # 区段大小变化更大(1-32KB) section_size = random.randint(0x1000, 0x8000) new_section.Misc_VirtualSize = section_size # 地址对齐,添加更大的随机偏移 base_virtual_address = (pe.sections[-1].VirtualAddress + pe.sections[-1].Misc_VirtualSize + 0x1000 - 1) & ~0xFFF new_section.VirtualAddress = base_virtual_address + random.randint(0, 0x4000) base_raw_data = (pe.sections[-1].PointerToRawData + pe.sections[-1].SizeOfRawData + 0x1000 - 1) & ~0xFFF new_section.PointerToRawData = base_raw_data + random.randint(0, 0x4000) new_section.SizeOfRawData = section_size # 随机选择合理的区段属性,增加更多选项 section_flags = [ 0xC0000040, 0x40000040, 0x20000040, 0x80000040, 0x00000040, 0xE0000040, 0x00000080, 0x40000080, 0x80000080, 0x20000080, 0x00000100 ] new_section.Characteristics = random.choice(section_flags) # 生成特定类型的应用数据 app_type = self.app_type.get() new_data = self.generate_application_specific_data(section_size, app_type) pe.set_bytes_at_offset(new_section.PointerToRawData, new_data) pe.sections.append(new_section) pe.FILE_HEADER.NumberOfSections += 1 pe.OPTIONAL_HEADER.SizeOfImage = (new_section.VirtualAddress + new_section.Misc_VirtualSize + 0x1000 - 1) & ~0xFFF # 轻度代码变换 if 
self.encrypt_sections.get(): self.apply_mild_code_transformations(pe) # 混淆资源(如果启用) if self.obfuscate_resources.get() and hasattr(pe, 'DIRECTORY_ENTRY_RESOURCE'): self.obfuscate_pe_resources(pe) # 随机化导入表顺序,增加随机性 if self.randomize_imports.get() and hasattr(pe, 'DIRECTORY_ENTRY_IMPORT'): # 多次随机打乱以确保随机性 for _ in range(random.randint(2, 5)): random.shuffle(pe.DIRECTORY_ENTRY_IMPORT) # 添加随机数据块,使用随机大小 if self.add_dummy_sections.get(): dummy_size = random.randint(additional_bytes // 3, additional_bytes * 2 // 3) self.safe_modify_exe_file(file_path, dummy_size) additional_bytes -= dummy_size # 添加文件末尾数据 self.safe_modify_exe_file(file_path, additional_bytes) # 随机化更多PE头字段 if self.random_pe_layout.get(): pe.FILE_HEADER.PointerToSymbolTable = random.getrandbits(32) pe.FILE_HEADER.NumberOfSymbols = random.randint(0, 5000) pe.OPTIONAL_HEADER.MajorImageVersion = random.randint(1, 20) pe.OPTIONAL_HEADER.MinorImageVersion = random.randint(0, 99) pe.OPTIONAL_HEADER.MajorSubsystemVersion = random.randint(4, 10) pe.OPTIONAL_HEADER.MinorSubsystemVersion = random.randint(0, 99) pe.OPTIONAL_HEADER.MajorOperatingSystemVersion = random.randint(5, 10) pe.OPTIONAL_HEADER.MinorOperatingSystemVersion = random.randint(0, 99) # 添加更多可随机化的字段 pe.OPTIONAL_HEADER.LoaderFlags = random.getrandbits(32) & 0x00000003 # 仅保留合法值 pe.OPTIONAL_HEADER.NumberOfRvaAndSizes = 16 # 标准值,但可以偶尔修改 if random.random() < 0.3: # 30%概率修改这个值 pe.OPTIONAL_HEADER.SizeOfHeaders = (pe.OPTIONAL_HEADER.SizeOfHeaders + random.randint(0x100, 0x800)) & ~0xFF # 保持对齐 # 更新时间戳,使用更大的随机偏移 pe.FILE_HEADER.TimeDateStamp = int(time.time()) + random.randint(-604800, 604800) # 随机偏移1周内 pe.write(file_path) pe.close() except Exception as e: print(f"高级保护执行: {e}") self.standard_protection(file_path, additional_bytes) # 生成模拟特定类型程序的数据,增强随机性和多样性 def generate_application_specific_data(self, size, app_type): """根据程序类型生成不同特征的数据,确保每次生成都不同""" data = bytearray() # 根据选择的应用类型生成对应的数据模板,增加更多模板项 type_templates = { "通用程序": [ b"C:\\Program Files\\Common Files\\\x00", b"HKLM\\Software\\Microsoft\\Windows\\\x00", b"ERROR_ACCESS_DENIED\x00", b"SUCCESS\x00", b"CONFIG_FILE\x00", b"LOG_FILE\x00", b"USER_SETTINGS\x00", b"APPLICATION_DATA\x00", b"SYSTEM32\x00", b"KERNEL32.DLL\x00", b"ADVAPI32.DLL\x00", (0x00000001).to_bytes(4, 'little'), (0x00000100).to_bytes(4, 'little'), (0x00010000).to_bytes(4, 'little'), (0x00100000).to_bytes(4, 'little'), ], "游戏程序": [ b"C:\\Program Files\\Game\\Data\\\x00", b"C:\\Users\\Public\\Documents\\GameSaves\\\x00", b"TEXTURE_", b"MODEL_", b"SOUND_", b"LEVEL_", b"SCORE_", b"PLAYER_", b"ENEMY_", b"WEAPON_", b"QUEST_", b"ACHIEVEMENT_", b"INVENTORY_", b"CHARACTER_", b"MAP_", b"DIFFICULTY_", (0x000F4240).to_bytes(4, 'little'), # 1000000 (0x000003E8).to_bytes(4, 'little'), # 1000 (0x00000064).to_bytes(4, 'little'), # 100 (0x0000000A).to_bytes(4, 'little'), # 10 ], "办公软件": [ b"C:\\Users\\%USERNAME%\\Documents\\\x00", b"File Format: DOCX\x00", b"File Format: XLSX\x00", b"File Format: PPTX\x00", b"Page ", b"Sheet ", b"Table ", b"Font ", b"Style ", b"Paragraph ", b"Header", b"Footer", b"Section", b"Template", b"Macro", b"Add-in", b"Spell Check", b"Grammar Check", b"Word Count", b"Character Count", (0x0000000A).to_bytes(4, 'little'), # 10 (0x00000014).to_bytes(4, 'little'), # 20 (0x00000064).to_bytes(4, 'little'), # 100 ], "系统工具": [ b"C:\\Windows\\System32\\\x00", b"C:\\Windows\\SysWOW64\\\x00", b"HKLM\\SYSTEM\\CurrentControlSet\\\x00", b"Driver ", b"Service ", b"Device ", b"Registry ", b"Process ", b"Thread ", b"Memory ", b"Disk ", b"Network ", b"Adapter ", b"Protocol ", b"Firewall 
", b"Security ", b"Policy ", b"Account ", (0x00000001).to_bytes(4, 'little'), (0x00000000).to_bytes(4, 'little'), (0xFFFFFFFF).to_bytes(4, 'little'), (0x00000002).to_bytes(4, 'little'), ], "开发工具": [ b"C:\\Program Files\\Developer\\SDK\\\x00", b"C:\\Users\\%USERNAME%\\Source\\\x00", b"Compiler ", b"Linker ", b"Debugger ", b"Library ", b"Include ", b"Namespace ", b"Class ", b"Function ", b"Variable ", b"Pointer ", b"Array ", b"Struct ", b"Enum ", b"Union ", b"Template ", b"Exception ", b"Thread ", b"Mutex ", (0x00000000).to_bytes(4, 'little'), (0x00000001).to_bytes(4, 'little'), (0x00000002).to_bytes(4, 'little'), (0x00000003).to_bytes(4, 'little'), ] } # 获取对应类型的模板 templates = type_templates.get(app_type, type_templates["通用程序"]) # 根据随机性强度调整模板使用方式 template_usage = 0.7 # 70%使用模板,30%使用随机数据 if self.random_strength.get() == "高": template_usage = 0.5 # 50%使用模板,50%使用随机数据 elif self.random_strength.get() == "低": template_usage = 0.9 # 90%使用模板,10%使用随机数据 # 填充数据直到达到目标大小,使用更复杂的模式 while len(data) < size: # 随机选择使用模板还是生成随机数据 if random.random() < template_usage: # 随机选择一个模板并添加 item = random.choice(templates) data.extend(item) # 偶尔添加随机长度的空白或分隔符 if random.random() < 0.4: separator_length = random.randint(1, 16) if random.random() < 0.5: data.extend(b'\x00' * separator_length) else: data.extend(b' ' * separator_length) else: # 生成更复杂的随机数据 random_len = random.randint(1, 128) if random.random() < 0.3: # 生成随机ASCII文本 random_text = bytes(random.choice(b'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-.') for _ in range(random_len)) data.extend(random_text) elif random.random() < 0.6: # 生成随机二进制数据 data.extend(os.urandom(random_len)) else: # 生成随机数值数据 for _ in range(random_len // 4 + 1): num = random.getrandbits(32) data.extend(num.to_bytes(4, 'little')) return data[:size] # 生成更多样化的合理区段名 def generate_sane_section_name(self): # 扩展区段名基础列表 base_names = [ b'.data', b'.rdata', b'.text', b'.rsrc', b'.reloc', b'.bss', b'.edata', b'.idata', b'.pdata', b'.tls', b'.data1', b'.rdata2', b'.text1', b'.rsrc1', b'.data_', b'.rdata_', b'.text_', b'.rsrc_', b'.init', b'.fini', b'.ctors', b'.dtors', b'.gnu', b'.note', b'.eh_frame', b'.debug', b'.xdata', b'.pdata', b'.data2', b'.text2', b'.code', b'.const', b'.dynamic', b'.hash', b'.plt', b'.got', b'.shstrtab', b'.symtab', b'.strtab', b'.comment', b'.note.ABI-tag' ] # 随机选择基础名称并可能添加随机后缀 name = random.choice(base_names) if random.random() < 0.8: # 提高添加后缀的概率 # 添加更多样化的随机后缀 suffix_type = random.randint(0, 2) if suffix_type == 0: # 数字后缀 suffix = str(random.randint(10, 999)).encode() elif suffix_type == 1: # 字母后缀 suffix_length = random.randint(1, 3) suffix = bytes(random.choice('abcdefghijklmnopqrstuvwxyz') for _ in range(suffix_length)) else: # 混合后缀 suffix = (str(random.randint(1, 9)) + random.choice('abcdef') + str(random.randint(10, 99))).encode() # 确保总长度不超过8字节 name = name[:8-len(suffix)] + suffix return name.ljust(8, b'\x00')[:8] # 确保正好8字节 # 轻度代码变换,增加更多变换类型 def apply_mild_code_transformations(self, pe): text_section = None for section in pe.sections: if b'.text' in section.Name: text_section = section break if text_section: data = pe.get_data(text_section.VirtualAddress, text_section.SizeOfRawData) if not isinstance(data, bytes): data = bytes(data) data_list = list(data) # 根据随机性强度调整变换程度 transform_count = len(data_list) // 200 if self.random_strength.get() == "高": transform_count = len(data_list) // 100 elif self.random_strength.get() == "低": transform_count = len(data_list) // 400 # 限制最大变换次数,但增加上限 transform_count = min(200, transform_count) # 随机选择位置进行更丰富的轻微变换 for _ in 
range(transform_count): i = random.randint(0, len(data_list) - 1) # 增加更多变换类型 transform_type = random.choice(range(8)) if transform_type == 0: # 加1 data_list[i] = (data_list[i] + 1) % 256 elif transform_type == 1: # 减1 data_list[i] = (data_list[i] - 1) % 256 elif transform_type == 2: # 与0xFF异或 data_list[i] ^= 0xFF elif transform_type == 3: # 左移一位 data_list[i] = (data_list[i] << 1) % 256 elif transform_type == 4: # 右移一位 data_list[i] = (data_list[i] >> 1) % 256 elif transform_type == 5: # 加一个小随机数 data_list[i] = (data_list[i] + random.randint(1, 5)) % 256 elif transform_type == 6: # 减一个小随机数 data_list[i] = (data_list[i] - random.randint(1, 5)) % 256 else: # 与一个随机数异或 data_list[i] ^= random.randint(1, 255) pe.set_bytes_at_offset(text_section.PointerToRawData, bytes(data_list)) # 增强资源混淆 def obfuscate_pe_resources(self, pe): try: # 遍历所有资源条目 for resource_type in pe.DIRECTORY_ENTRY_RESOURCE.entries: if hasattr(resource_type, 'directory'): for resource_id in resource_type.directory.entries: if hasattr(resource_id, 'directory'): for resource_lang in resource_id.directory.entries: data_rva = resource_lang.data.struct.OffsetToData size = resource_lang.data.struct.Size # 读取资源数据 resource_data = list(pe.get_data(data_rva, size)) # 根据随机性强度调整混淆程度 step_size = 200 if self.random_strength.get() == "高": step_size = 100 elif self.random_strength.get() == "低": step_size = 400 # 增加更多变换类型 for i in range(0, len(resource_data), random.randint(step_size-50, step_size+50)): if i < len(resource_data): # 随机选择一种变换 transform_type = random.randint(0, 4) if transform_type == 0: resource_data[i] = (resource_data[i] + random.randint(1, 5)) % 256 elif transform_type == 1: resource_data[i] = (resource_data[i] - random.randint(1, 5)) % 256 elif transform_type == 2: resource_data[i] ^= random.randint(1, 255) elif transform_type == 3: resource_data[i] = (resource_data[i] << random.randint(1, 3)) % 256 else: resource_data[i] = (resource_data[i] >> random.randint(1, 3)) % 256 # 对部分资源进行块级混淆 if random.random() < 0.3 and size > 1024: block_size = random.randint(64, 256) for b in range(0, size, block_size): if random.random() < 0.2: # 20%的块会被翻转 start = b end = min(b + block_size, size) resource_data[start:end] = resource_data[start:end][::-1] # 写回修改后的资源数据 pe.set_bytes_at_offset(data_rva, bytes(resource_data)) except Exception as e: print(f"资源混淆错误: {e}") # 计算PE校验和 def calculate_pe_checksum(self, pe): try: with open(pe.name, 'rb') as f: data = f.read() checksum = 0 for i in range(0, len(data), 2): if i + 1 < len(data): w = (data[i+1] << 8) | data[i] checksum += w else: checksum += data[i] checksum = (checksum >> 16) + (checksum & 0xffff) checksum += (checksum >> 16) return 0 - checksum except: return 0x00000000 # 验证EXE文件 def verify_exe_file(self, file_path): try: pe = pefile.PE(file_path) pe.close() return True except: return False # 关闭窗口 def on_closing(self): if messagebox.askokcancel("退出", "确定要退出程序吗?"): self.root.destroy() if __name__ == "__main__": root = tk.Tk() app = ExeProtectorApp(root) root.mainloop() 用户希望每次输出的程序减小TEXT代码段熵值 相对上次输出的程序静态特征都有很大差异 绕过qvm静态分析查杀 2025-07-10 09:57:50 恶意软件(HEUR/QVM10.2.95BA.Malware.Gen)MD5:f4722f1fd2c14e65558207bd3415ed0a已删除此文件,如果您发现误删,可从隔离区恢复此文件。 2025-07-10 10:07:34 恶意软件(HEUR/QVM10.2.95BA.Malware.Gen)MD5:3f519280c806a531c2fd494f392add06已删除此文件,如果您发现误删,可从隔离区恢复此文件。 c:\users\odaycaogen'\desktop\server_protected_2587963335de.exe 2025-07-10 10:05:41 恶意软件(HEUR/QVM10.2.95BA.Malware.Gen)MD5:ad5bf7bf68759eaa5f25587d0e977320已删除此文件,如果您发现误删,可从隔离区恢复此文件。 c:\users\odaycaogen'\desktop\server_protected_4df5eb7b2b48.exe 
2025-07-10 10:04:55 恶意软件(HEUR/QVM10.2.95BA.Malware.Gen)MD5:ce816bbac00149bda721dec01d623773已删除此文件,如果您发现误删,可从隔离区恢复此文件。 c:\users\odaycaogen'\desktop\server_protected_5795dc64119b.exe 2025-07-10 10:03:42 恶意软件(HEUR/QVM10.2.95BA.Malware.Gen)MD5:224798374219b4fc6046b1da7f4eb632已删除此文件,如果您发现误删,可从隔离区恢复此文件。 c:\users\odaycaogen'\desktop\server_protected_17f73c7eda70.exe 2025-07-10 10:03:16 恶意软件(HEUR/QVM10.2.95BA.Malware.Gen)MD5:cb3c1534897e38110aeeea2aa71c9d5a已删除此文件,如果您发现误删,可从隔离区恢复此文件。 c:\users\odaycaogen'\desktop\server_protected_eb5a679318ab.exe 2025-07-10 09:57:50 恶意软件(HEUR/QVM10.2.95BA.Malware.Gen)MD5:f4722f1fd2c14e65558207bd3415ed0a已删除此文件,如果您发现误删,可从隔离区恢复此文件。 c:\users\odaycaogen'\desktop\server_protected_7e5282a2592431df.exe 2025-07-10 09:56:51 恶意软件(HEUR/QVM10.2.95BA.Malware.Gen)MD5:63d542dffd607fda49b4ca8880069471已删除此文件,如果您发现误删,可从隔离区恢复此文件。 c:\users\odaycaogen'\desktop\server_protected_0269d064dca0e33d.exe 2025-07-10 09:56:23 恶意软件(HEUR/QVM10.2.95BA.Malware.Gen)MD5:c7edea82a1abefe84502a88980b86356已删除此文件,如果您发现误删,可从隔离区恢复此文件。 c:\users\odaycaogen'\desktop\server_protected_98f344f33553ad04.exe 2025-07-10 09:55:20 恶意软件(HEUR/QVM10.2.95BA.Malware.Gen)MD5:8bb531f637cdae52b17ecab88b28b88d已删除此文件,如果您发现误删,可从隔离区恢复此文件。 c:\users\odaycaogen'\desktop\server_protected_32027bfd1919.exe 2025-07-10 09:54:37 恶意软件(HEUR/QVM10.2.95BA.Malware.Gen)MD5:f03cac215d14019e60e97a0a9dff564a已删除此文件,如果您发现误删,可从隔离区恢复此文件。 c:\users\odaycaogen'\desktop\server_protected_dc0e4952f25d.exe 2025-07-10 09:54:16 恶意软件(HEUR/QVM10.2.95BA.Malware.Gen)MD5:79f41c9ca52f656e0d14d7f99a7c0361已删除此文件,如果您发现误删,可从隔离区恢复此文件。 c:\users\odaycaogen'\desktop\server_protected_d0d314c24b51.exe 2025-07-10 09:36:22 恶意软件(HEUR/QVM10.2.958D.Malware.Gen)MD5:7a8102febd74e861b0f798e130e3bbdf已删除此文件,如果您发现误删,可从隔离区恢复此文件。 c:\users\odaycaogen'\desktop\server_protected_3766493edd57.exe 2025-07-10 09:35:48 恶意软件(HEUR/QVM10.2.958D.Malware.Gen)MD5:ceaa9c2e0ae64a9242eae202c127970a已删除此文件,如果您发现误删,可从隔离区恢复此文件。 c:\users\odaycaogen'\desktop\server_protected_e6c9d5d7be4e.exe 2025-07-10 09:34:47 恶意软件(HEUR/QVM10.2.958D.Malware.Gen)MD5:7cfe0a2c5d8fbce0d86a01613bef166e已删除此文件,如果您发现误删,可从隔离区恢复此文件。 c:\users\odaycaogen'\desktop\server_protected_e3d311d2cf8e.exe 2025-07-10 09:31:31 恶意软件(HEUR/QVM10.2.958D.Malware.Gen)MD5:416cfe49e997da0d85efc5eac7cd33f3已删除此文件,如果您发现误删,可从隔离区恢复此文件。 c:\users\odaycaogen'\desktop\server.exe 2025-07-10 09:18:33 恶意软件(HEUR/QVM10.2.958D.Malware.Gen)MD5:1dae68d1345fe77c6427f8f36ef257a3已删除此文件,如果您发现误删,可从隔离区恢复此文件。 c:\users\odaycaogen'\desktop\server_protected_100a4418.exe 2025-07-10 09:17:27 恶意软件(HEUR/QVM10.2.958D.Malware.Gen)MD5:22be3cbbdaf6adc964323e6cd7b1830e已删除此文件,如果您发现误删,可从隔离区恢复此文件。 c:\users\odaycaogen'\desktop\server_protected_a423e3f7.exe 2025-07-10 09:16:34 恶意软件(HEUR/QVM10.2.958D.Malware.Gen)MD5:19811a97d4f619ccc30fbd9355710c94已删除此文件,如果您发现误删,可从隔离区恢复此文件。 c:\users\odaycaogen'\desktop\server_protected_42c8f493.exe 2025-07-10 09:15:28 恶意软件(HEUR/QVM10.2.9551.Malware.Gen)MD5:603aa37cafcc1a97ba7d3b6944e7c44f已删除此文件,如果您发现误删,可从隔离区恢复此文件。 c:\users\odaycaogen'\desktop\server_protected_1870a4d5.exe 2025-07-10 09:15:28 恶意软件(HEUR/QVM10.2.9551.Malware.Gen)MD5:f4f3aaa34d45f6d392ab02d469529fa2已删除此文件,如果您发现误删,可从隔离区恢复此文件。 c:\users\odaycaogen'\desktop\server_protected_6ef68774.exe 2025-07-10 09:02:21 恶意软件(HEUR/QVM10.2.9551.Malware.Gen)MD5:d963a3b990e387d512c1505cc1defc2d已删除此文件,如果您发现误删,可从隔离区恢复此文件。 c:\users\odaycaogen'\appdata\local\temp\vmware-odaycaogen'\vmwarednd\4e3bc625\server_protected_b162fbecda18.exe 2025-07-10 09:02:20 
恶意软件(HEUR/QVM10.2.9551.Malware.Gen)MD5:d963a3b990e387d512c1505cc1defc2d已删除此文件,如果您发现误删,可从隔离区恢复此文件。 c:\users\odaycaogen'\desktop\server_protected_b162fbecda18.exe 2025-07-10 09:01:55 恶意软件(HEUR/QVM10.2.9551.Malware.Gen)MD5:6613a15d293baa581027037940f38123已删除此文件,如果您发现误删,可从隔离区恢复此文件。 c:\users\odaycaogen'\desktop\server_protected_0c7cdb86447c.exe 2025-07-10 09:01:13 恶意软件(HEUR/QVM10.2.9551.Malware.Gen)MD5:6e784a5d6e4c5cd3e102341571186c02已删除此文件,如果您发现误删,可从隔离区恢复此文件。 c:\users\odaycaogen'\desktop\server_protected_91770c617ba9.exe 2025-07-10 09:00:26 恶意软件(HEUR/QVM10.2.9551.Malware.Gen)MD5:ebaba98d0a9815b21c637a4f2cb2fea8已删除此文件,如果您发现误删,可从隔离区恢复此文件。 c:\users\odaycaogen'\desktop\server_protected_c5080514e48f.exe 2025-07-10 08:59:26 恶意软件(HEUR/QVM10.2.9551.Malware.Gen)MD5:3e2c4fc7477c93fe7446866dfa0c0d2a已删除此文件,如果您发现误删,可从隔离区恢复此文件。 c:\users\odaycaogen'\desktop\server_protected_917a4f029497.exe 2025-07-10 08:59:00 恶意软件(HEUR/QVM10.2.9551.Malware.Gen)MD5:fce5281cf86dca7226f2dde2764ceee1已删除此文件,如果您发现误删,可从隔离区恢复此文件。 c:\users\odaycaogen'\desktop\server_protected_e3ea24fe8204.exe 2025-07-09 20:41:17 恶意软件(HEUR/QVM10.2.9279.Malware.Gen)MD5:28391ff043ea6b95c0a22c42b827fd13已删除此文件,如果您发现误删,可从隔离区恢复此文件。 c:\users\odaycaogen'\desktop\server_protected_d94e61705c0d.exe 2025-07-09 20:39:48 恶意软件(HEUR/QVM10.2.9279.Malware.Gen)MD5:0dbbcc8071f854fa75fe485c055e9cb0已删除此文件,如果您发现误删,可从隔离区恢复此文件。 c:\users\odaycaogen'\desktop\server_protected_df81323ec213.exe 2025-07-09 20:39:17 恶意软件(HEUR/QVM10.2.9279.Malware.Gen)MD5:1d3e05e3d94613d048aff46974c9eae5已删除此文件,如果您发现误删,可从隔离区恢复此文件。 c:\users\odaycaogen'\desktop\server_protected_08d18b05d328.exe 2025-07-09 20:38:43 恶意软件(HEUR/QVM10.2.9279.Malware.Gen)MD5:cd26422e884d3110bc75080d27c1c069已删除此文件,如果您发现误删,可从隔离区恢复此文件。 c:\users\odaycaogen'\desktop\server_protected_e06bbef612d8.exe 2025-07-09 20:38:08 恶意软件(HEUR/QVM10.2.9279.Malware.Gen)MD5:8b12ffcdf948eca4636292ee8399b390已删除此文件,如果您发现误删,可从隔离区恢复此文件。 c:\users\odaycaogen'\desktop\server_protected_61f1aadc6f43.exe 2025-07-09 20:37:25 恶意软件(HEUR/QVM10.2.9279.Malware.Gen)MD5:7eb6481e075f5eeb5465e6395b55db92已删除此文件,如果您发现误删,可从隔离区恢复此文件。 c:\users\odaycaogen'\desktop\server_protected_789d8774794b.exe 2025-07-09 18:54:48 恶意软件(HEUR/QVM10.2.9231.Malware.Gen)MD5:5c7d6aec8656f4850e914408c8779c35已删除此文件,如果您发现误删,可从隔离区恢复此文件。 c:\program files\360\360safe\softmgr\whitelist\myapp_36171265\454336_protected_9d9289dd.exe 2025-07-09 18:54:43 恶意软件(HEUR/QVM10.2.9231.Malware.Gen)MD5:5c7d6aec8656f4850e914408c8779c35已删除此文件,如果您发现误删,可从隔离区恢复此文件。 c:\program files\360\360safe\softmgr\whitelist\myapp_36170329\454336_protected_9d9289dd.exe 2025-07-09 18:48:32 恶意软件(HEUR/QVM10.2.9231.Malware.Gen)MD5:8b18925bca5cf9c236bae2ed5f39e67c已删除此文件,如果您发现误删,可从隔离区恢复此文件。