SYS_CONTEXT(c1,c2)

This article describes the SYS_CONTEXT function in Oracle Database, which retrieves environment information about the current session, such as the terminal, language settings, and user identity. It covers the function's syntax, purpose, parameters, and return value, and an example shows how to query a range of environment attributes.
【Syntax】SYS_CONTEXT(c1, c2)
【Purpose】Returns the value of attribute c2 in namespace c1. The function can be used in both SQL and PL/SQL, but not in parallel queries or in a RAC (Real Application Clusters) environment.

【Parameters】
c1: the context namespace; the built-in 'USERENV' namespace is used here
c2: the attribute name within that namespace; see the example below for commonly used attributes

【Returns】A character string


【Example】
select
SYS_CONTEXT('USERENV','TERMINAL') terminal,
SYS_CONTEXT('USERENV','LANGUAGE') language,
SYS_CONTEXT('USERENV','SESSIONID') sessionid,
SYS_CONTEXT('USERENV','INSTANCE') instance,
SYS_CONTEXT('USERENV','ENTRYID') entryid,
SYS_CONTEXT('USERENV','ISDBA') isdba,
SYS_CONTEXT('USERENV','NLS_TERRITORY') nls_territory,
SYS_CONTEXT('USERENV','NLS_CURRENCY') nls_currency,
SYS_CONTEXT('USERENV','NLS_CALENDAR') nls_calendar,
SYS_CONTEXT('USERENV','NLS_DATE_FORMAT') nls_date_format,
SYS_CONTEXT('USERENV','NLS_DATE_LANGUAGE') nls_date_language,
SYS_CONTEXT('USERENV','NLS_SORT') nls_sort,
SYS_CONTEXT('USERENV','CURRENT_USER') current_user,
SYS_CONTEXT('USERENV','CURRENT_USERID') current_userid,
SYS_CONTEXT('USERENV','SESSION_USER') session_user,
SYS_CONTEXT('USERENV','SESSION_USERID') session_userid,
SYS_CONTEXT('USERENV','PROXY_USER') proxy_user,
SYS_CONTEXT('USERENV','PROXY_USERID') proxy_userid,
SYS_CONTEXT('USERENV','DB_DOMAIN') db_domain,
SYS_CONTEXT('USERENV','DB_NAME') db_name,
SYS_CONTEXT('USERENV','HOST') host,
SYS_CONTEXT('USERENV','OS_USER') os_user,
SYS_CONTEXT('USERENV','EXTERNAL_NAME') external_name,
SYS_CONTEXT('USERENV','IP_ADDRESS') ip_address,
SYS_CONTEXT('USERENV','NETWORK_PROTOCOL') network_protocol,
SYS_CONTEXT('USERENV','BG_JOB_ID') bg_job_id,
SYS_CONTEXT('USERENV','FG_JOB_ID') fg_job_id,
SYS_CONTEXT('USERENV','AUTHENTICATION_TYPE') authentication_type,
SYS_CONTEXT('USERENV','AUTHENTICATION_DATA') authentication_data
from dual;
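
Since SYS_CONTEXT is also callable from PL/SQL (see 【Purpose】 above), the following is a minimal sketch of reading a few USERENV attributes inside an anonymous block. The variable names are illustrative only, and DBMS_OUTPUT must be enabled (for example, SET SERVEROUTPUT ON in SQL*Plus) to see the printed line.

DECLARE
  v_user VARCHAR2(128);
  v_ip   VARCHAR2(64);
BEGIN
  -- Inside PL/SQL the function can be assigned directly; no SELECT ... FROM dual is needed
  v_user := SYS_CONTEXT('USERENV', 'SESSION_USER');
  v_ip   := SYS_CONTEXT('USERENV', 'IP_ADDRESS');
  DBMS_OUTPUT.PUT_LINE('session user = ' || v_user || ', client ip = ' || NVL(v_ip, 'n/a'));
END;
/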
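
The c1 argument is not limited to 'USERENV': it can also name a user-defined application context, which is what the generic wording in 【Purpose】 refers to. The sketch below uses a hypothetical context APP_CTX and package APP_CTX_PKG (both names invented for illustration); creating a context typically requires the CREATE ANY CONTEXT privilege, and only the package named in the USING clause is allowed to set attributes in that namespace.

-- Declare the namespace and the package trusted to populate it
CREATE OR REPLACE CONTEXT app_ctx USING app_ctx_pkg;

CREATE OR REPLACE PACKAGE app_ctx_pkg AS
  PROCEDURE set_attr(p_name VARCHAR2, p_value VARCHAR2);
END app_ctx_pkg;
/
CREATE OR REPLACE PACKAGE BODY app_ctx_pkg AS
  PROCEDURE set_attr(p_name VARCHAR2, p_value VARCHAR2) IS
  BEGIN
    -- DBMS_SESSION.SET_CONTEXT writes an attribute into the APP_CTX namespace
    DBMS_SESSION.SET_CONTEXT('APP_CTX', p_name, p_value);
  END set_attr;
END app_ctx_pkg;
/

-- Populate the attribute, then read it back with SYS_CONTEXT(c1, c2)
BEGIN
  app_ctx_pkg.set_attr('DEPT_ID', '42');
END;
/
SELECT SYS_CONTEXT('APP_CTX', 'DEPT_ID') AS dept_id FROM dual;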