保存数据到文件(二)locals(),try...finally save data to file python

本文介绍了Python中如何通过try...finally结构处理文件操作中的异常,确保文件被正确关闭,同时展示了如何利用局部变量检查避免NameError,并妥善处理IOError。

如果在程序运行中,遇到异常,则try之后的子句不再运行,若是以当前的程序为例:


如若红色框内的语句发生了错误,则下面的子句即绿色框内的语句就不再执行,直接跳到except语句

如此可能产生的问题是:文件打开并写入内容之后没有被关闭。如何来解决这个问题呢?下面引入了finally:

try...finally的用法是:

1. 先执行try下的语句,如若try的语句没有异常,则执行完try语句之后,再执行finally语句。

2. 在执行try下的语句时,如若try下的子句有异常,则抛出这个异常,然后执行finally语句

也就是说无论有无异常,finally语句都是必须执行的。


如若把close的语句放到finally下,则会减少文件损坏的可能,能保证数据完全输出。


但是如果man_file或者other_file没有赋值成功(例如man_data或者other_data对应的文本文件不存在,open()抛出异常),则会出现错误信息:

traceback为:NameError: name 'man_file' is not defined(或者 name 'other_file' is not defined)。

如何避免掉这个错误呢?我们可以加入逻辑性判断:if 'man_file' in locals():

那么locals()函数是什么呢?Python 有两个内置的函数,locals 和 globals,locals提供了基于 dictionary 的访问局部变量的方式; globals 提供了基于 dictionary 的访问全局变量的方式。为什么说是基于dictionary呢?因为变量是按dictionary的形式存储在名字空间的。dictionary的键字(key)就是变量名,dictionary的值(value)就是变量的值。


当代码要使用变量 man_file 的值时,Python会到所有可用的名字空间去查找变量:

第一步:局部名字空间 ——特指当前函数或类的方法。如果函数定义了一个局部变量 man_file,python使用这个变量,然后停止搜索。
第二步:全局名字空间——特指当前的模块。如果模块定义了一个名为man_file的变量,函数或类,python将使用这个变量然后停止搜索。
第三步:内置名字空间——对每个模块都是全局的。作为最后的尝试,python将假设man_file是内置函数或变量。

如果Python在这些名字空间找不到 man_file,它将放弃查找并引发一个 NameError 的异常,同时传递 "There is no variable named 'x' "这样的一个traceback。

所以程序改变为:


但是在输出File Error的时候并没有给出确切的异常信息,下面做一些小的改动就可以避免此问题:

except IOError as err:

           print('File Error: ' + err)

用err来作为输出异常信息的一部分。

但是这句话存在异常信息: TypeError: Can't convert 'IOError' object to str implicitly.

也就是说异常对象和字符串不能兼容,那么我们就要调用str()来完成转换:

except IOError as err:

           print('File Error: ' + str(err) )

用str()来强迫异常对象变成字符串类型。

最后输出的异常回馈,我们可以看出确切的异常信息,如下:





import os import pandas as pd import tkinter as tk from tkinter import ttk, filedialog, scrolledtext, messagebox from tkinter.colorchooser import askcolor from difflib import SequenceMatcher import re import openpyxl import threading import numpy as np from openpyxl.utils import get_column_letter import xlrd import gc import hashlib import json import tempfile from concurrent.futures import ThreadPoolExecutor, as_completed import unicodedata class EnhancedSignalComparator: def __init__(self, root): self.root = root self.root.title("增强版信号功能对比工具") self.root.geometry("1200x800") self.root.configure(bg="#f0f0f0") # 初始化变量 self.folder_path = tk.StringVar() self.search_text = tk.StringVar() self.files = [] self.results = {} # 存储信号对比结果 self.highlight_color = "#FFD700" # 默认高亮色 self.search_running = False self.stop_requested = False self.cache_dir = os.path.join(tempfile.gettempdir(), "excel_cache") self.file_cache = {} # 文件缓存 self.column_cache = {} # 列名缓存 self.max_workers = 4 # 最大并发线程数 # 创建缓存目录 os.makedirs(self.cache_dir, exist_ok=True) # 创建界面 self.create_widgets() def create_widgets(self): # 顶部控制面板 control_frame = ttk.Frame(self.root, padding=10) control_frame.pack(fill=tk.X) # 文件夹选择 ttk.Label(control_frame, text="选择文件夹:").grid(row=0, column=0, sticky=tk.W) folder_entry = ttk.Entry(control_frame, textvariable=self.folder_path, width=50) folder_entry.grid(row=0, column=1, padx=5, sticky=tk.EW) ttk.Button(control_frame, text="浏览...", command=self.browse_folder).grid(row=0, column=2) # 搜索输入 ttk.Label(control_frame, text="搜索信号:").grid(row=1, column=0, sticky=tk.W, pady=(10,0)) search_entry = ttk.Entry(control_frame, textvariable=self.search_text, width=50) search_entry.grid(row=1, column=1, padx=5, pady=(10,0), sticky=tk.EW) search_entry.bind("<Return>", lambda event: self.start_search_thread()) ttk.Button(control_frame, text="搜索", command=self.start_search_thread).grid(row=1, column=2, pady=(10,0)) ttk.Button(control_frame, text="停止", command=self.stop_search).grid(row=1, 
column=3, pady=(10,0), padx=5) # 高级选项 ttk.Label(control_frame, text="并发线程:").grid(row=2, column=0, sticky=tk.W, pady=(10,0)) self.thread_var = tk.StringVar(value="4") ttk.Combobox(control_frame, textvariable=self.thread_var, values=["1", "2", "4", "8"], width=5).grid(row=2, column=1, sticky=tk.W, padx=5, pady=(10,0)) # 文件过滤 ttk.Label(control_frame, text="文件过滤:").grid(row=2, column=2, sticky=tk.W, pady=(10,0)) self.filter_var = tk.StringVar(value="*.xlsx;*.xlsm;*.xls") ttk.Entry(control_frame, textvariable=self.filter_var, width=20).grid(row=2, column=3, sticky=tk.W, padx=5, pady=(10,0)) # 高亮颜色选择 ttk.Label(control_frame, text="高亮颜色:").grid(row=3, column=0, sticky=tk.W, pady=(10,0)) self.color_btn = tk.Button(control_frame, bg=self.highlight_color, width=3, command=self.choose_color) self.color_btn.grid(row=3, column=1, sticky=tk.W, padx=5, pady=(10,0)) # 进度条 self.progress = ttk.Progressbar(control_frame, orient="horizontal", length=200, mode="determinate") self.progress.grid(row=3, column=2, columnspan=2, sticky=tk.EW, padx=5, pady=(10,0)) # 结果标签 self.result_label = ttk.Label(control_frame, text="") self.result_label.grid(row=3, column=4, sticky=tk.W, padx=5, pady=(10,0)) # 对比面板 notebook = ttk.Notebook(self.root) notebook.pack(fill=tk.BOTH, expand=True, padx=10, pady=10) # 表格视图 self.table_frame = ttk.Frame(notebook) notebook.add(self.table_frame, text="表格视图") # 文本对比视图 self.text_frame = ttk.Frame(notebook) notebook.add(self.text_frame, text="行内容对比") # 状态栏 self.status_var = tk.StringVar() status_bar = ttk.Label(self.root, textvariable=self.status_var, relief=tk.SUNKEN, anchor=tk.W) status_bar.pack(side=tk.BOTTOM, fill=tk.X) # 初始化表格和文本区域 self.init_table_view() self.init_text_view() def init_table_view(self): """初始化表格视图""" # 创建树状表格 columns = ("信号", "文件", "行内容摘要") self.tree = ttk.Treeview(self.table_frame, columns=columns, show="headings") # 设置列标题 for col in columns: self.tree.heading(col, text=col) self.tree.column(col, width=200, anchor=tk.W) # 添加滚动条 scrollbar = 
ttk.Scrollbar(self.table_frame, orient=tk.VERTICAL, command=self.tree.yview) self.tree.configure(yscrollcommand=scrollbar.set) self.tree.pack(side=tk.LEFT, fill=tk.BOTH, expand=True) scrollbar.pack(side=tk.RIGHT, fill=tk.Y) # 绑定选择事件 self.tree.bind("<<TreeviewSelect>>", self.on_table_select) def init_text_view(self): """初始化文本对比视图""" self.text_panes = {} self.text_frame.columnconfigure(0, weight=1) self.text_frame.rowconfigure(0, weight=1) # 创建对比容器 self.compare_container = ttk.Frame(self.text_frame) self.compare_container.grid(row=0, column=0, sticky="nsew", padx=5, pady=5) # 添加差异高亮按钮 btn_frame = ttk.Frame(self.text_frame) btn_frame.grid(row=1, column=0, sticky="ew", padx=5, pady=5) ttk.Button(btn_frame, text="高亮显示差异", command=self.highlight_differences).pack(side=tk.LEFT) ttk.Button(btn_frame, text="导出差异报告", command=self.export_report).pack(side=tk.LEFT, padx=5) ttk.Button(btn_frame, text="清除缓存", command=self.clear_cache).pack(side=tk.LEFT, padx=5) ttk.Button(btn_frame, text="手动指定列名", command=self.manual_column_select).pack(side=tk.LEFT, padx=5) def browse_folder(self): """选择文件夹""" folder = filedialog.askdirectory(title="选择包含Excel文件文件夹") if folder: self.folder_path.set(folder) self.load_files() def load_files(self): """加载文件夹中的Excel文件(优化特殊字符处理)""" folder = self.folder_path.get() if not folder or not os.path.isdir(folder): return # 获取文件过滤模式 filter_patterns = self.filter_var.get().split(';') self.files = [] for file in os.listdir(folder): file_path = os.path.join(folder, file) # 跳过临时文件 if file.startswith('~$'): continue # 检查文件扩展名 file_lower = file.lower() matched = False for pattern in filter_patterns: # 移除通配符并转换为小写 ext = pattern.replace('*', '').lower() if file_lower.endswith(ext): matched = True break if matched: # 规范化文件名处理特殊字符 normalized_path = self.normalize_file_path(file_path) if normalized_path and os.path.isfile(normalized_path): self.files.append(normalized_path) self.status_var.set(f"找到 {len(self.files)} 个Excel文件") def normalize_file_path(self, path): 
"""规范化文件路径,处理特殊字符""" try: # 尝试直接访问文件 if os.path.exists(path): return path # 尝试Unicode规范化 normalized = unicodedata.normalize('NFC', path) if os.path.exists(normalized): return normalized # 尝试不同编码方案 encodings = ['utf-8', 'shift_jis', 'euc-jp', 'cp932'] for encoding in encodings: try: decoded = path.encode('latin1').decode(encoding) if os.path.exists(decoded): return decoded except: continue # 最终尝试原始路径 return path except Exception as e: self.status_var.set(f"文件路径处理错误: {str(e)}") return path def get_file_hash(self, file_path): """计算文件哈希值用于缓存""" try: hash_md5 = hashlib.md5() with open(file_path, "rb") as f: for chunk in iter(lambda: f.read(4096), b""): hash_md5.update(chunk) return hash_md5.hexdigest() except Exception as e: self.status_var.set(f"计算文件哈希失败: {str(e)}") return str(os.path.getmtime(file_path)) def get_cache_filename(self, file_path): """获取缓存文件名""" file_hash = self.get_file_hash(file_path) return os.path.join(self.cache_dir, f"{os.path.basename(file_path)}_{file_hash}.cache") def load_header_cache(self, file_path): """加载列名缓存""" cache_file = self.get_cache_filename(file_path) if os.path.exists(cache_file): try: with open(cache_file, "r", encoding='utf-8') as f: return json.load(f) except: return None return None def save_header_cache(self, file_path, header_info): """保存列名缓存""" cache_file = self.get_cache_filename(file_path) try: with open(cache_file, "w", encoding='utf-8') as f: json.dump(header_info, f) return True except: return False def find_header_row(self, file_path): """查找列名行(增强版)""" # 检查缓存 cache = self.load_header_cache(file_path) if cache: return cache.get("header_row"), cache.get("signal_col") # 没有缓存则重新查找 if file_path.lower().endswith((".xlsx", ".xlsm")): return self.find_header_row_openpyxl(file_path) elif file_path.lower().endswith(".xls"): return self.find_header_row_xlrd(file_path) return None, None def find_header_row_openpyxl(self, file_path): """使用openpyxl查找列名行(增强版)""" try: wb = openpyxl.load_workbook(file_path, read_only=True, 
data_only=True) ws = wb.active # 尝试多种列名匹配模式 patterns = [ r'データ名', # 半角片假名 r'データ名', # 全角片假名 r'信号名', # 中文 r'Signal Name', # 英文 r'Data Name', r'信号名称', r'データ名称' ] # 扩大搜索范围:前100行和前100列 for row_idx in range(1, 101): # 1-100行 # 扩大列搜索范围到100列 for col_idx in range(1, 101): # 1-100列 try: cell = ws.cell(row=row_idx, column=col_idx) cell_value = cell.value if not cell_value: continue # 尝试所有匹配模式 cell_str = str(cell_value) for pattern in patterns: if re.search(pattern, cell_str, re.IGNORECASE): # 找到列名行后,尝试确定信号列 signal_col = None # 在同行中查找信号列 for col_idx2 in range(1, 101): # 1-100列 try: cell2 = ws.cell(row=row_idx, column=col_idx2) cell2_value = cell2.value if not cell2_value: continue cell2_str = str(cell2_value) if re.search(pattern, cell2_str, re.IGNORECASE): signal_col = col_idx2 break except: continue # 保存缓存 if signal_col is not None: header_info = {"header_row": row_idx, "signal_col": signal_col} self.save_header_cache(file_path, header_info) wb.close() return row_idx, signal_col except: continue wb.close() except Exception as e: self.status_var.set(f"查找列名行出错: {str(e)}") return None, None def find_header_row_xlrd(self, file_path): """使用xlrd查找列名行(增强版)""" try: wb = xlrd.open_workbook(file_path) ws = wb.sheet_by_index(0) # 尝试多种列名匹配模式 patterns = [ r'データ名', # 半角片假名 r'データ名', # 全角片假名 r'信号名', # 中文 r'Signal Name', # 英文 r'Data Name', r'信号名称', r'データ名称' ] # 扩大搜索范围:前100行和前100列 for row_idx in range(0, 100): # 0-99行 # 扩大列搜索范围到100列 for col_idx in range(0, 100): # 0-99列 try: cell_value = ws.cell_value(row_idx, col_idx) if not cell_value: continue # 尝试所有匹配模式 cell_str = str(cell_value) for pattern in patterns: if re.search(pattern, cell_str, re.IGNORECASE): # 找到列名行后,尝试确定信号列 signal_col = None # 在同行中查找信号列 for col_idx2 in range(0, 100): # 0-99列 try: cell2_value = ws.cell_value(row_idx, col_idx2) if not cell2_value: continue cell2_str = str(cell2_value) if re.search(pattern, cell2_str, re.IGNORECASE): signal_col = col_idx2 break except: continue # 保存缓存 if signal_col is not None: header_info = 
{"header_row": row_idx, "signal_col": signal_col} self.save_header_cache(file_path, header_info) return row_idx, signal_col except: continue except Exception as e: self.status_var.set(f"查找列名行出错: {str(e)}") return None, None def extract_row_content(self, ws, row_idx, header_row, max_cols=100): """高效提取行内容(最多到100列)""" content = [] # 扩展到100列 for col_idx in range(1, max_cols + 1): try: cell = ws.cell(row=row_idx, column=col_idx) if cell.value is not None and str(cell.value).strip() != '': # 使用列名缓存 col_key = f"{header_row}-{col_idx}" if col_key in self.column_cache: col_name = self.column_cache[col_key] else: col_name_cell = ws.cell(row=header_row, column=col_idx) col_name = col_name_cell.value if col_name_cell.value else f"列{get_column_letter(col_idx)}" self.column_cache[col_key] = col_name content.append(f"{col_name}: {str(cell.value).strip()}") except: continue return "\n".join(content) def start_search_thread(self): """启动搜索线程""" if self.search_running: return self.search_running = True self.stop_requested = False self.max_workers = int(self.thread_var.get()) threading.Thread(target=self.search_files, daemon=True).start() def stop_search(self): """停止搜索""" self.stop_requested = True self.status_var.set("正在停止搜索...") def search_files(self): """在文件中搜索内容(优化特殊文件处理)""" search_term = self.search_text.get().strip() if not search_term: self.status_var.set("请输入搜索内容") self.search_running = False return if not self.files: self.status_var.set("请先选择文件夹") self.search_running = False return # 重置结果和UI self.results = {} for item in self.tree.get_children(): self.tree.delete(item) total_files = len(self.files) processed_files = 0 found_signals = 0 # 使用线程池处理文件 # 在search_files方法中添加详细进度 with ThreadPoolExecutor(max_workers=self.max_workers) as executor: futures = {} for i, file_path in enumerate(self.files): if self.stop_requested: break future = executor.submit(self.process_file, file_path, search_term) futures[future] = (file_path, i) # 保存文件索引 for future in as_completed(futures): if 
self.stop_requested: break file_path, idx = futures[future] try: found = future.result() found_signals += found processed_files += 1 # 更详细的进度反馈 progress = int(processed_files / total_files * 100) self.progress["value"] = progress self.status_var.set( f"已处理 {processed_files}/{total_files} 个文件 | " f"当前: {os.path.basename(file_path)} | " f"找到: {found_signals} 个匹配" ) self.root.update_idletasks() except Exception as e: self.status_var.set(f"处理文件 {os.path.basename(file_path)} 出错: {str(e)}") # 更新结果 if self.stop_requested: self.status_var.set(f"搜索已停止,已处理 {processed_files}/{total_files} 个文件") elif found_signals == 0: self.status_var.set(f"未找到包含 '{search_term}' 的信号") else: self.status_var.set(f"找到 {len(self.results)} 个匹配信号,共 {found_signals} 处匹配") self.update_text_view() self.progress["value"] = 0 self.search_running = False gc.collect() # 强制垃圾回收释放内存 def process_file(self, file_path, search_term): """处理单个文件(增强异常处理)""" found = 0 try: # 获取列名行和信号列 header_row, signal_col = self.find_header_row(file_path) # 如果自动查找失败,尝试手动模式 if header_row is None or signal_col is None: self.status_var.set(f"文件 {os.path.basename(file_path)} 未找到列名行,尝试手动查找...") header_row, signal_col = self.manual_find_header_row(file_path) if header_row is None or signal_col is None: self.status_var.set(f"文件 {os.path.basename(file_path)} 无法确定列名行,已跳过") return found # 使用pandas处理所有Excel文件类型 found = self.process_file_with_pandas(file_path, search_term, header_row, signal_col) except Exception as e: self.status_var.set(f"处理文件 {os.path.basename(file_path)} 出错: {str(e)}") return found def manual_find_header_row(self, file_path): """手动查找列名行(当自动查找失败时使用)""" try: # 尝试打开文件 if file_path.lower().endswith((".xlsx", ".xlsm")): wb = openpyxl.load_workbook(file_path, read_only=True, data_only=True) ws = wb.active # 扫描整个工作表(最多1000行) for row_idx in range(1, 1001): for col_idx in range(1, 101): try: cell = ws.cell(row=row_idx, column=col_idx) if cell.value and "データ" in str(cell.value): # 找到可能的列名行 return row_idx, col_idx except: continue 
wb.close() elif file_path.lower().endswith(".xls"): wb = xlrd.open_workbook(file_path) ws = wb.sheet_by_index(0) # 扫描整个工作表(最多1000行) for row_idx in range(0, 1000): for col_idx in range(0, 100): try: cell_value = ws.cell_value(row_idx, col_idx) if cell_value and "データ" in str(cell_value): # 找到可能的列名行 return row_idx, col_idx except: continue except: pass return None, None def get_file_cache_key(self, file_path, header_row, signal_col): """生成唯一的文件缓存键""" file_hash = self.get_file_hash(file_path) return f"{file_hash}_{header_row}_{signal_col}" def process_file_with_pandas(self, file_path, search_term, header_row, signal_col): """使用pandas高效处理Excel文件(优化版)""" found = 0 try: # 使用pandas读取Excel文件 file_ext = os.path.splitext(file_path)[1].lower() engine = 'openpyxl' if file_ext in ['.xlsx', '.xlsm'] else 'xlrd' # 动态确定要读取的列范围(前后10列) start_col = max(1, signal_col - 10) end_col = signal_col + 10 # 读取数据 df = pd.read_excel( file_path, engine=engine, header=header_row-1, usecols=range(start_col-1, end_col), dtype=str ) # 获取信号列名称 signal_col_idx = signal_col - start_col if signal_col_idx < len(df.columns): signal_col_name = df.columns[signal_col_idx] else: self.status_var.set(f"文件 {os.path.basename(file_path)}: 信号列超出范围") return 0 # 搜索匹配的信号 if signal_col_name in df.columns: matches = df[df[signal_col_name].str.contains(search_term, case=False, na=False)] else: self.status_var.set(f"文件 {os.path.basename(file_path)}: 未找到信号列 '{signal_col_name}'") return 0 # 处理匹配行 short_name = os.path.basename(file_path) for idx, row in matches.iterrows(): # 只显示有值的列 row_content = [] for col_name, value in row.items(): # 跳过空值 if pd.notna(value) and str(value).strip() != '': row_content.append(f"{col_name}: {str(value).strip()}") row_content = "\n".join(row_content) signal_value = row[signal_col_name] # 使用复合键确保唯一性 signal_key = f"{signal_value}||{short_name}" # 添加到结果集 self.results[signal_key] = { "signal": signal_value, "file": short_name, "content": row_content } # 添加到表格 summary = row_content[:50] + "..." 
if len(row_content) > 50 else row_content self.tree.insert("", tk.END, values=(signal_value, short_name, summary)) found += 1 # 每处理10行更新一次UI if found % 10 == 0: self.status_var.set(f"处理 {short_name}: 找到 {found} 个匹配") self.root.update_idletasks() except Exception as e: self.status_var.set(f"处理文件 {os.path.basename(file_path)} 出错: {str(e)}") finally: # 显式释放内存 if 'df' in locals(): del df if 'matches' in locals(): del matches gc.collect() return found def extract_xlrd_row_content(self, ws, row_idx, header_row): """为xls文件高效提取行内容""" content = [] try: row_values = ws.row_values(row_idx) except: return "" # 扩展到100列 for col_idx in range(min(len(row_values), 100)): try: cell_value = row_values[col_idx] if cell_value is not None and str(cell_value).strip() != '': # 使用列名缓存 col_key = f"{header_row}-{col_idx}" if col_key in self.column_cache: col_name = self.column_cache[col_key] else: try: col_name = ws.cell_value(header_row, col_idx) if not col_name: col_name = f"列{col_idx+1}" except: col_name = f"列{col_idx+1}" self.column_cache[col_key] = col_name content.append(f"{col_name}: {str(cell_value).strip()}") except: continue return "\n".join(content) def update_text_view(self): """更新文本对比视图""" # 清除现有文本区域 for widget in self.compare_container.winfo_children(): widget.destroy() if not self.results: return # 获取第一个信号作为默认显示 first_signal_key = next(iter(self.results.keys())) self.display_signal_comparison(first_signal_key) def on_table_select(self, event): """表格选择事件处理""" selected = self.tree.selection() if not selected: return item = self.tree.item(selected[0]) signal_value = item["values"][0] file_name = item["values"][1] # 使用复合键 signal_key = f"{signal_value}||{file_name}" self.display_signal_comparison(signal_key) def display_signal_comparison(self, signal_key): """显示指定信号的对比""" # 清除现有文本区域 for widget in self.compare_container.winfo_children(): widget.destroy() if signal_key not in self.results: return signal_data = self.results[signal_key] # 创建列框架 col_frame = 
ttk.Frame(self.compare_container) col_frame.grid(row=0, column=0, sticky="nsew", padx=5, pady=5) self.compare_container.columnconfigure(0, weight=1) # 文件名标签 file_label = ttk.Label(col_frame, text=signal_data["file"], font=("Arial", 10, "bold")) file_label.pack(fill=tk.X, pady=(0, 5)) # 信号名标签 signal_label = ttk.Label(col_frame, text=signal_data["signal"], font=("Arial", 9, "italic")) signal_label.pack(fill=tk.X, pady=(0, 5)) # 文本区域 text_area = scrolledtext.ScrolledText(col_frame, wrap=tk.WORD, width=60, height=20) text_area.insert(tk.INSERT, signal_data["content"]) text_area.configure(state="disabled") text_area.pack(fill=tk.BOTH, expand=True) # 保存引用 self.text_panes[signal_key] = text_area def highlight_differences(self): """高亮显示文本差异""" if not self.text_panes: return # 获取所有行内容 all_contents = [] for text_area in self.text_panes.values(): text_area.configure(state="normal") text = text_area.get("1.0", tk.END).strip() text_area.configure(state="disabled") all_contents.append(text) # 如果所有内容相同,则不需要高亮 if len(set(all_contents)) == 1: self.status_var.set("所有文件行内容完全一致") return # 使用第一个文件作为基准 base_text = all_contents[0] # 对比并高亮差异 for i, (file, text_area) in enumerate(self.text_panes.items()): if i == 0: # 基准文件不需要处理 continue text_area.configure(state="normal") text_area.tag_configure("diff", background=self.highlight_color) # 清除之前的高亮 text_area.tag_remove("diff", "1.0", tk.END) # 获取当前文本 compare_text = text_area.get("1.0", tk.END).strip() # 使用序列匹配器查找差异 s = SequenceMatcher(None, base_text, compare_text) # 高亮差异部分 for tag in s.get_opcodes(): opcode = tag[0] start = tag[3] end = tag[4] if opcode != "equal": # 添加高亮标签 text_area.tag_add("diff", f"1.0+{start}c", f"1.0+{end}c") text_area.configure(state="disabled") self.status_var.set("差异已高亮显示") def choose_color(self): """选择高亮颜色""" color = askcolor(title="选择高亮颜色", initialcolor=self.highlight_color) if color[1]: self.highlight_color = color[1] self.color_btn.configure(bg=self.highlight_color) def export_report(self): """导出差异报告""" if not 
self.results: messagebox.showwarning("警告", "没有可导出的结果") return try: # 创建报告数据结构 report_data = [] for signal, files_data in self.results.items(): for file, content in files_data.items(): report_data.append({ "信号": signal, "文件": file, "行内容": content }) # 转换为DataFrame df = pd.DataFrame(report_data) # 保存到Excel save_path = filedialog.asksaveasfilename( defaultextension=".xlsx", filetypes=[("Excel文件", "*.xlsx")], title="保存差异报告" ) if save_path: df.to_excel(save_path, index=False) self.status_var.set(f"报告已保存到: {save_path}") except Exception as e: messagebox.showerror("错误", f"导出报告失败: {str(e)}") def clear_cache(self): """清除缓存""" try: for file in os.listdir(self.cache_dir): if file.endswith(".cache"): os.remove(os.path.join(self.cache_dir, file)) self.file_cache = {} self.column_cache = {} self.status_var.set("缓存已清除") except Exception as e: self.status_var.set(f"清除缓存失败: {str(e)}") def manual_column_select(self): """手动指定列名位置""" if not self.files: messagebox.showinfo("提示", "请先选择文件夹") return # 创建手动选择窗口 manual_window = tk.Toplevel(self.root) manual_window.title("手动指定列名位置") manual_window.geometry("400x300") # 文件选择 ttk.Label(manual_window, text="选择文件:").pack(pady=(10, 5)) file_var = tk.StringVar() file_combo = ttk.Combobox(manual_window, textvariable=file_var, values=[os.path.basename(f) for f in self.files]) file_combo.pack(fill=tk.X, padx=20, pady=5) file_combo.current(0) # 行号输入 ttk.Label(manual_window, text="列名行号:").pack(pady=(10, 5)) row_var = tk.StringVar(value="1") row_entry = ttk.Entry(manual_window, textvariable=row_var) row_entry.pack(fill=tk.X, padx=20, pady=5) # 列号输入 ttk.Label(manual_window, text="信号列号:").pack(pady=(10, 5)) col_var = tk.StringVar(value="1") col_entry = ttk.Entry(manual_window, textvariable=col_var) col_entry.pack(fill=tk.X, padx=20, pady=5) # 确认按钮 def confirm_selection(): try: file_idx = file_combo.current() file_path = self.files[file_idx] header_row = int(row_var.get()) signal_col = int(col_var.get()) # 保存到缓存 header_info = {"header_row": header_row, 
"signal_col": signal_col} self.save_header_cache(file_path, header_info) messagebox.showinfo("成功", f"已为 {os.path.basename(file_path)} 设置列名位置:行{header_row} 列{signal_col}") manual_window.destroy() except Exception as e: messagebox.showerror("错误", f"无效输入: {str(e)}") ttk.Button(manual_window, text="确认", command=confirm_selection).pack(pady=20) if __name__ == "__main__": root = tk.Tk() app = EnhancedSignalComparator(root) root.mainloop() 1、扩大搜索范围之后,有一个文件中的信号值反而找不到了,但是信号值搜索范围在5时,能够找到,搜索范围在10时,找不到 2、行内容对比区域,现在只能显示一个文件的内容? 3、表格视图区域,除显示信号值之外,好像还搜索到了其他内容,行内容摘要显示有データ还有一行是DA: R? 4、目前来看显示的内容还是错的,信号值所在行,有内容的单元格对应的列名是错的 5、搜索到的两个文件【ドラフト版】D01D-00-02(HEV車).xlsm与【ドラフト版】D01D-00-03(コンベ車).xlsx显示的内容一致
07-24
#!/usr/bin/env python3
"""Merge every .txt file of a directory into one left-aligned Excel workbook.

Optionally performs a raw byte clean-up on temporary copies of the input
files before merging (``\\x01`` -> ``\\r``, ``\\x00`` -> space,
``\\x0A`` -> space).
"""
import os
import pandas as pd
from glob import glob
import sys
import argparse
from pathlib import Path
import shutil
import tempfile

# Candidate encodings tried in order by detect_encoding().
# NOTE: 'latin1' / 'iso-8859-1' can decode ANY byte sequence, so they must
# come LAST, otherwise multi-byte encodings such as gbk/gb2312 can never be
# detected.  (The previous list contained 'latin1' twice and placed it before
# 'gbk', which mis-detected Chinese-encoded files as latin1.)
COMMON_ENCODINGS = ['utf-8', 'gbk', 'gb2312', 'cp1252', 'iso-8859-1', 'latin1']


def detect_encoding(file_path):
    """Silently guess a file's text encoding.

    Tries each entry of COMMON_ENCODINGS by decoding the first 1 KB of the
    file and returns the first encoding that succeeds; falls back to 'utf-8'.
    """
    for encoding in COMMON_ENCODINGS:
        try:
            with open(file_path, 'r', encoding=encoding) as f:
                f.read(1024)  # probe only the first 1 KB
            return encoding
        except (UnicodeDecodeError, OSError):  # narrowed from a bare except:
            continue
    return 'utf-8'  # default when nothing decodes cleanly


def apply_left_alignment(output_file):
    """Apply horizontal left alignment to every cell of the workbook.

    Returns True on success, False (with a message on stderr) on failure.
    """
    try:
        # Imported locally so the rest of the script (detection, reading,
        # cleaning) keeps working even when openpyxl is not installed.
        from openpyxl import load_workbook
        from openpyxl.styles import Alignment

        wb = load_workbook(output_file)
        ws = wb.active
        left_align = Alignment(horizontal='left')
        for row in ws.iter_rows():
            for cell in row:
                cell.alignment = left_align
        wb.save(output_file)
        return True
    except Exception as e:
        print(f"应用左对齐样式时出错: {str(e)}", file=sys.stderr)
        return False


def clean_file_content(file_path):
    """Clean a file in place by raw byte substitution.

    Replaces ``\\x01`` with ``\\r``, ``\\x00`` with a space and ``\\x0A``
    (newline) with a space.  Returns True on success, False on failure.
    """
    try:
        with open(file_path, 'rb') as file:
            content = file.read()
        cleaned_content = (content.replace(b'\x01', b'\r')
                                  .replace(b'\x00', b' ')
                                  .replace(b'\x0A', b' '))
        with open(file_path, 'wb') as file:
            file.write(cleaned_content)
        return True
    except Exception as e:
        print(f"清理文件 {file_path} 时出错: {str(e)}", file=sys.stderr)
        return False


def read_txt_file(file_path, sep):
    """Read one text file into a DataFrame without treating any row as header.

    All values are kept as strings, empty fields are not converted to NaN and
    malformed lines are skipped.  Returns None when the file cannot be read.
    """
    encoding = detect_encoding(file_path)
    try:
        return pd.read_csv(
            file_path,
            sep=sep,
            encoding=encoding,
            engine='python',
            header=None,            # first line is data, not column names
            dtype=str,              # keep everything as text
            keep_default_na=False,  # do not turn empty cells into NaN
            on_bad_lines='skip'     # tolerate malformed rows
        )
    except Exception as e:
        print(f"读取文件 {file_path} 时出错: {str(e)}", file=sys.stderr)
        return None


def merge_txt_files(input_dir, output_file, sep='\t', recursive=False, clean_files=False):
    """Merge all text files under *input_dir* into a single Excel file.

    :param input_dir: directory containing the .txt files
    :param output_file: path of the Excel file to create
    :param sep: field separator inside the text files (default: tab)
    :param recursive: also search sub-directories
    :param clean_files: run clean_file_content() on temporary copies first
    :return: True on success, False otherwise
    """
    pattern = os.path.join(input_dir, '**', '*.txt') if recursive \
        else os.path.join(input_dir, '*.txt')
    txt_files = glob(pattern, recursive=recursive)
    if not txt_files:
        print(f"在 {input_dir} 中未找到任何.txt文件", file=sys.stderr)
        return False

    temp_dir = None
    try:
        if clean_files:
            temp_dir = tempfile.mkdtemp()
            print(f"创建临时目录: {temp_dir}")
            for file_path in txt_files:
                # NOTE(review): copies are keyed by basename, so a recursive
                # run with identically named files in different sub-folders
                # would overwrite — behaviour kept from the original.
                temp_path = os.path.join(temp_dir, os.path.basename(file_path))
                shutil.copy2(file_path, temp_path)
                if not clean_file_content(temp_path):
                    print(f"清理失败: {os.path.basename(file_path)}", file=sys.stderr)
                    continue
                print(f"已清理: {os.path.basename(file_path)}")
            txt_files = glob(os.path.join(temp_dir, '*.txt'), recursive=False)

        all_data = []
        for file_path in txt_files:
            df = read_txt_file(file_path, sep)
            if df is not None and not df.empty:
                all_data.append(df)
                print(f"已处理: {os.path.basename(file_path)}")

        if not all_data:
            print("所有文件均为空或无法读取", file=sys.stderr)
            return False

        combined_df = pd.concat(all_data, ignore_index=True)
        combined_df.to_excel(output_file, sheet_name='合并数据',
                             index=False, header=False)
        print(f"已创建Excel文件: {output_file}")
        return apply_left_alignment(output_file)
    except Exception as e:
        print(f"合并或写入文件时出错: {str(e)}", file=sys.stderr)
        return False
    finally:
        # Previously the temp dir leaked when every file was empty/unreadable
        # (early return happened before reaching the try/finally).  Cleaning
        # up here covers every exit path.
        if temp_dir is not None:
            shutil.rmtree(temp_dir)
            print(f"已删除临时目录: {temp_dir}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='合并多个文本文件到单个Excel文件,支持字节替换清理',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument('-i', '--input', required=True,
                        help='包含.txt文件的输入目录路径')
    parser.add_argument('-o', '--output', default='合并数据.xlsx',
                        help='输出Excel文件路径')
    parser.add_argument('-s', '--sep', default='\t',
                        help='文本文件中的分隔符,如",", ";", "\\t"等')
    parser.add_argument('-r', '--recursive', action='store_true',
                        help='递归搜索子目录中的文件')
    parser.add_argument('-c', '--clean', action='store_true',
                        help='在合并前执行字节替换清理(替换 \\x01, \\x00, \\x0A)')
    args = parser.parse_args()

    # Make sure the output directory exists before writing.
    output_dir = os.path.dirname(args.output)
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    success = merge_txt_files(
        args.input,
        args.output,
        args.sep,
        args.recursive,
        args.clean
    )
    sys.exit(0 if success else 1)

# 智能纠错:修改读取文件格式错误的问题
07-17
<think>我们注意到问题:爬取第1页就显示无数据,可能已到末页。这通常是由于以下原因之一: 1. 登录状态未正确加载(Cookie问题) 2. 页面元素加载失败(选择器问题或页面结构变化) 3. 反爬机制生效(如被识别为爬虫) 在之前的修复中,我们已经改进了Cookie加载和元素定位。但为了确保万无一失,我们需要进一步调试。 以下是一个增强版的爬虫,特别针对登录验证和元素定位进行了优化: 步骤: 1. 在加载Cookie后,我们添加了登录状态验证(检查用户头像或用户名元素是否存在) 2. 在获取评论时,我们使用更健壮的元素定位方式,并添加了更多等待条件和异常处理 3. 添加了页面截图功能,方便调试时查看页面状态 由于我们无法直接运行,这里提供代码并建议用户根据截图调整选择器。 完整代码(包含增强的调试功能): </think> # 解决网易云音乐爬取无数据问题 您遇到的问题是爬取第一页时无数据,显示"第 1 页无数据,可能已到末页"。这通常与登录状态、元素定位或页面加载问题有关。下面我将提供完整的解决方案,包含增强的调试功能和多种应对策略。 ```python from selenium import webdriver from selenium.webdriver.common.by import By from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions as EC from selenium.common.exceptions import TimeoutException, NoSuchElementException, WebDriverException import time import json import os import pymysql from datetime import datetime import random import logging import requests from bs4 import BeautifulSoup # 配置日志 logging.basicConfig( level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s', handlers=[ logging.FileHandler('netease_crawler.log'), logging.StreamHandler() ] ) # 数据库配置 DB_CONFIG = { 'host': 'localhost', 'user': 'root', 'password': 'your_password', 'database': 'netease_music', 'charset': 'utf8mb4' } # 歌曲ID和爬取设置 SONG_ID = "186721" # 晴天 MAX_PAGES = 5 USER_AGENTS = [ "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.3 Safari/605.1.15", "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0" ] def init_driver(): """初始化浏览器驱动""" options = webdriver.ChromeOptions() # 反爬虫设置 options.add_argument('--disable-blink-features=AutomationControlled') options.add_experimental_option('excludeSwitches', ['enable-automation']) options.add_argument('--disable-infobars') options.add_argument('--disable-notifications') 
options.add_argument('--start-maximized') # 最大化窗口确保元素可见 # 设置随机User-Agent options.add_argument(f'--user-agent={random.choice(USER_AGENTS)}') # 无头模式(可选) # options.add_argument('--headless') # 禁用图片和CSS加载(加速) prefs = { "profile.managed_default_content_settings.images": 2, "profile.managed_default_content_settings.stylesheet": 2 } options.add_experimental_option("prefs", prefs) driver = webdriver.Chrome(options=options) return driver def load_cookies(driver, url): """加载已保存的Cookie绕过登录""" cookie_file = 'netease_cookies.json' # 检查Cookie文件是否存在 if not os.path.exists(cookie_file): logging.error(f"Cookie文件 {cookie_file} 不存在,请先运行手动登录获取Cookie") return False try: driver.get("https://music.163.com") time.sleep(3) # 等待页面加载 # 从文件加载Cookie with open(cookie_file, 'r', encoding='utf-8') as f: cookies = json.load(f) # 添加Cookie到浏览器 for cookie in cookies: # 处理expiry为int类型 if 'expiry' in cookie: cookie['expiry'] = int(cookie['expiry']) # 确保domain正确 if 'domain' in cookie and 'music.163.com' not in cookie['domain']: cookie['domain'] = '.music.163.com' try: driver.add_cookie(cookie) except Exception as e: logging.warning(f"添加Cookie时出错: {e}") # 刷新页面使Cookie生效 driver.refresh() time.sleep(3) # 验证登录状态 - 检查用户头像 try: WebDriverWait(driver, 10).until( EC.presence_of_element_located((By.CSS_SELECTOR, ".head.j-userpic img")) ) logging.info("Cookie加载成功!登录状态验证通过") return True except TimeoutException: logging.warning("登录状态验证失败,Cookie可能已过期") return False except Exception as e: logging.error(f"加载Cookie失败: {e}") return False def get_page_comments(driver, url, page_num): """获取单页评论数据""" logging.info(f"正在爬取第 {page_num} 页: {url}") try: driver.get(url) time.sleep(3) # 等待页面基本加载 # 调试:保存页面源码 with open(f'page_{page_num}_source.html', 'w', encoding='utf-8') as f: f.write(driver.page_source) # 切换到包含评论的iframe WebDriverWait(driver, 15).until( EC.frame_to_be_available_and_switch_to_it((By.ID, "g_iframe")) ) logging.info("成功切换到评论iframe") # 等待评论区域加载 WebDriverWait(driver, 20).until( 
EC.presence_of_element_located((By.CSS_SELECTOR, ".cmmts.j-flag")) ) logging.info("评论区域加载成功") # 模拟滚动加载更多评论 for i in range(3): driver.execute_script("window.scrollTo(0, document.body.scrollHeight);") time.sleep(1 + random.random()) logging.info(f"滚动页面 {i+1}/3 次") # 获取评论容器 comments_container = driver.find_element(By.CSS_SELECTOR, '.cmmts.j-flag') # 使用更健壮的选择器定位评论项 items = comments_container.find_elements(By.CSS_SELECTOR, '.itm') logging.info(f"找到 {len(items)} 条评论元素") if not items: # 尝试备用选择器 items = comments_container.find_elements(By.CSS_SELECTOR, 'div.itm') logging.info(f"使用备用选择器找到 {len(items)} 条评论元素") comments = [] for item in items: try: # 用户信息 user_elem = item.find_element(By.CSS_SELECTOR, '.head .name') user = user_elem.text user_link = user_elem.get_attribute('href') user_id = user_link.split('=')[-1] if user_link and '=' in user_link else None # 评论内容 content_elem = item.find_element(By.CSS_SELECTOR, '.cntwrap .cnt') content = content_elem.text # 评论时间 time_elem = item.find_element(By.CSS_SELECTOR, '.time') time_str = time_elem.get_attribute('title') or time_elem.text # 点赞数 likes_elem = item.find_element(By.CSS_SELECTOR, '.lke') likes = likes_elem.get_attribute('data-count') or '0' # 用户VIP等级 vip_level = None try: vip_icon = item.find_element(By.CSS_SELECTOR, '.u-icn2-vip') vip_level = vip_icon.get_attribute('class').split('-')[-1] except NoSuchElementException: pass # IP属地 ip_location = None try: ip_location = item.find_element(By.CSS_SELECTOR, '.ip-location').text except NoSuchElementException: pass comments.append({ 'user_id': user_id, 'username': user, 'content': content, 'comment_time': time_str, 'likes': int(likes), 'vip_level': vip_level, 'ip_location': ip_location, 'crawl_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'page_num': page_num }) except Exception as e: logging.error(f"处理评论元素时出错: {str(e)}") continue # 切回主文档 driver.switch_to.default_content() return comments except TimeoutException as e: logging.error(f"第 {page_num} 页加载超时: {str(e)}") 
driver.save_screenshot(f'timeout_page_{page_num}.png') return [] except Exception as e: logging.error(f"获取第 {page_num} 页评论时出错: {str(e)}") import traceback traceback.print_exc() return [] def alternative_crawl_method(song_id, page): """备选爬取方法:使用API接口""" url = f"https://music.163.com/api/v1/resource/comments/R_SO_4_{song_id}?limit=20&offset={(page-1)*20}" headers = { 'User-Agent': random.choice(USER_AGENTS), 'Referer': f'https://music.163.com/song?id={song_id}', 'Cookie': get_cookies_as_string() } try: response = requests.get(url, headers=headers) if response.status_code == 200: data = response.json() comments = [] for comment in data.get('comments', []): comments.append({ 'user_id': comment['user']['userId'], 'username': comment['user']['nickname'], 'content': comment['content'], 'comment_time': time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(comment['time']/1000)), 'likes': comment['likedCount'], 'vip_level': 'vip' if comment['user'].get('vipType') == 1 else None, 'ip_location': comment.get('ipLocation', {}).get('location', ''), 'crawl_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'page_num': page }) return comments except Exception as e: logging.error(f"API爬取失败: {str(e)}") return [] def get_cookies_as_string(): """从文件获取Cookie字符串""" try: with open('netease_cookies.json', 'r', encoding='utf-8') as f: cookies = json.load(f) return "; ".join([f"{c['name']}={c['value']}" for c in cookies]) except: return "" def save_to_mysql(comments): """将评论数据保存到MySQL数据库""" if not comments: logging.warning("没有评论数据需要保存") return try: conn = pymysql.connect(**DB_CONFIG) cursor = conn.cursor() # 创建表(如果不存在) create_table_sql = """ CREATE TABLE IF NOT EXISTS song_comments ( id INT AUTO_INCREMENT PRIMARY KEY, user_id VARCHAR(50), username VARCHAR(100), content TEXT, comment_time VARCHAR(50), likes INT, vip_level VARCHAR(10), ip_location VARCHAR(50), crawl_time DATETIME, page_num INT, INDEX idx_user (user_id), INDEX idx_time (crawl_time) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 
COLLATE=utf8mb4_unicode_ci; """ cursor.execute(create_table_sql) # 插入数据 insert_sql = """ INSERT INTO song_comments ( user_id, username, content, comment_time, likes, vip_level, ip_location, crawl_time, page_num ) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s) """ # 准备数据 data = [( c['user_id'], c['username'], c['content'], c['comment_time'], c['likes'], c['vip_level'], c['ip_location'], c['crawl_time'], c['page_num'] ) for c in comments] cursor.executemany(insert_sql, data) conn.commit() logging.info(f"成功插入 {len(comments)} 条评论到数据库") except pymysql.Error as e: logging.error(f"数据库操作失败: {e}") finally: if 'conn' in locals() and conn: conn.close() def crawl_multi_pages(song_id, max_pages=10): """主爬虫函数""" driver = init_driver() # 加载Cookie绕过登录 if not load_cookies(driver, "https://music.163.com"): logging.error("登录失败,尝试使用API方式爬取") all_comments = [] for page in range(1, max_pages + 1): comments = alternative_crawl_method(song_id, page) if comments: save_to_mysql(comments) all_comments.extend(comments) logging.info(f"API方式获取第 {page} 页,共 {len(comments)} 条评论") else: break time.sleep(1 + random.random() * 2) driver.quit() return all_comments base_url = f"https://music.163.com/#/song?id={song_id}" all_comments = [] try: for page in range(1, max_pages + 1): # 构建带页码参数的URL page_url = f"{base_url}&page={page}" if page > 1 else base_url comments = get_page_comments(driver, page_url, page) # 如果Selenium方式失败,尝试API方式 if not comments: logging.warning(f"Selenium方式获取第 {page} 页失败,尝试API方式") comments = alternative_crawl_method(song_id, page) if not comments: logging.warning(f"第 {page} 页无数据,可能已到末页") break # 保存数据save_to_mysql(comments) all_comments.extend(comments) logging.info(f"已获取第 {page} 页,共 {len(comments)} 条评论") # 随机延迟防止被封 delay = 1.5 + random.random() * 4 # 1.5-5.5秒随机延迟 time.sleep(delay) except Exception as e: logging.error(f"爬取过程中出错: {e}") import traceback traceback.print_exc() finally: driver.quit() logging.info(f"爬取结束,共获取 {len(all_comments)} 条评论") return all_comments def 
get_cookies_manually(): """手动登录获取Cookie(只需运行一次)""" driver = webdriver.Chrome() try: driver.get("https://music.163.com/#/login") logging.info("请手动登录网易云音乐...") # 等待用户手动登录 input("请手动登录网易云音乐,登录成功后按回车键继续...") # 获取并保存Cookie cookies = driver.get_cookies() with open('netease_cookies.json', 'w', encoding='utf-8') as f: json.dump(cookies, f, ensure_ascii=False, indent=2) logging.info("Cookie已保存到 netease_cookies.json") # 验证Cookie是否有效 driver.get("https://music.163.com") time.sleep(3) try: user_element = driver.find_element(By.CSS_SELECTOR, ".head.j-userpic img") logging.info(f"登录验证成功!用户头像可见") except: logging.warning("登录状态验证失败,Cookie可能无效") finally: driver.quit() def debug_page_source(): """调试页面源码""" with open('page_1_source.html', 'r', encoding='utf-8') as f: html = f.read() soup = BeautifulSoup(html, 'html.parser') # 检查登录状态 login_status = soup.select_one(".head.j-userpic img") print(f"登录状态: {'已登录' if login_status else '未登录'}") # 检查评论容器 iframe = soup.select_one("iframe#g_iframe") print(f"iframe存在: {bool(iframe)}") # 如果iframe存在,检查评论内容 if iframe: iframe_src = iframe.get('src', '') print(f"iframe src: {iframe_src}") # 尝试从iframe中提取评论 comments = soup.select('div.cmmts.j-flag div.itm') print(f"找到 {len(comments)} 条评论") if __name__ == "__main__": # 第一步:获取Cookie(只需运行一次) # 取消下面一行的注释来获取Cookie # get_cookies_manually() # 第步:爬取数据 crawl_multi_pages(song_id=SONG_ID, max_pages=MAX_PAGES) # 第三步:如果仍有问题,调试页面源码 # debug_page_source() ``` ## 问题分析与解决方案 ### 1. 登录状态验证失败 - **增强验证逻辑**:使用更可靠的选择器检查用户头像元素 - **多级回退机制**:如果Selenium方式失败,自动切换到API方式爬取 - **Cookie字符串处理**:为API请求提供Cookie支持 ### 2. 页面元素定位问题 - **多选择器策略**: ```python # 主选择器 items = comments_container.find_elements(By.CSS_SELECTOR, '.itm') # 备用选择器 if
评论
成就一亿技术人!
拼手气红包6.0元
还能输入1000个字符
 
红包 添加红包
表情包 插入表情
 条评论被折叠 查看
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值