Adding manual band selection support to the MR1100

This article describes how to add manual band selection support to the MR1100. It walks through the at!band command in detail, covering how the band masks are defined and how individual bands are combined with a bitwise OR, uses the addition of specific LTE bands as a new menu entry as the worked example to show the command format and the step-by-step procedure, and links to an online mask calculator to help with the arithmetic (a short Python sketch of the same calculation follows the table below).
at!band=?
------------------------------------------------------------------------------------------
Index, Name,                        GW Band Mask     L Band Mask      L ext Band Mask
00, Auto,                           0000000004C00000 000020003000285F 0000000000000002
01, WCDMA All,                      0000000004C00000 0000000000000000 0000000000000000
02, LTE All,                        0000000000000000 000020003000285F 0000000000000002
03, LTE Band 1,                     0000000000000000 0000000000000001 0000000000000000
04, LTE Band 2,                     0000000000000000 0000000000000002 0000000000000000
05, LTE Band 12,                    0000000000000000 0000000000000800 0000000000000000
06, LTE Band 14,                    0000000000000000 0000000000002000 0000000000000000
07, LTE Band 30,                    0000000000000000 0000000020000000 0000000000000000
08, LTE Band 66,                    0000000000000000 0000000000000000 0000000000000002
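The summary above mentions combining bands by bitwise OR. Below is a minimal sketch of that arithmetic in Python, assuming the mapping the table implies: LTE band N sets bit (N-1) in the "L Band Mask", and bands above 64 (such as band 66) move to the "L ext Band Mask" at bit (N-65). The helper name lte_masks and the set-form at!band command printed at the end are illustrative assumptions only; verify the exact syntax, index and argument order against the device's own at!band=? output before using it.

# Sketch of the band-mask arithmetic behind at!band (assumptions noted above).
def lte_masks(bands):
    """Return (l_mask, l_ext_mask) for a list of LTE band numbers."""
    l_mask = 0
    l_ext_mask = 0
    for band in bands:
        if band <= 64:
            l_mask |= 1 << (band - 1)       # e.g. band 2 -> 0x2, band 12 -> 0x800
        else:
            l_ext_mask |= 1 << (band - 65)  # e.g. band 66 -> 0x2 in the ext mask
    return l_mask, l_ext_mask

if __name__ == "__main__":
    # Example: a menu entry that allows only LTE bands 2, 12 and 66.
    l_mask, l_ext_mask = lte_masks([2, 12, 66])
    print(f"L Band Mask:     {l_mask:016X}")      # 0000000000000802
    print(f"L ext Band Mask: {l_ext_mask:016X}")  # 0000000000000002
    # Hypothetical set command built from these masks (check against the device):
    print(f'at!band=09,"LTE B2+B12+B66",0000000000000000,{l_mask:016X},{l_ext_mask:016X}')

The computed masks match the table: band 2 alone gives 0000000000000002, band 12 alone gives 0000000000000800, and ORing them yields 0000000000000802, which is exactly what the online mask calculator would produce.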
# clm_generator/excel_to_clm.py import logging import os import sys from datetime import datetime import re import json from pathlib import Path from openpyxl import load_workbook import xlrd from jinja2 import Template from collections import defaultdict from utils import resource_path, get_output_dir from pathlib import Path # ------------------------------- # 日志配置 # ------------------------------- PROJECT_ROOT = Path(__file__).parent.resolve() LOG_DIR = PROJECT_ROOT / "output" / "log" LOG_DIR.mkdir(parents=True, exist_ok=True) LOG_FILE = LOG_DIR / f"excel_to_clm_{datetime.now().strftime('%Y%m%d_%H%M%S')}.log" class ExcelToCLMConverter: def __init__(self, config_path="config/config.json", locale_display_name=None, locale_names_map=None): """ Args: config_path: 配置文件路径 locale_display_name: 兼容旧逻辑,默认 DEFAULT 名称(如 US) locale_names_map: 新增参数,用于设置每个 locale_target 的 assigned_locale 示例: { "locale_2g_idx": "us", "locale_2g_ht_idx": "us_ht", "locale_5g_idx": "eu_5g", ... } """ self.logger = logging.getLogger(__name__) self.logger.info("初始化完成") self.last_config = None self.used_ranges_by_band = { "2g": [], "5g": [], "6g": [] } # === Step 1: 加载配置文件 === self.config_file_path = resource_path(config_path) if not os.path.exists(self.config_file_path): raise FileNotFoundError(f"配置文件不存在: {self.config_file_path}") with open(self.config_file_path, 'r', encoding='utf-8') as f: self.config = json.load(f) self.logger.info(f" 配置文件已加载: {self.config_file_path}") # === Step 2: 处理 target_c_file === rel_c_path = self.config.get("target_c_file", "input/wlc_clm_data_6726b0.c") self.target_c_file = resource_path(rel_c_path) if not os.path.exists(self.target_c_file): raise FileNotFoundError(f"配置中指定的 C 源文件不存在: {self.target_c_file}") self.logger.info(f" 已定位目标 C 文件: {self.target_c_file}") # === Step 3: 初始化输出目录 === output_dir = self.config.get("output_path", "output") self.output_dir = resource_path(output_dir) Path(self.output_dir).mkdir(parents=True, exist_ok=True) # 确保存在 self.logger.info(f" 输出目录: {self.output_dir}") # === Step 4: 更新 locale_targets from locale_names_map === if locale_names_map and isinstance(locale_names_map, dict): updated_count = 0 for tgt in self.config.get("locale_targets", []): enum_key = tgt.get("enum") new_locale = locale_names_map.get(enum_key) if new_locale is not None: old_locale = tgt.get("assigned_locale") if old_locale != new_locale: tgt["assigned_locale"] = new_locale self.logger.info(f" 更新 {enum_key}: '{old_locale}' → '{new_locale}'") updated_count += 1 if updated_count > 0: # 写回 config.json try: with open(self.config_file_path, 'w', encoding='utf-8') as f: json.dump(self.config, f, indent=4, ensure_ascii=False) self.logger.info(f" 已保存 {updated_count} 个 assigned_locale 到 config.json") except Exception as e: self.logger.info(f" 写入 config.json 失败: {e}") # === Step 5: 设置默认 fallback 显示名(兼容旧逻辑)=== first_target = self.config.get("locale_targets", [{}])[0] fallback_locale = first_target.get("assigned_locale", "DEFAULT") self.locale_name = fallback_locale self.locale_display_name = ( locale_display_name or fallback_locale.replace('-', '_') ) # === Step 6: channel_set_map 加载 === persisted_map = self.config.get("channel_set_map") if persisted_map is None: raise KeyError("配置文件缺少必需字段 'channel_set_map'") if not isinstance(persisted_map, dict): raise TypeError(f"channel_set_map 必须是字典类型,当前类型: {type(persisted_map)}") self.channel_set_map = {str(k): int(v) for k, v in persisted_map.items()} self.logger.info(f" 成功加载 channel_set_map (共 {len(self.channel_set_map)} 项)") # === 初始化数据容器 === self.tx_power_data = [] 
self.tx_limit_entries = [] self.eirp_entries = [] self.global_ch_min = None self.global_ch_max = None self.generated_ranges = [] self.per_sheet_results = [] # 存放每个 sheet 的解析结果 def reset(self): """重置所有运行时数据,便于多次生成""" self.tx_limit_entries.clear() self.eirp_entries.clear() self.used_ranges.clear() self.tx_power_data.clear() # 同步清空 self.generated_ranges.clear() self.used_ranges_by_band = {"2g": [], "5g": [], "6g": []} # ✅ 重置分类 self.last_config = None self.logger.info(" 所有生成数据已重置") self.logger.info(f" 初始化完成。目标C文件: {self.target_c_file}") self.logger.info(f" 输出目录: {self.output_dir}") self.logger.info(f" Locale ID: {self.locale_name}") # ==================== 新增工具方法:大小写安全查询 ==================== def _ci_get(self, data_dict, key): """ Case-insensitive 字典查找 """ for k, v in data_dict.items(): if k.lower() == key.lower(): return v return None def _ci_contains(self, data_list, item): """ Case-insensitive 判断元素是否在列表中 """ return any(x.lower() == item.lower() for x in data_list) def parse_mode_cell(self, cell_value): # self.logger.info(" 正在执行 parse_mode_cell()...") # self.logger.info(" ...") if not cell_value: return None val = str(cell_value).strip() val = re.sub(r'\s+', ' ', val.replace('\n', ' ').replace('\r', ' ')) val_upper = val.upper() self.logger.info(f" 解析模式单元格: '{val}'") found_modes = [] bandwidth = None # === Step 1: 提取带宽 (20/40/80/160/320) === bw_match = re.search(r'(20|40|80|160|320)\s*(?:MHZ|M)?\b', val_upper) if bw_match: bandwidth = bw_match.group(1) else: # 尾部数字 fallback: HT40 → 40 tail_num = re.search(r'(20|40|80|160|320)(?![0-9])', val_upper) if tail_num: bandwidth = tail_num.group(1) if not bandwidth: # 再次 fallback:检查是否有任何带宽数字出现 all_bws = [b for b in ['20', '40', '80', '160', '320'] if b in val_upper] bandwidth = max(all_bws, key=int) if all_bws else '20' # === Step 2: 拆分表达式 === # 支持多种分隔符:/, \, |, 空格等 tokens = re.split(r'[/\\|+\s]+', val_upper) tokens = [t.strip() for t in tokens if t.strip()] # 如果原始字符串本身就是一个整体词(如 "11AC"),也作为一个 token if not tokens: tokens = [val_upper] # === Step 3: 定义模式关键词映射(支持前缀匹配)=== mode_indicators = [ ('VHT', '11AC'), ('11AC', '11AC'), ('HE', '11AX'), # 注意:不能写 '^HE$',否则 HE20 不会被识别 ('11AX', '11AX'), ('BE', '11BE'), # BE320、BE20 都能命中 ('11BE', '11BE'), ('EHT', '11BE'), ('HT', '11N'), ('11N', '11N'), ('11A', '11A'), ('11G', '11G'), ('DSSS', '11B'), ('CCK', '11B'), ('11B', '11B'), ] # === Step 4: 遍历所有 token,进行部分包含或前缀匹配 === for token in tokens: matched = False for pattern, canonical in mode_indicators: # 使用 re.search 而非 fullmatch,支持子串匹配 if re.search(pattern, token): if canonical not in found_modes: found_modes.append(canonical) matched = True break # 找到就跳出,避免重复添加 # === Step 5: 特殊情况补全:比如 "11AC/AX" → 应该两个都识别 === full_str_clean = re.sub(r'[^A-Z0-9]', '', val_upper) # 去除非字母数字 if '11AC' in full_str_clean and 'AX' in val_upper and '11AX' not in found_modes: found_modes.append('11AX') if '11AX' in full_str_clean and 'BE' in val_upper and '11BE' not in found_modes: found_modes.append('11BE') # 排序统一输出顺序(可选) order = {'11B': 0, '11G': 1, '11A': 2, '11N': 3, '11AC': 4, '11AX': 5, '11BE': 6} found_modes.sort(key=lambda x: order.get(x, 99)) # === Step 6: 最终校验 === if not found_modes: self.logger.info(f" 无法识别物理模式: '{cell_value}'") return None self.logger.info(f" ✓ 解析成功 → 模式={found_modes}, 带宽={bandwidth}MHz") return { "phy_mode_list": found_modes, "bw": bandwidth } def format_phy_mode(self, mode: str) -> str: """ 自定义物理层模式输出格式: - 11B/G/N 输出为小写:11b / 11g / 11n - 其他保持原样(如 11AC, 11BE) """ return { '11A': '11a', '11B': '11b', '11G': '11g', '11N': '11n' }.get(mode, mode) def col_to_letter(self, 
col): col += 1 result = "" while col > 0: col -= 1 result = chr(col % 26 + ord('A')) + result col //= 26 return result def is_valid_power(self, value): try: float(value) return True except (ValueError, TypeError): return False def get_cell_value(self, ws_obj, row_idx, col_idx): fmt = ws_obj["format"] if fmt == "xls": return str(ws_obj["sheet"].cell_value(row_idx, col_idx)).strip() else: cell = ws_obj["sheet"].cell(row=row_idx + 1, column=col_idx + 1) val = cell.value return str(val).strip() if val is not None else "" def find_table_header_row(self, ws_obj): """查找包含 'Mode' 和 'Rate' 的表头行""" fmt = ws_obj["format"] ws = ws_obj["sheet"] for r in range(15): mode_col = rate_col = None if fmt == "xlsx": if r + 1 > ws.max_row: continue for c in range(1, ws.max_column + 1): cell = ws.cell(row=r + 1, column=c) if not cell.value: continue val = str(cell.value).strip() if val == "Mode": mode_col = c elif val == "Rate": rate_col = c if mode_col and rate_col and abs(mode_col - rate_col) == 1: self.logger.info(f" 找到表头行: 第 {r+1} 行") return r, mode_col - 1, rate_col - 1 # 转为 0-based else: if r >= ws.nrows: continue for c in range(ws.ncols): val = ws.cell_value(r, c) if not val: continue val = str(val).strip() if val == "Mode": mode_col = c elif val == "Rate": rate_col = c if mode_col and rate_col and abs(mode_col - rate_col) == 1: self.logger.info(f" 找到表头行: 第 {r+1} 行") return r, mode_col, rate_col return None, None, None def find_auth_power_above_row(self, ws_obj, start_row): """查找 '认证功率' 所在的合并单元格及其列范围""" fmt = ws_obj["format"] ws = ws_obj["sheet"] self.logger.info(f" 开始向上查找 '认证功率',扫描第 0 ~ {start_row} 行...") if fmt == "xlsx": # xlsx 格式:使用 openpyxl 处理合并单元格 for mr in ws.merged_cells.ranges: top_left = ws.cell(row=mr.min_row, column=mr.min_col) val = str(top_left.value) if top_left.value else "" if "证功率" in val or "Cert" in val: r_idx = mr.min_row - 1 # 转为0基索引 if r_idx <= start_row: start_col = mr.min_col - 1 end_col = mr.max_col - 1 self.logger.info(f" 发现合并单元格含 '证功率': '{val}' → {self.col_to_letter(start_col)}{mr.min_row}") return start_col, end_col, r_idx # fallback:搜索普通单元格 for r in range(start_row + 1): for c in range(1, ws.max_column + 1): cell = ws.cell(row=r + 1, column=c) if cell.value and ("证功率" in str(cell.value)): self.logger.info(f" 普通单元格发现 '证功率': '{cell.value}' @ R{r + 1}C{c}") return c - 1, c - 1, r else: # fmt == "xls",使用 xlrd 读取 .xls 文件 from xlrd import Book, Cell # 第一步:检查合并单元格 if hasattr(ws, 'merged_cells'): for (rlo, rhi, clo, chi) in ws.merged_cells: # 合并区域左上角单元格为主值 cell = ws.cell(rlo, clo) val = cell.value if val and ("证功率" in str(val) or "Cert" in str(val)): if rlo <= start_row: self.logger.info(f" 发现 .xls 合并单元格含 '证功率': '{val}' → R{rlo + 1}C{clo + 1}") return clo, chi - 1, rlo # clo ~ chi-1 是实际列索引范围(chi 是开区间) # 第二步:fallback 到普通单元格搜索 for r in range(min(ws.nrows, start_row + 1)): for c in range(ws.ncols): val = ws.cell_value(r, c) if val and ("证功率" in str(val) or "Cert" in str(val)): self.logger.info(f" 发现 .xls 普通单元格 '证功率': '{val}' @ R{r + 1}C{c + 1}") return c, c, r return None, None, None def parse_ch_columns_under_auth(self, ws_obj, ch_row_idx, auth_start_col, auth_end_col, verbose=True): """ 解析指定行中 [auth_start_col, auth_end_col] 范围内的列,提取 CHx 信道编号 返回: {col_index: ch_number} 映射(便于主循环按列查找) """ fmt = ws_obj["format"] ws = ws_obj["sheet"] ch_map = {} # key: col_index, value: ch_number if verbose: self.logger.info(f"🔍 解析 CH 标题行(第 {ch_row_idx + 1} 行),列范围: Col {auth_start_col} ~ {auth_end_col}") for c in range(auth_start_col, auth_end_col + 1): val = self.get_cell_value(ws_obj, ch_row_idx, c) if 
not val: continue # 加强版正则:必须是 CH 开头,可接分隔符,然后是数字 match = re.search(r"^CH[\s_\-]?(\d+)$", str(val).strip(), re.I) if not match: continue try: ch_num = int(match.group(1)) ch_map[c] = ch_num # ✅ 列索引 → 信道号 if verbose: self.logger.info(f" ✅ 发现 CH{ch_num} @ 第 {c + 1} 列 (Col={c})") except ValueError: continue if not ch_map: if verbose: self.logger.info("❌ 在指定区域内未找到任何有效的 CHx 列") else: chs = sorted(ch_map.values()) if verbose: self.logger.info(f"🎉 成功提取 CH{min(chs)}-{max(chs)},共 {len(chs)} 个信道") return ch_map def encode_power(self, dbm): return int(round((float(dbm) + 1.5) * 4)) def merge_consecutive_channels(self, ch_list: list, band: str = "2g", bw: str = "20") -> list: """ 合并连续信道: - 2G: 数值连续(可跨带宽) - 5G/6G: 在对应带宽的合法信道集中找相邻项进行合并 """ if not ch_list: return [] sorted_ch = sorted(set(ch_list)) # === 2G: 简单数值连续(适用于 20M/40M)=== if band.lower() == "2g": ranges = [] start = end = sorted_ch[0] for ch in sorted_ch[1:]: if ch == end + 1: end = ch else: ranges.append((start, end)) start = end = ch ranges.append((start, end)) return ranges # === 5G / 6G: 基于合法信道集的索引连续性 === elif band.lower() in ["5g", "6g"]: try: valid_chs = self.get_valid_channels_for_bandwidth(band, bw) except ValueError as e: print(f"⚠️ {e} 使用默认数值连续合并") return self.merge_consecutive_channels(ch_list, band="2g") if not valid_chs: print(f"⚠️ 未找到 {band.upper()} @{bw}MHz 的合法信道列表,使用原始数值合并") return self.merge_consecutive_channels(ch_list, band="2g") # 构建信道到索引的映射 ch_to_index = {ch: idx for idx, ch in enumerate(valid_chs)} ranges = [] current_start = current_end = None for ch in sorted_ch: if ch not in ch_to_index: print(f"⚠️ 警告:信道 {ch} 不属于 {band.upper()} @{bw}MHz 合法集合,将单独成段") if current_start is not None: ranges.append((current_start, current_end)) current_start = current_end = None ranges.append((ch, ch)) continue idx = ch_to_index[ch] if current_start is None: current_start = current_end = ch else: prev_idx = ch_to_index[current_end] if idx == prev_idx + 1: # 在合法列表中是“下一个” current_end = ch else: ranges.append((current_start, current_end)) current_start = current_end = ch if current_start is not None: ranges.append((current_start, current_end)) return ranges else: raise ValueError(f"不支持频段类型: {band}") def find_all_ch_rows_after_auth(self, ws_obj, auth_row, auth_start_col, auth_end_col, max_scan_ahead=70): """ 从 auth_row + 1 开始向下扫描,在限定范围内查找所有包含 CHx 格式的行 返回所有匹配的 0-based 行索引列表 """ fmt = ws_obj["format"] ws = ws_obj["sheet"] nrows = ws.max_row if fmt == "xlsx" else ws.nrows start_scan = auth_row + 1 end_scan = min(start_scan + max_scan_ahead, nrows) ch_rows = [] found_positions = [] # 用于日志 self.logger.info(f"🔍 开始扫描 CH 标题行:第 {start_scan + 1} ~ {end_scan} 行,列 {auth_start_col} ~ {auth_end_col}") for r in range(start_scan, end_scan): for c in range(auth_start_col, auth_end_col + 1): val = self.get_cell_value(ws_obj, r, c) if not val: continue # 更严格的正则:以 CH 开头,可接分隔符,后跟数字,且前后无字母 # 避免匹配 CHECK / PCH / BATCH 等 if re.search(r"^CH[\s_\-]?(\d+)$", str(val).strip(), re.I): ch_rows.append(r) found_positions.append((r + 1, c + 1, val)) # 1-based for log break # 每行只记录一次 if ch_rows: self.logger.info(f"✅ 发现 {len(ch_rows)} 个 CH 标题行:") for row_1b, col_1b, val in found_positions: self.logger.info(f" → 第 {row_1b} 行, 第 {col_1b} 列: '{val}'") else: self.logger.info("❌ 未找到任何有效的 CH 标题行") return ch_rows def collect_tx_limit_data(self, ws_obj, sheet_config, header_row_idx, auth_row, auth_start, auth_end, mode_col, rate_col): """ 收集发射功率限制数据,支持多个 CH 区域(multi-channel blocks) """ self.logger.info(" 正在执行 collect_tx_limit_data()...") self.logger.info(" ...") # 获取所有 CH 标题行(支持多块) 
ch_row_indices = self.find_all_ch_rows_after_auth(ws_obj, auth_row, auth_start, auth_end) if not ch_row_indices: self.logger.info("❌ 错误:无法定位任何 CH 标题行,跳过该表") return [] nrows = ws_obj["sheet"].nrows if ws_obj["format"] == "xls" else ws_obj["sheet"].max_row fmt = ws_obj["format"] ws = ws_obj["sheet"] entries = [] row_mode_info = {} # {row_index: parsed_mode_info} # ======== 第一步:构建 row_mode_info(保持不变)======== if fmt == "xlsx": merged_cells_map = {} for mr in ws.merged_cells.ranges: for r in range(mr.min_row - 1, mr.max_row): for c in range(mr.min_col - 1, mr.max_col): merged_cells_map[(r, c)] = mr for row_idx in range(header_row_idx + 1, nrows): cell_value = None is_merged = (row_idx, mode_col) in merged_cells_map if is_merged: mr = merged_cells_map[(row_idx, mode_col)] top_cell = ws.cell(row=mr.min_row, column=mr.min_col) cell_value = top_cell.value else: raw_cell = ws.cell(row=row_idx + 1, column=mode_col + 1) cell_value = raw_cell.value mode_info = self.parse_mode_cell(cell_value) if mode_info: if is_merged: mr = merged_cells_map[(row_idx, mode_col)] for r in range(mr.min_row - 1, mr.max_row): if header_row_idx < r < nrows: row_mode_info[r] = mode_info.copy() else: row_mode_info[row_idx] = mode_info.copy() else: # XLS for row_idx in range(header_row_idx + 1, ws.nrows): cell_value = self.get_cell_value(ws_obj, row_idx, mode_col) mode_info = self.parse_mode_cell(cell_value) if mode_info: row_mode_info[row_idx] = mode_info.copy() # ======== 第二步:按 CH 块分段处理数据 ======== ch_row_indices.sort() ch_row_indices.append(nrows) # 添加哨兵位便于切片 for i in range(len(ch_row_indices) - 1): ch_row_idx = ch_row_indices[i] next_ch_row_idx = ch_row_indices[i + 1] # 解析当前 CH 块的列映射:col_idx -> ch_num ch_map = self.parse_ch_columns_under_auth(ws_obj, ch_row_idx, auth_start, auth_end) if not ch_map: continue self.logger.info(f"🔧 处理 CH 块:第 {ch_row_idx + 1} 行 → 下一块在 {next_ch_row_idx + 1}") # 定义该块的数据范围 block_start = max(header_row_idx + 1, ch_row_idx + 1) block_end = min(next_ch_row_idx, nrows) # 遍历该块内的每一行 for row_idx in range(block_start, block_end): mode_info = row_mode_info.get(row_idx) if not mode_info: continue bw_clean = mode_info["bw"] has_valid_power = any( self.is_valid_power(self.get_cell_value(ws_obj, row_idx, col)) for col in ch_map.keys() ) if not has_valid_power: continue for phy_mode in mode_info["phy_mode_list"]: formatted_mode = self.format_phy_mode(phy_mode) mode_key = f"{formatted_mode}_{bw_clean}M" if not self._ci_contains(sheet_config.get("modes", []), mode_key): continue raw_rate_set = self._ci_get(sheet_config["rate_set_map"], mode_key) if not raw_rate_set: continue rate_set_list = [raw_rate_set] if isinstance(raw_rate_set, str) else raw_rate_set for rate_set_macro in rate_set_list: ch_count = 0 for col_idx, ch in ch_map.items(): # 注意顺序:col -> ch power_val = self.get_cell_value(ws_obj, row_idx, col_idx) if not self.is_valid_power(power_val): continue try: power_dbm = float(power_val) except: continue encoded_power = self.encode_power(power_dbm) entries.append({ "ch": ch, "power_dbm": round(power_dbm, 2), "encoded_power": encoded_power, "rate_set_macro": rate_set_macro, "mode": phy_mode, "bw": bw_clean, "src_row": row_idx + 1, "band": sheet_config["band"], "ch_block_row": ch_row_idx + 1 }) ch_count += 1 if ch_count > 0: self.logger.info( f"📊 第 {row_idx + 1} 行 → {formatted_mode} {bw_clean}M, " f"{ch_count} 信道, 宏={rate_set_macro}, CH块行={ch_row_idx + 1}" ) return entries def get_valid_channels_for_bandwidth(self, band: str, bw: str) -> list: """ 根据频段和带宽获取合法中心信道列表 """ # 规范化输入 band_norm = band.lower().strip() 
bw_clean = str(bw).replace('MHz', '').strip() if band_norm in ['2.4g', '2.4ghz', 'bg', 'bgn']: band_key = '2g' elif band_norm in ['5g', '5ghz']: band_key = '5g' elif band_norm in ['6g', '6ghz', 'be']: band_key = '6g' else: raise ValueError(f"不支持频段: {band}") segments = self.config.get("channel_segments", {}) valid_chs = segments.get(band_key, {}).get(bw_clean, []) return sorted(valid_chs) def compress_tx_limit_entries(self, raw_entries, sheet_config): """ 压缩TX限制条目。 Args: raw_entries (list): 原始条目列表。 sheet_config (dict): Excel表格配置字典。 Returns: list: 压缩后的条目列表。 """ self.logger.info(" 正在执行 compress_tx_limit_entries()...") self.logger.info(" ...") from collections import defaultdict modes_order = sheet_config["modes"] # 构建小写映射用于排序(key: "11n_20M") mode_lower_to_index = {mode.lower(): idx for idx, mode in enumerate(modes_order)} range_template = sheet_config["range_macro_template"] group_key = lambda e: (e["encoded_power"], e["rate_set_macro"]) groups = defaultdict(list) for e in raw_entries: groups[group_key(e)].append(e) compressed = [] for (encoded_power, rate_set_macro), entries_in_group in groups.items(): first = entries_in_group[0] power_dbm = first["power_dbm"] mode = first["mode"] # 如 '11N' bw = first["bw"] # 如 '20' 或 '40' ch_list = sorted(e["ch"] for e in entries_in_group) for start, end in self.merge_consecutive_channels( ch_list, band=sheet_config["band"], # 如 "5g" bw=bw # 如 "80" ): range_macro = range_template.format( band=sheet_config["band"], bw=bw, start=start, end=end ) # === 新增:查找或分配 CHANNEL_SET_ID === assigned_id = -1 # 表示:这不是 regulatory 范围,无需映射 # === 新增:记录到 generated_ranges === segment_ch_list = list(range(start, end + 1)) self._record_generated_range( range_macro=range_macro, band=sheet_config["band"], bw=bw, ch_start=start, ch_end=end, channels=segment_ch_list ) # 格式化物理层模式(如 '11N' -> '11n') formatted_mode = self.format_phy_mode(mode) # 构造 mode_key 用于查找排序优先级 mode_key = f"{formatted_mode}_{bw}M" mode_order_idx = mode_lower_to_index.get(mode_key.lower(), 999) # 生成注释 comment = f"/* {power_dbm:5.2f}dBm, CH{start}-{end}, {formatted_mode} @ {bw}MHz */" # 新增:生成该段落的实际信道列表 segment_ch_list = list(range(start, end + 1)) compressed.append({ "encoded_power": encoded_power, "range_macro": range_macro, "rate_set_macro": rate_set_macro, "comment": comment, "_mode_order": mode_order_idx, "bw": bw, # 带宽数字(字符串) "mode": formatted_mode, # 统一格式化的模式名 "ch_start": start, "ch_end": end, "power_dbm": round(power_dbm, 2), "ch_list": segment_ch_list, # 关键!用于 global_ch_min/max 统计 }) # 排序后删除临时字段 compressed.sort(key=lambda x: x["_mode_order"]) for item in compressed: del item["_mode_order"] return compressed def _normalize_band(self, band: str) -> str: """ 将各种 band 写法标准化为 '2g', '5g', '6g' """ b = str(band).lower().strip() if b in ['2g', '2.4g', 'bg', 'bgn', '2.4ghz']: return '2g' elif b in ['5g', '5ghz']: return '5g' elif b in ['6g', '6ghz', 'be']: return '6g' else: raise ValueError(f"不支持频段类型: {band}") def _record_generated_range(self, range_macro, band, bw, ch_start, ch_end, channels): """ 记录生成的 RANGE 宏信息,并自动归类到 used_ranges 和 used_ranges_by_band """ # 标准化频段名称 normalized_band = self._normalize_band(band) entry = { "range_macro": range_macro, "band": normalized_band, "bandwidth": int(bw), "channels": sorted(channels), "start_channel": ch_start, "end_channel": int(ch_end), "source_sheet": getattr(self, 'current_sheet_name', 'unknown') } # 添加到全局列表(保持原逻辑) if range_macro not in self.used_ranges: self.used_ranges.append(range_macro) # 添加到分类字典(去重) if range_macro not in self.used_ranges_by_band[normalized_band]: 
self.used_ranges_by_band[normalized_band].append(range_macro) # 同时保留详细元数据 self.generated_ranges.append(entry) def clean_sheet_name(self, name): cleaned = re.sub(r'[^\w\.\=\u4e00-\u9fa5]', '', str(name)) return cleaned def match_sheet_to_config(self, sheet_name): cleaned = self.clean_sheet_name(sheet_name) for cfg in self.config["sheets"]: for pat in cfg["pattern"]: if re.search(pat, cleaned, re.I): self.logger.info(f" '{sheet_name}' → 清洗后: '{cleaned}'") self.logger.info(f" 匹配成功!'{sheet_name}' → [{cfg['band']}] 配置") return cfg self.logger.info(f" '{sheet_name}' → 清洗后: '{cleaned}'") self.logger.info(f"未匹配到 '{sheet_name}' 的模式,跳过...") return None def convert_sheet_with_config(self, ws_obj, sheet_name, sheet_config): self.current_sheet_name = sheet_name header_row_idx, mode_col, rate_col = self.find_table_header_row(ws_obj) if header_row_idx is None: self.logger.info(f" 跳过 '{sheet_name}':未找到 'Mode' 和 'Rate'") return None auth_start, auth_end, auth_row = self.find_auth_power_above_row(ws_obj, header_row_idx) if auth_start is None: self.logger.info(f" 跳过 '{sheet_name}':未找到 '认证功率'") return None raw_entries = self.collect_tx_limit_data( ws_obj, sheet_config, header_row_idx, auth_row, auth_start, auth_end, mode_col, rate_col ) if not raw_entries: self.logger.info(f" 从 '{sheet_name}' 未收集到有效数据") return None compressed = self.compress_tx_limit_entries(raw_entries, sheet_config) # 🔍 统计 2.4G 信道范围(仅限 2G 表) band_key = sheet_config.get("band", "").lower() if band_key in ["2g", "2.4g"]: for entry in compressed: ch_list = entry.get("ch_list", []) if not ch_list: continue start_ch, end_ch = min(ch_list), max(ch_list) if self.global_ch_min is None or start_ch < self.global_ch_min: self.global_ch_min = start_ch if self.global_ch_max is None or end_ch > self.global_ch_max: self.global_ch_max = end_ch # ✅ 返回该 sheet 的完整信息 result = { "sheet_name": sheet_name, "band": band_key, "entries": compressed, "sheet_config": sheet_config } self.logger.info(f"✅ 成功处理 '{sheet_name}' → {len(compressed)} 条条目 (band={band_key.upper()})") return result def render_from_template(self, template_path, context, output_path): """ 根据模板生成文件。 Args: template_path (str): 模板文件路径。 context (dict): 渲染模板所需的上下文数据。 output_path (str): 输出文件的路径。 Returns: None Raises: FileNotFoundError: 如果指定的模板文件不存在。 IOError: 如果在读取或写入文件时发生错误。 """ self.logger.info(" 正在执行 render_from_template()...") self.logger.info(" ...") template_path = resource_path(template_path) with open(template_path, 'r', encoding='utf-8') as f: template = Template(f.read()) content = template.render(**context) os.makedirs(os.path.dirname(output_path), exist_ok=True) with open(output_path, 'w', encoding='utf-8') as f: f.write(content) self.logger.info(f" 已生成: {output_path}") def generate_per_sheet_output(self): """ 为每一个已解析的 sheet 生成对应的 C 文件 """ timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") output_dir = Path(self.output_dir) output_dir.mkdir(parents=True, exist_ok=True) # 模板映射表 TEMPLATE_MAP = { "2g": "tx_limit_table_2_4G.c.j2", "5g": "tx_limit_table_5G.c.j2", "6g": "tx_limit_table_6G.c.j2" } # locale 映射 locale_by_enum = {tgt["enum"]: tgt["assigned_locale"].replace('-', '_') for tgt in self.config.get("locale_targets", []) if "enum" in tgt and "assigned_locale" in tgt} locale_2g = locale_by_enum.get("locale_2g_idx", "DEFAULT_2G") locale_2g_ht = locale_by_enum.get("locale_2g_ht_idx", "DEFAULT_2G_HT") for result in self.per_sheet_results: sheet_name = result["sheet_name"] band = result["band"] # 如 "2g", "5g" entries = result["entries"] if not entries: continue # 分类 normal / ht normal_entries 
= [e for e in entries if "HT" not in e.get("rate_set_macro", "")] ht_entries = [e for e in entries if "HT" in e.get("rate_set_macro", "")] # 构建结构 normal_struct = self.build_normal_structure(normal_entries) ht_segments = self.build_ht_structure(ht_entries) # fallback range(仅 2G 使用真实值) if band == "2g" and self.global_ch_min and self.global_ch_max: fb_start, fb_end = self.global_ch_min, self.global_ch_max fallback_macro = f"RANGE_2G_20M_{fb_start}_{fb_end}" else: fb_start, fb_end = 1, 11 fallback_macro = "RANGE_2G_20M_1_11" # 分配 CHANNEL_SET_ID if fallback_macro not in self.channel_set_map: next_id = max(self.channel_set_map.values(), default=0) + 1 self.channel_set_map[fallback_macro] = next_id fallback_id = self.channel_set_map[fallback_macro] # 选模板 jinja_template_name = TEMPLATE_MAP.get(band, "tx_limit_table_generic.c.j2") template_path = f"templates/{jinja_template_name}" # 输出文件名 output_filename = f"tx_limit_table_{band}.c" output_path = output_dir / output_filename # 上下文 context = { "timestamp": timestamp, "locale_display_name": self.locale_display_name, "locale_name_2_4G": locale_2g, "locale_name_2_4G_HT": locale_2g_ht, "normal_table": normal_struct, "ht_segments": ht_segments, "fallback_encoded_eirp": 30, "fallback_range_macro": fallback_macro, "fallback_ch_start": fb_start, "fallback_ch_end": fb_end, "fallback_channel_set_id": fallback_id, "channel_set_comment": f"Auto-generated fallback for {band.upper()} band", } self.save_channel_set_map_to_config() # 渲染并写入 try: content = self.render_from_template_string(template_path, context) output_path.write_text(content, encoding='utf-8') self.logger.info(f" 已生成 [{band.upper()}] 文件: {output_path}") except Exception as e: self.logger.error(f" 模板渲染失败 ({template_path}): {e}") raise def build_normal_structure(self,entries): grouped = defaultdict(list) for e in entries: bw = str(e["bw"]) grouped[bw].append(e) result = [] for bw in ["20", "40", "80", "160"]: if bw in grouped: sorted_entries = sorted(grouped[bw], key=lambda x: (x["ch_start"], x["encoded_power"])) result.append((bw, sorted_entries)) return result def build_ht_structure(self,entries): groups = defaultdict(list) for e in entries: bw = str(e["bw"]) if "EXT4" in e["rate_set_macro"]: level = "ext4" elif "EXT" in e["rate_set_macro"]: level = "ext" else: level = "base" groups[(level, bw)].append(e) order = [ ("base", "20"), ("base", "40"), ("ext", "20"), ("ext", "40"), ("ext4", "20"), ("ext4", "40") ] segments = [] active_segment_count = sum(1 for key in order if key in groups) for idx, (level, bw) in enumerate(order): key = (level, bw) if key not in groups: continue seg_entries = sorted(groups[key], key=lambda x: (x["ch_start"], x["encoded_power"])) count = len(seg_entries) header_flags = f"CLM_DATA_FLAG_WIDTH_{bw} | CLM_DATA_FLAG_MEAS_COND" if idx < active_segment_count - 1: header_flags += " | CLM_DATA_FLAG_MORE" if level != "base": header_flags += " | CLM_DATA_FLAG_FLAG2" segment = { "header_flags": header_flags, "count": count, "entries": seg_entries } if level == "ext": segment["flag2"] = "CLM_DATA_FLAG2_RATE_TYPE_EXT" elif level == "ext4": segment["flag2"] = "CLM_DATA_FLAG2_RATE_TYPE_EXT4" segments.append(segment) return segments def render_from_template_string(self, template_path, context): from jinja2 import Environment, FileSystemLoader import os # 解析模板目录 template_dir = os.path.dirname(resource_path(template_path)) loader = FileSystemLoader(template_dir) env = Environment(loader=loader) filename = os.path.basename(template_path) template = env.get_template(filename) return 
template.render(**context) def log_changes_to_file(self, changes, locale_id, total_entries): """将变更摘要写入日志文件""" log_dir = self.output_dir/ "log" log_dir.mkdir(exist_ok=True) # 使用时间戳生成唯一文件名 timestamp_str = datetime.now().strftime("%Y%m%d_%H%M%S") log_path = log_dir / f"parse_{locale_id}_{timestamp_str}.log" timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") with open(log_path, 'w', encoding='utf-8') as f: # 覆盖写入最新变更 f.write(f"========================================\n") f.write(f"CLM 变更日志\n") f.write(f"========================================\n") f.write(f"时间: {timestamp}\n") f.write(f"地区码: {locale_id}\n") f.write(f"总 TX 条目数: {total_entries}\n") f.write(f"\n") if not any(changes.values()): f.write(" 本次运行无任何变更,所有文件已是最新状态。\n") else: if changes['added_ranges']: f.write(f" 新增 RANGE ({len(changes['added_ranges'])}):\n") for r in sorted(changes['added_ranges']): f.write(f" → {r}\n") f.write(f"\n") if changes['removed_ranges']: f.write(f" 删除 RANGE ({len(changes['removed_ranges'])}):\n") for r in sorted(changes['removed_ranges']): f.write(f" → {r}\n") f.write(f"\n") if changes['modified_ranges']: f.write(f" 修改 RANGE ({len(changes['modified_ranges'])}):\n") for r in sorted(changes['modified_ranges']): f.write(f" → {r}\n") f.write(f"\n") other_adds = changes['other_additions'] other_dels = changes['other_deletions'] if other_adds or other_dels: f.write(f" 其他变更:\n") for line in other_adds[:10]: f.write(f" add: {line}\n") for line in other_dels[:10]: f.write(f" del: {line}\n") if len(other_adds) > 10 or len(other_dels) > 10: f.write(f" ... 还有 {len(other_adds) + len(other_dels) - 20} 处未显示\n") f.write(f"\n") f.write(f"输出目录: {log_dir}\n") f.write(f"备份文件: {Path(self.target_c_file).with_suffix('.c.bak')}\n") f.write(f"========================================\n") self.logger.info(f" 已保存变更日志 → {log_path}") def save_channel_set_map_to_config(self): """精准更新 config.json 中的 channel_set_map 和相关字段,不改变文件整体结构""" try: # 1. 从磁盘读取完整配置 with open(self.config_file_path, 'r', encoding='utf-8') as f: config = json.load(f) # 2. 更新你需要的字段(相当于“直接赋值”) valid_keys = [k for k in self.channel_set_map.keys() if re.match(r'RANGE_[\dA-Z]+_\d+M_\d+_\d+', k)] config["channel_set_map"] = {k: v for k, v in self.channel_set_map.items() if k in valid_keys} config["used_ranges_by_band"] = { band: sorted(v) for band, v in self.used_ranges_by_band.items() } config["used_ranges_count_per_band"] = { band: len(v) for band, v in self.used_ranges_by_band.items() } # 3. 
写回文件 with open(self.config_file_path, 'w', encoding='utf-8') as f: json.dump(config, f, indent=4, ensure_ascii=False) self.logger.info("✅ 成功更新配置文件(仅修改目标字段)") except Exception as e: self.logger.error(f"❌ 写入失败: {e}") raise # 主要内容执行入口函数 def convert(self, file_path): self.logger.info(" 正在执行 convert()...") self.logger.info(" ...") c_source = Path(self.target_c_file) if not c_source.exists(): raise FileNotFoundError(f"目标 C 文件不存在: {c_source}") ext = os.path.splitext(file_path)[-1].lower() if ext == ".xlsx": wb = load_workbook(file_path, data_only=True) sheets = [{"sheet": ws, "format": "xlsx"} for ws in wb.worksheets] elif ext == ".xls": wb = xlrd.open_workbook(file_path) sheets = [{"sheet": ws, "format": "xls"} for ws in wb.sheets()] else: raise ValueError("仅支持 .xls 或 .xlsx 文件") # 清空上一次的结果 self.per_sheet_results.clear() for i, ws_obj in enumerate(sheets): sheet_name = wb.sheet_names()[i] if ext == ".xls" else ws_obj["sheet"].title config = self.match_sheet_to_config(sheet_name) if config: result = self.convert_sheet_with_config(ws_obj, sheet_name, config) if result: self.per_sheet_results.append(result) else: self.logger.info(f"跳过未匹配的 sheet: {sheet_name}") self.generate_per_sheet_output() #对外接口 def parse_excel(self): """ 【UI 兼容】供 PyQt UI 调用的入口方法 将当前 self.input_file 中的数据解析并填充到 tx_limit_entries """ self.logger.info(f" 开始解析: {self.input_file}") if not os.path.exists(self.input_file): self.logger.info(f" 文件不存在: {self.input_file}") raise FileNotFoundError(...) else: self.logger.info(f" 文件已找到,大小: {os.path.getsize(self.input_file)} 字节") if not hasattr(self, 'input_file') or not self.input_file: raise ValueError("未设置 input_file 属性!") if not os.path.exists(self.input_file): raise FileNotFoundError(f"文件不存在: {self.input_file}") self.logger.info(f" 开始解析 Excel 文件: {self.input_file}") try: self.convert(self.input_file) # 调用已有逻辑 self.logger.info(f" Excel 解析完成,共生成 {len(self.tx_limit_entries)} 条 TX 限幅记录") except Exception as e: self.logger.info(f" 解析失败: {e}") raise if __name__ == "__main__": import os logging.basicConfig( level=logging.INFO, format='%(asctime)s [%(levelname)s] %(name)s: %(message)s', handlers=[ logging.FileHandler(LOG_FILE, encoding='utf-8'), logging.StreamHandler(sys.stdout) ], force=True ) logger = logging.getLogger(__name__) # 切换到脚本所在目录(可选,根据实际需求) script_dir = os.path.dirname(__file__) os.chdir(script_dir) # 直接使用默认参数(或从其他地方获取) config_path = "config/config.json" locale_id = None # 或指定默认值,如 "DEFAULT" display_name = None # 或指定默认值 input_file = "input/Archer BE900US 2.xlsx" # 创建转换器实例并执行 converter = ExcelToCLMConverter( config_path=config_path, locale_display_name=display_name ) converter.convert(input_file) 我现在会删掉used_range改用used_ranges_by_band,后续哪些地方需要修改
10-29
我现在要写使用手册,总结一下使用到的库# clm_generator/excel_to_clm.py import os import re import json from openpyxl import load_workbook import xlrd from jinja2 import Template from collections import defaultdict from utils import resource_path, get_output_dir from pathlib import Path # ------------------------------- # 日志配置 # ------------------------------- from datetime import datetime from pathlib import Path import logging import sys # --- 使用 utils 获取真实日志目录 --- from utils import get_log_dir, get_output_dir, ensure_directories class ExcelToCLMConverter: def __init__(self, config_path="config/config.json", locale_display_name=None, locale_names_map=None): """ Args: config_path: 配置文件路径 locale_display_name: 兼容旧逻辑,默认 DEFAULT 名称(如 US) locale_names_map: 新增参数,用于设置每个 locale_target 的 assigned_locale 示例: { "locale_2g_idx": "us", "locale_2g_ht_idx": "us_ht", "locale_5g_idx": "eu_5g", ... } """ self.logger = logging.getLogger("excel_to_clm") self.logger.info("初始化完成") self.last_config = None self.used_ranges_by_band = { "2g": [], "5g": [], "6g": [] } # === Step 1: 加载配置文件 === self.config_file_path = resource_path(config_path) if not os.path.exists(self.config_file_path): raise FileNotFoundError(f"配置文件不存在: {self.config_file_path}") with open(self.config_file_path, 'r', encoding='utf-8') as f: self.config = json.load(f) self.logger.info(f" 配置文件已加载: {self.config_file_path}") # === Step 2: 处理 target_c_file === rel_c_path = self.config.get("target_c_file", "input/wlc_clm_data_6726b0.c") self.target_c_file = resource_path(rel_c_path) if not os.path.exists(self.target_c_file): raise FileNotFoundError(f"配置中指定的 C 源文件不存在: {self.target_c_file}") self.logger.info(f" 已定位目标 C 文件: {self.target_c_file}") # === Step 3: 初始化输出目录 === output_dir = self.config.get("output_path", "output") self.output_dir = resource_path(output_dir) Path(self.output_dir).mkdir(parents=True, exist_ok=True) # 确保存在 self.logger.info(f" 输出目录: {self.output_dir}") # === Step 4: 更新 locale_targets from locale_names_map === if locale_names_map and isinstance(locale_names_map, dict): updated_count = 0 for tgt in self.config.get("locale_targets", []): enum_key = tgt.get("enum") new_locale = locale_names_map.get(enum_key) if new_locale is not None: old_locale = tgt.get("assigned_locale") if old_locale != new_locale: tgt["assigned_locale"] = new_locale self.logger.info(f" 更新 {enum_key}: '{old_locale}' → '{new_locale}'") updated_count += 1 if updated_count > 0: # 写回 config.json try: with open(self.config_file_path, 'w', encoding='utf-8') as f: json.dump(self.config, f, indent=4, ensure_ascii=False) self.logger.info(f" 已保存 {updated_count} 个 assigned_locale 到 config.json") except Exception as e: self.logger.info(f" 写入 config.json 失败: {e}") # === Step 5: 设置默认 fallback 显示名(兼容旧逻辑)=== first_target = self.config.get("locale_targets", [{}])[0] fallback_locale = first_target.get("assigned_locale", "DEFAULT") self.locale_name = fallback_locale self.locale_display_name = ( locale_display_name or fallback_locale.replace('-', '_') ) # === Step 6: channel_set_map 加载 === persisted_map = self.config.get("channel_set_map") if persisted_map is None: raise KeyError("配置文件缺少必需字段 'channel_set_map'") if not isinstance(persisted_map, dict): raise TypeError(f"channel_set_map 必须是字典类型,当前类型: {type(persisted_map)}") self.channel_set_map = {str(k): int(v) for k, v in persisted_map.items()} self.logger.info(f" 成功加载 channel_set_map (共 {len(self.channel_set_map)} 项)") # === 初始化数据容器 === self.tx_power_data = [] self.tx_limit_entries = [] self.eirp_entries = [] self.global_ch_min = None self.global_ch_max = 
None self.generated_ranges = [] self.per_sheet_results = [] # 存放每个 sheet 的解析结果 def reset(self): """重置所有运行时数据,便于多次生成""" self.tx_limit_entries.clear() self.eirp_entries.clear() self.tx_power_data.clear() self.generated_ranges.clear() self.used_ranges_by_band = {"2g": [], "5g": [], "6g": []} # ✅ 重置分类 self.last_config = None self.logger.info(" 所有生成数据已重置") self.logger.info(f" 初始化完成。目标C文件: {self.target_c_file}") self.logger.info(f" 输出目录: {self.output_dir}") self.logger.info(f" Locale ID: {self.locale_name}") @property def all_used_ranges(self): """返回所有已使用的 RANGE 宏(跨频段合并+去重)""" all_ranges = [] for band_list in self.used_ranges_by_band.values(): all_ranges.extend(band_list) return sorted(set(all_ranges)) # 去重排序 # ==================== 新增工具方法:大小写安全查询 ==================== def _ci_get(self, data_dict, key): """ Case-insensitive 字典查找 """ for k, v in data_dict.items(): if k.lower() == key.lower(): return v return None def _ci_contains(self, data_list, item): """ Case-insensitive 判断元素是否在列表中 """ return any(x.lower() == item.lower() for x in data_list) def parse_mode_cell(self, cell_value): # self.logger.info(" 正在执行 parse_mode_cell()...") # self.logger.info(" ...") if not cell_value: return None val = str(cell_value).strip() val = re.sub(r'\s+', ' ', val.replace('\n', ' ').replace('\r', ' ')) val_upper = val.upper() self.logger.info(f" 解析模式单元格: '{val}'") found_modes = [] bandwidth = None # === Step 1: 提取带宽 (20/40/80/160/320) === bw_match = re.search(r'(20|40|80|160|320)\s*(?:MHZ|M)?\b', val_upper) if bw_match: bandwidth = bw_match.group(1) else: # 尾部数字 fallback: HT40 → 40 tail_num = re.search(r'(20|40|80|160|320)(?![0-9])', val_upper) if tail_num: bandwidth = tail_num.group(1) if not bandwidth: # 再次 fallback:检查是否有任何带宽数字出现 all_bws = [b for b in ['20', '40', '80', '160', '320'] if b in val_upper] bandwidth = max(all_bws, key=int) if all_bws else '20' # === Step 2: 拆分表达式 === # 支持多种分隔符:/, \, |, 空格等 tokens = re.split(r'[/\\|+\s]+', val_upper) tokens = [t.strip() for t in tokens if t.strip()] # 如果原始字符串本身就是一个整体词(如 "11AC"),也作为一个 token if not tokens: tokens = [val_upper] # === Step 3: 定义模式关键词映射(支持前缀匹配)=== mode_indicators = [ ('VHT', '11AC'), ('11AC', '11AC'), ('HE', '11AX'), # 注意:不能写 '^HE$',否则 HE20 不会被识别 ('11AX', '11AX'), ('BE', '11BE'), # BE320、BE20 都能命中 ('11BE', '11BE'), ('EHT', '11BE'), ('HT', '11N'), ('11N', '11N'), ('11A', '11A'), ('11G', '11G'), ('DSSS', '11B'), ('CCK', '11B'), ('11B', '11B'), ] # === Step 4: 遍历所有 token,进行部分包含或前缀匹配 === for token in tokens: matched = False for pattern, canonical in mode_indicators: # 使用 re.search 而非 fullmatch,支持子串匹配 if re.search(pattern, token): if canonical not in found_modes: found_modes.append(canonical) matched = True break # 找到就跳出,避免重复添加 # === Step 5: 特殊情况补全:比如 "11AC/AX" → 应该两个都识别 === full_str_clean = re.sub(r'[^A-Z0-9]', '', val_upper) # 去除非字母数字 if '11AC' in full_str_clean and 'AX' in val_upper and '11AX' not in found_modes: found_modes.append('11AX') if '11AX' in full_str_clean and 'BE' in val_upper and '11BE' not in found_modes: found_modes.append('11BE') # 排序统一输出顺序(可选) order = {'11B': 0, '11G': 1, '11A': 2, '11N': 3, '11AC': 4, '11AX': 5, '11BE': 6} found_modes.sort(key=lambda x: order.get(x, 99)) # === Step 6: 最终校验 === if not found_modes: self.logger.info(f" 无法识别物理模式: '{cell_value}'") return None self.logger.info(f" ✓ 解析成功 → 模式={found_modes}, 带宽={bandwidth}MHz") return { "phy_mode_list": found_modes, "bw": bandwidth } def format_phy_mode(self, mode: str) -> str: """ 自定义物理层模式输出格式: - 11B/G/N 输出为小写:11b / 11g / 11n - 其他保持原样(如 11AC, 11BE) """ return { '11A': '11a', '11B': 
'11b', '11G': '11g', '11N': '11n' }.get(mode, mode) def col_to_letter(self, col): col += 1 result = "" while col > 0: col -= 1 result = chr(col % 26 + ord('A')) + result col //= 26 return result def is_valid_power(self, value): """ 判断一个值是否为有效的功率数值(可转为 float)。 排除常见无效字符串如 N/A, -, NULL 等。 返回: True if valid, False otherwise. """ if value is None: return False val_str = str(value).strip() upper_val = val_str.upper() # 常见无效标识符(不区分大小写) invalid_markers = { '', 'N/A', 'NA', 'NULL', 'NONE', '-', '.', 'INF', 'INFINITE', 'NOT AVAILABLE', 'UNKNOWN', 'TBD', ' ' } if upper_val in invalid_markers or 'NOT AVAIL' in upper_val or 'MISSING' in upper_val: return False try: float(val_str) return True except (ValueError, TypeError): return False def get_cell_value(self, ws_obj, row_idx, col_idx): fmt = ws_obj["format"] if fmt == "xls": return str(ws_obj["sheet"].cell_value(row_idx, col_idx)).strip() else: cell = ws_obj["sheet"].cell(row=row_idx + 1, column=col_idx + 1) val = cell.value return str(val).strip() if val is not None else "" def find_table_header_row(self, ws_obj): """查找包含 'Mode' 和 'Rate' 的表头行""" fmt = ws_obj["format"] ws = ws_obj["sheet"] for r in range(15): mode_col = rate_col = None if fmt == "xlsx": if r + 1 > ws.max_row: continue for c in range(1, ws.max_column + 1): cell = ws.cell(row=r + 1, column=c) if not cell.value: continue val = str(cell.value).strip() if val == "Mode": mode_col = c elif val == "Rate": rate_col = c if mode_col and rate_col and abs(mode_col - rate_col) == 1: self.logger.info(f" 找到表头行: 第 {r+1} 行") return r, mode_col - 1, rate_col - 1 # 转为 0-based else: if r >= ws.nrows: continue for c in range(ws.ncols): val = ws.cell_value(r, c) if not val: continue val = str(val).strip() if val == "Mode": mode_col = c elif val == "Rate": rate_col = c if mode_col and rate_col and abs(mode_col - rate_col) == 1: self.logger.info(f" 找到表头行: 第 {r+1} 行") return r, mode_col, rate_col return None, None, None def find_auth_power_above_row(self, ws_obj, start_row): """查找 '认证功率' 所在的合并单元格及其列范围""" fmt = ws_obj["format"] ws = ws_obj["sheet"] self.logger.info(f" 开始向上查找 '认证功率',扫描第 0 ~ {start_row} 行...") if fmt == "xlsx": # xlsx 格式:使用 openpyxl 处理合并单元格 for mr in ws.merged_cells.ranges: top_left = ws.cell(row=mr.min_row, column=mr.min_col) val = str(top_left.value) if top_left.value else "" if "证功率" in val or "Cert" in val: r_idx = mr.min_row - 1 # 转为0基索引 if r_idx <= start_row: start_col = mr.min_col - 1 end_col = mr.max_col - 1 self.logger.info(f" 发现合并单元格含 '证功率': '{val}' → {self.col_to_letter(start_col)}{mr.min_row}") return 0, end_col, r_idx # fallback:搜索普通单元格 for r in range(start_row + 1): for c in range(1, ws.max_column + 1): cell = ws.cell(row=r + 1, column=c) if cell.value and ("证功率" in str(cell.value)): self.logger.info(f" 普通单元格发现 '证功率': '{cell.value}' @ R{r + 1}C{c}") return 0, c - 1, r else: # fmt == "xls",使用 xlrd 读取 .xls 文件 from xlrd import Book, Cell # 第一步:检查合并单元格 if hasattr(ws, 'merged_cells'): for (rlo, rhi, clo, chi) in ws.merged_cells: # 合并区域左上角单元格为主值 cell = ws.cell(rlo, clo) val = cell.value if val and ("证功率" in str(val) or "Cert" in str(val)): if rlo <= start_row: self.logger.info(f" 发现 .xls 合并单元格含 '证功率': '{val}' → R{rlo + 1}C{clo + 1}") return 0, chi - 1, rlo # clo ~ chi-1 是实际列索引范围(chi 是开区间) # 第二步:fallback 到普通单元格搜索 for r in range(min(ws.nrows, start_row + 1)): for c in range(ws.ncols): val = ws.cell_value(r, c) if val and ("证功率" in str(val) or "Cert" in str(val)): self.logger.info(f" 发现 .xls 普通单元格 '证功率': '{val}' @ R{r + 1}C{c + 1}") return 0, c, r return None, None, None def 
parse_ch_columns_under_auth(self, ws_obj, ch_row_idx, auth_start_col, auth_end_col, verbose=True): """ 解析指定行中 [auth_start_col, auth_end_col] 范围内的列,提取 CHx 信道编号 返回: {col_index: ch_number} 映射(便于主循环按列查找) """ fmt = ws_obj["format"] ws = ws_obj["sheet"] ch_map = {} # key: col_index, value: ch_number if verbose: self.logger.info(f"🔍 解析 CH 标题行(第 {ch_row_idx + 1} 行),列范围: Col {auth_start_col} ~ {auth_end_col}") for c in range(auth_start_col, auth_end_col + 1): val = self.get_cell_value(ws_obj, ch_row_idx, c) if not val: continue # 加强版正则:必须是 CH 开头,可接分隔符,然后是数字 match = re.search(r"^CH[\s_\-]?(\d+)$", str(val).strip(), re.I) if not match: continue try: ch_num = int(match.group(1)) ch_map[c] = ch_num # ✅ 列索引 → 信道号 if verbose: self.logger.info(f" ✅ 发现 CH{ch_num} @ 第 {c + 1} 列 (Col={c})") except ValueError: continue if not ch_map: if verbose: self.logger.info("❌ 在指定区域内未找到任何有效的 CHx 列") else: chs = sorted(ch_map.values()) if verbose: self.logger.info(f"🎉 成功提取 CH{min(chs)}-{max(chs)},共 {len(chs)} 个信道") return ch_map def encode_power(self, dbm): return int(round((float(dbm) + 1.5) * 4)) def merge_consecutive_channels(self, ch_list: list, band: str = "2g", bw: str = "20") -> list: """ 合并连续信道: - 2G: 数值连续(可跨带宽) - 5G/6G: 在对应带宽的合法信道集中找相邻项进行合并 """ if not ch_list: return [] sorted_ch = sorted(set(ch_list)) # === 2G: 简单数值连续(适用于 20M/40M)=== if band.lower() == "2g": ranges = [] start = end = sorted_ch[0] for ch in sorted_ch[1:]: if ch == end + 1: end = ch else: ranges.append((start, end)) start = end = ch ranges.append((start, end)) return ranges # === 5G / 6G: 基于合法信道集的索引连续性 === elif band.lower() in ["5g", "6g"]: try: valid_chs = self.get_valid_channels_for_bandwidth(band, bw) except ValueError as e: print(f"⚠️ {e} 使用默认数值连续合并") return self.merge_consecutive_channels(ch_list, band="2g") if not valid_chs: print(f"⚠️ 未找到 {band.upper()} @{bw}MHz 的合法信道列表,使用原始数值合并") return self.merge_consecutive_channels(ch_list, band="2g") # 构建信道到索引的映射 ch_to_index = {ch: idx for idx, ch in enumerate(valid_chs)} ranges = [] current_start = current_end = None for ch in sorted_ch: if ch not in ch_to_index: print(f" 警告:信道 {ch} 不属于 {band.upper()} @{bw}MHz 合法集合,将单独成段") if current_start is not None: ranges.append((current_start, current_end)) current_start = current_end = None ranges.append((ch, ch)) continue idx = ch_to_index[ch] if current_start is None: current_start = current_end = ch else: prev_idx = ch_to_index[current_end] if idx == prev_idx + 1: # 在合法列表中是“下一个” current_end = ch else: ranges.append((current_start, current_end)) current_start = current_end = ch if current_start is not None: ranges.append((current_start, current_end)) return ranges else: raise ValueError(f"不支持频段类型: {band}") def find_all_ch_rows_after_auth(self, ws_obj, auth_row, auth_start_col, auth_end_col, max_scan_ahead=70): """ 从 auth_row + 1 开始向下扫描,在限定范围内查找所有包含 CHx 格式的行 返回所有匹配的 0-based 行索引列表 """ fmt = ws_obj["format"] ws = ws_obj["sheet"] nrows = ws.max_row if fmt == "xlsx" else ws.nrows start_scan = auth_row + 1 end_scan = min(start_scan + max_scan_ahead, nrows) ch_rows = [] found_positions = [] # 用于日志 self.logger.info(f"🔍 开始扫描 CH 标题行:第 {start_scan + 1} ~ {end_scan} 行,列 {auth_start_col} ~ {auth_end_col}") for r in range(start_scan, end_scan): for c in range(auth_start_col, auth_end_col + 1): val = self.get_cell_value(ws_obj, r, c) if not val: continue # 更严格的正则:以 CH 开头,可接分隔符,后跟数字,且前后无字母 # 避免匹配 CHECK / PCH / BATCH 等 if re.search(r"^CH[\s_\-]?(\d+)$", str(val).strip(), re.I): ch_rows.append(r) found_positions.append((r + 1, c + 1, val)) # 1-based for log break # 每行只记录一次 if 
ch_rows: self.logger.info(f"✅ 发现 {len(ch_rows)} 个 CH 标题行:") for row_1b, col_1b, val in found_positions: self.logger.info(f" → 第 {row_1b} 行, 第 {col_1b} 列: '{val}'") else: self.logger.info("❌ 未找到任何有效的 CH 标题行") return ch_rows def primary_to_center_channel(self, sheet_config, bw_mhz, primary_ch): """ 根据 band、带宽和主信道号,查询对应的中心信道。 """ seg = self.config.get("channel_primary_to_center", {}) mapping = seg.get(sheet_config["band"].lower(), {}).get(str(bw_mhz)) self.logger.info(f" 正在查询 {sheet_config['band'].lower()} @{bw_mhz}MHz 的中心信道映射表...值{mapping}") if not mapping: return None # 支持字符串键或整数键 return mapping.get(str(primary_ch)) or mapping.get(int(primary_ch)) def collect_tx_limit_data(self, ws_obj, sheet_config, header_row_idx, auth_row, auth_start, auth_end, mode_col, rate_col): """ 收集发射功率限制数据,支持多个 CH 区域(multi-channel blocks) """ self.logger.info(" 正在执行 collect_tx_limit_data()...") self.logger.info(" ...") # 获取所有 CH 标题行(支持多块) ch_row_indices = self.find_all_ch_rows_after_auth(ws_obj, auth_row, auth_start, auth_end) if not ch_row_indices: self.logger.info("❌ 错误:无法定位任何 CH 标题行,跳过该表") return [] nrows = ws_obj["sheet"].nrows if ws_obj["format"] == "xls" else ws_obj["sheet"].max_row fmt = ws_obj["format"] ws = ws_obj["sheet"] entries = [] row_mode_info = {} # {row_index: parsed_mode_info} # ======== 第一步:构建 row_mode_info(保持不变)======== if fmt == "xlsx": merged_cells_map = {} for mr in ws.merged_cells.ranges: for r in range(mr.min_row - 1, mr.max_row): for c in range(mr.min_col - 1, mr.max_col): merged_cells_map[(r, c)] = mr for row_idx in range(header_row_idx + 1, nrows): cell_value = None is_merged = (row_idx, mode_col) in merged_cells_map if is_merged: mr = merged_cells_map[(row_idx, mode_col)] top_cell = ws.cell(row=mr.min_row, column=mr.min_col) cell_value = top_cell.value else: raw_cell = ws.cell(row=row_idx + 1, column=mode_col + 1) cell_value = raw_cell.value mode_info = self.parse_mode_cell(cell_value) if mode_info: if is_merged: mr = merged_cells_map[(row_idx, mode_col)] for r in range(mr.min_row - 1, mr.max_row): if header_row_idx < r < nrows: row_mode_info[r] = mode_info.copy() else: row_mode_info[row_idx] = mode_info.copy() else: # XLS for row_idx in range(header_row_idx + 1, ws.nrows): cell_value = self.get_cell_value(ws_obj, row_idx, mode_col) mode_info = self.parse_mode_cell(cell_value) if mode_info: row_mode_info[row_idx] = mode_info.copy() # ======== 第二步:按 CH 块分段处理数据 ======== ch_row_indices.sort() ch_row_indices.append(nrows) # 添加哨兵位便于切片 for i in range(len(ch_row_indices) - 1): ch_row_idx = ch_row_indices[i] next_ch_row_idx = ch_row_indices[i + 1] # 解析当前 CH 块的列映射:col_idx -> ch_num ch_map = self.parse_ch_columns_under_auth(ws_obj, ch_row_idx, auth_start, auth_end) if not ch_map: continue self.logger.info(f"🔧 处理 CH 块:第 {ch_row_idx + 1} 行 → 下一块在 {next_ch_row_idx + 1}") # 定义该块的数据范围 block_start = max(header_row_idx + 1, ch_row_idx + 1) block_end = min(next_ch_row_idx, nrows) # 遍历该块内的每一行 for row_idx in range(block_start, block_end): mode_info = row_mode_info.get(row_idx) if not mode_info: continue bw_clean = mode_info["bw"] has_valid_power = any( self.is_valid_power(self.get_cell_value(ws_obj, row_idx, col)) for col in ch_map.keys() ) if not has_valid_power: continue for phy_mode in mode_info["phy_mode_list"]: formatted_mode = self.format_phy_mode(phy_mode) mode_key = f"{formatted_mode}_{bw_clean}M" if not self._ci_contains(sheet_config.get("modes", []), mode_key): continue raw_rate_set = self._ci_get(sheet_config["mode_rate_set_map"], mode_key) if not raw_rate_set: continue rate_set_list = 
[raw_rate_set] if isinstance(raw_rate_set, str) else raw_rate_set

            for rate_set_macro in rate_set_list:
                ch_count = 0
                for col_idx, ch in ch_map.items():  # note the order: col -> ch
                    power_val = self.get_cell_value(ws_obj, row_idx, col_idx)

                    # Column letter and 1-based row number, used only for log output
                    col_letter = self.col_to_letter(col_idx)
                    row_number_1b = row_idx + 1
                    sheet_name = getattr(self, 'current_sheet_name', 'UnknownSheet')

                    if not self.is_valid_power(power_val):
                        self.logger.info(
                            f"跳过无效功率值 @ {sheet_name}!{col_letter}{row_number_1b} | "
                            f"CH{ch} | Mode='{row_mode_info.get(row_idx, {}).get('phy_mode_list', ['?'])[0]}' "
                            f"| BW={row_mode_info.get(row_idx, {}).get('bw', '?')}MHz | "
                            f"原始值='{power_val}'"
                        )
                        continue

                    try:
                        power_dbm = float(power_val)
                    except (TypeError, ValueError):
                        continue

                    encoded_power = self.encode_power(power_dbm)

                    # === Conversion done: center_ch is used in place of ch ===
                    entries.append({
                        "ch": ch,
                        "power_dbm": round(power_dbm, 2),
                        "encoded_power": encoded_power,
                        "rate_set_macro": rate_set_macro,
                        "mode": phy_mode,
                        "bw": bw_clean,
                        "src_row": row_idx + 1,
                        "band": sheet_config["band"],
                        "ch_block_row": ch_row_idx + 1
                    })
                    ch_count += 1

                if ch_count > 0:
                    self.logger.info(
                        f"📊 第 {row_idx + 1} 行 → {formatted_mode} {bw_clean}M, "
                        f"{ch_count} 信道, 宏={rate_set_macro}, CH块行={ch_row_idx + 1}"
                    )

        return entries

    def get_valid_channels_for_bandwidth(self, band: str, bw: str) -> list:
        """Return the list of legal center channels for the given band and bandwidth."""
        # Normalize the inputs
        band_norm = band.lower().strip()
        bw_clean = str(bw).replace('MHz', '').strip()

        if band_norm in ['2.4g', '2.4ghz', 'bg', 'bgn']:
            band_key = '2g'
        elif band_norm in ['5g', '5ghz']:
            band_key = '5g'
        elif band_norm in ['6g', '6ghz', 'be']:
            band_key = '6g'
        else:
            raise ValueError(f"不支持频段: {band}")

        segments = self.config.get("channel_segments", {})
        valid_chs = segments.get(band_key, {}).get(bw_clean, [])
        return sorted(valid_chs)

    def compress_tx_limit_entries(self, raw_entries, sheet_config):
        """Compress TX-limit entries.

        Args:
            raw_entries (list): Raw entry list.
            sheet_config (dict): Sheet configuration dictionary.

        Returns:
            list: Compressed entry list.
        """
        self.logger.info(" 正在执行 compress_tx_limit_entries()...")
        self.logger.info(" ...")
        from collections import defaultdict

        modes_order = sheet_config["modes"]
        # Lower-case map used for sort priority lookups (key: "11n_20M")
        mode_lower_to_index = {mode.lower(): idx for idx, mode in enumerate(modes_order)}
        range_template = sheet_config["range_macro_template"]

        group_key = lambda e: (e["encoded_power"], e["rate_set_macro"])
        groups = defaultdict(list)
        for e in raw_entries:
            groups[group_key(e)].append(e)

        compressed = []
        for (encoded_power, rate_set_macro), entries_in_group in groups.items():
            first = entries_in_group[0]
            power_dbm = first["power_dbm"]
            mode = first["mode"]   # e.g. '11N'
            bw = first["bw"]       # e.g. '20' or '40'
            ch_list = sorted(e["ch"] for e in entries_in_group)

            for start, end in self.merge_consecutive_channels(
                ch_list,
                band=sheet_config["band"],  # e.g. "5g"
                bw=bw                       # e.g. "80"
            ):
                # === Use the start channel as the primary channel and look up its center channel ===
                center_start = self.primary_to_center_channel(sheet_config, bw_mhz=bw, primary_ch=start)
                center_end = self.primary_to_center_channel(sheet_config, bw_mhz=bw, primary_ch=end)

                if center_start is not None:
                    range_macro = range_template.format(
                        band=sheet_config["band"], bw=bw, start=center_start, end=center_end
                    )
                else:
                    # No center channel found: fall back to the primary channels
                    range_macro = range_template.format(
                        band=sheet_config["band"], bw=bw, start=start, end=end
                    )

                # === Look up or assign a CHANNEL_SET_ID ===
                assigned_id = -1  # means: not a regulatory range, no mapping needed

                # === Record into generated_ranges ===
                segment_ch_list = list(range(start, end + 1))
                self._record_generated_range(
                    range_macro=range_macro,
                    band=sheet_config["band"],
                    bw=bw,
                    ch_start=start,
                    ch_end=end,
                    channels=segment_ch_list
                )

                # Format the PHY mode (e.g. '11N' -> '11n')
                formatted_mode = self.format_phy_mode(mode)
                # Build mode_key used to look up the sort priority
                mode_key = f"{formatted_mode}_{bw}M"
                mode_order_idx = mode_lower_to_index.get(mode_key.lower(), 999)
                # Generate the comment
                comment = f"/* {power_dbm:5.2f}dBm, CH{start}-{end}, {formatted_mode} @ {bw}MHz */"

                compressed.append({
                    "encoded_power": encoded_power,
                    "range_macro": range_macro,
                    "rate_set_macro": rate_set_macro,
                    "comment": comment,
                    "_mode_order": mode_order_idx,
                    "bw": bw,                    # bandwidth number (as a string)
                    "mode": formatted_mode,      # unified, formatted mode name
                    "ch_start": start,
                    "ch_end": end,
                    "power_dbm": round(power_dbm, 2),
                    "ch_list": segment_ch_list,  # important! used for the global_ch_min/max statistics
                })

        # Sort, then drop the temporary field
        compressed.sort(key=lambda x: x["_mode_order"])
        for item in compressed:
            del item["_mode_order"]

        return compressed

    def _normalize_band(self, band: str) -> str:
        """Normalize the various band spellings to '2g', '5g' or '6g'."""
        b = str(band).lower().strip()
        if b in ['2g', '2.4g', 'bg', 'bgn', '2.4ghz']:
            return '2g'
        elif b in ['5g', '5ghz']:
            return '5g'
        elif b in ['6g', '6ghz', 'be']:
            return '6g'
        else:
            raise ValueError(f"不支持频段类型: {band}")

    def _record_generated_range(self, range_macro, band, bw, ch_start, ch_end, channels):
        """Record a generated RANGE macro and file it into used_ranges_by_band."""
        # Normalize the band name
        normalized_band = self._normalize_band(band)

        entry = {
            "range_macro": range_macro,
            "band": normalized_band,
            "bandwidth": int(bw),
            "channels": sorted(channels),
            "start_channel": ch_start,
            "end_channel": int(ch_end),
            "source_sheet": getattr(self, 'current_sheet_name', 'unknown')
        }

        # Add to the per-band dict (de-duplicated)
        if range_macro not in self.used_ranges_by_band[normalized_band]:
            self.used_ranges_by_band[normalized_band].append(range_macro)

        # Keep the detailed metadata as well
        self.generated_ranges.append(entry)

    def clean_sheet_name(self, name):
        cleaned = re.sub(r'[^\w\.\=\u4e00-\u9fa5]', '', str(name))
        return cleaned

    def match_sheet_to_config(self, sheet_name):
        cleaned = self.clean_sheet_name(sheet_name)
        for cfg in self.config["sheets"]:
            for pat in cfg["pattern"]:
                if re.search(pat, cleaned, re.I):
                    self.logger.info(f" '{sheet_name}' → 清洗后: '{cleaned}'")
                    self.logger.info(f" 匹配成功!'{sheet_name}' → [{cfg['band']}] 配置")
                    return cfg
        self.logger.info(f" '{sheet_name}' → 清洗后: '{cleaned}'")
        self.logger.info(f"未匹配到 '{sheet_name}' 的模式,跳过...")
        return None

    def convert_sheet_with_config(self, ws_obj, sheet_name, sheet_config):
        self.current_sheet_name = sheet_name

        header_row_idx, mode_col, rate_col = self.find_table_header_row(ws_obj)
        if header_row_idx is None:
            self.logger.info(f" 跳过 '{sheet_name}':未找到 'Mode' 和 'Rate'")
            return None

        auth_start, auth_end, auth_row = self.find_auth_power_above_row(ws_obj, header_row_idx)
        if auth_start is None:
            self.logger.info(f" 跳过 '{sheet_name}':未找到 '认证功率'")
            return None

        raw_entries = self.collect_tx_limit_data(
            ws_obj, sheet_config, header_row_idx, auth_row,
            auth_start, auth_end, mode_col, rate_col
        )
        if not raw_entries:
            self.logger.info(f" 从 '{sheet_name}' 未收集到有效数据")
            return None

        compressed = self.compress_tx_limit_entries(raw_entries, sheet_config)

        # 🔍 Track the 2.4G channel range (2G sheets only)
        band_key = sheet_config.get("band", "").lower()
        if band_key in ["2g", "2.4g"]:
            for entry in compressed:
                ch_list = entry.get("ch_list", [])
                if not ch_list:
                    continue
                start_ch, end_ch = min(ch_list), max(ch_list)
                if self.global_ch_min is None or start_ch < self.global_ch_min:
                    self.global_ch_min = start_ch
                if self.global_ch_max is None or end_ch > self.global_ch_max:
                    self.global_ch_max = end_ch

        # Return the full result for this sheet
        result = {
            "sheet_name": sheet_name,
            "band": band_key,
            "entries": compressed,
            "sheet_config": sheet_config
        }
        self.logger.info(f" 成功处理 '{sheet_name}' → {len(compressed)} 条条目 (band={band_key.upper()})")
        return result

    def render_from_template(self, template_path, context, output_path):
        """Generate a file from a Jinja2 template.

        Args:
            template_path (str): Path of the template file.
            context (dict): Context data needed to render the template.
            output_path (str): Path of the output file.

        Returns:
            None

        Raises:
            FileNotFoundError: If the given template file does not exist.
            IOError: If an error occurs while reading or writing a file.
        """
        self.logger.info(" 正在执行 render_from_template()...")
        self.logger.info(" ...")
        template_path = resource_path(template_path)
        with open(template_path, 'r', encoding='utf-8') as f:
            template = Template(f.read())
        content = template.render(**context)
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        with open(output_path, 'w', encoding='utf-8') as f:
            f.write(content)
        self.logger.info(f" 已生成: {output_path}")

    def generate_per_band_output(self):
        """Merge the parse results of all sheets by band and generate one unified C file per band.

        Even if several sheets belong to the same band (e.g. NSS1/NSS2/NSS4 → 6g),
        only one table is emitted for that band.
        """
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        output_dir = Path(self.output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)

        # --- Band templates and locale mapping ---
        TEMPLATE_MAP = {
            "2g": "tx_limit_table_2_4G.c.j2",
            "5g": "tx_limit_table_5G.c.j2",
            "6g": "tx_limit_table_6G.c.j2"
        }
        locale_by_enum = {
            tgt["enum"]: tgt["assigned_locale"].replace('-', '_')
            for tgt in self.config.get("locale_targets", [])
            if "enum" in tgt and "assigned_locale" in tgt
        }
        locale_2g = locale_by_enum.get("locale_2g_idx", "DEFAULT_2G")
        locale_2g_ht = locale_by_enum.get("locale_2g_ht_idx", "DEFAULT_2G_HT")
        locale_5g = locale_by_enum.get("locale_5g_idx", "DEFAULT_5G")
        locale_5g_ht = locale_by_enum.get("locale_5g_ht_idx", "DEFAULT_5G_HT")
        locale_6g = locale_by_enum.get("locale_6g_idx", "DEFAULT_6G")
        locale_6g_ht = locale_by_enum.get("locale_6g_ht_idx", "DEFAULT_6G_HT")

        # --- Aggregate all entries by band ---
        entries_by_band = defaultdict(list)
        global_ch_min = None
        global_ch_max = None

        for result in self.per_sheet_results:
            band = result["band"]
            entries = result["entries"]
            for entry in entries:
                # Mark the source sheet (optional)
                entry["_source_sheet"] = result["sheet_name"]
                entries_by_band[band].append(entry)

                # Track the global 2g channel range
                if band == "2g":
                    ch_list = entry.get("ch_list", [])
                    if ch_list:
                        start_ch, end_ch = min(ch_list), max(ch_list)
                        if global_ch_min is None or start_ch < global_ch_min:
                            global_ch_min = start_ch
                        if global_ch_max is None or end_ch > global_ch_max:
                            global_ch_max = end_ch

        # --- Save to instance variables for later use ---
        self.global_ch_min = global_ch_min or self.global_ch_min
        self.global_ch_max = global_ch_max or self.global_ch_max

        # --- Generate one file per band ---
        for band, all_entries in entries_by_band.items():
            if not all_entries:
                continue

            normal_entries = [e for e in all_entries if "HT" not in e.get("rate_set_macro", "")]
            ht_entries = [e for e in all_entries if "HT" in e.get("rate_set_macro", "")]

            normal_struct = self.build_normal_structure(normal_entries)
            ht_segments = self.build_ht_structure(ht_entries)

            # Fallback range
            if band == "2g" and self.global_ch_min and self.global_ch_max:
                fb_start, fb_end = self.global_ch_min, self.global_ch_max
                fallback_macro = f"RANGE_2G_20M_{fb_start}_{fb_end}"
            else:
                fb_start, fb_end = 1, 233
                fallback_macro = f"RANGE_{band.upper()}_20M_1_{fb_end}"

            if fallback_macro not in self.channel_set_map:
                next_id = max(self.channel_set_map.values(), default=0) + 1
                self.channel_set_map[fallback_macro] = next_id
            fallback_id = self.channel_set_map[fallback_macro]

            # Write the map back to the config
            self.save_channel_set_map_to_config()

            # Render the template
            jinja_template_name = TEMPLATE_MAP.get(band, "tx_limit_table_generic.c.j2")
            template_path = f"templates/{jinja_template_name}"
            output_filename = f"tx_limit_table_{band}.c"
            output_path = output_dir / output_filename

            context = {
                "timestamp": timestamp,
                "locale_display_name": self.locale_display_name,
                "locale_name_2_4G": locale_2g,
                "locale_name_2_4G_HT": locale_2g_ht,
                "locale_name_5G": locale_5g,
                "locale_name_5G_HT": locale_5g_ht,
                "locale_name_6G": locale_6g,
                "locale_name_6G_HT": locale_6g_ht,
                "normal_table": normal_struct,
                "ht_segments": ht_segments,
                "fallback_range_macro": fallback_macro,
                "fallback_channel_set_id": fallback_id,
                "fallback_ch_start": fb_start,
                "fallback_ch_end": fb_end,
                "fallback_encoded_eirp": 30,
                "channel_set_comment": f"Auto-generated fallback for {band.upper()} band",
            }

            try:
                content = self.render_from_template_string(template_path, context)
                output_path.write_text(content, encoding='utf-8')
                self.logger.info(f"✅ 已生成 [{band.upper()}] 文件: {output_path}")
            except Exception as e:
                self.logger.error(f"❌ 模板渲染失败 ({template_path}): {e}")
                raise

    def build_normal_structure(self, entries):
        grouped = defaultdict(list)
        for e in entries:
            bw = str(e["bw"])
            grouped[bw].append(e)

        result = []
        for bw in ["20", "40", "80", "160"]:
            if bw in grouped:
                sorted_entries = sorted(grouped[bw], key=lambda x: (x["ch_start"], x["encoded_power"]))
                result.append((bw, sorted_entries))
        return result

    def build_ht_structure(self, entries):
        groups = defaultdict(list)
        for e in entries:
            bw = str(e["bw"])
            if "EXT4" in e["rate_set_macro"]:
                level = "ext4"
            elif "EXT" in e["rate_set_macro"]:
                level = "ext"
            else:
                level = "base"
            groups[(level, bw)].append(e)

        order = [
            ("base", "20"), ("base", "40"), ("base", "80"), ("base", "160"), ("base", "320"),
            ("ext", "20"), ("ext", "40"), ("ext", "80"), ("ext", "160"), ("ext", "320"),
            ("ext4", "20"), ("ext4", "40"), ("ext4", "80"), ("ext4", "160"), ("ext4", "320"),
        ]

        segments = []
        active_segment_count = sum(1 for key in order if key in groups)

        for idx, (level, bw) in enumerate(order):
            key = (level, bw)
            if key not in groups:
                continue
            seg_entries = sorted(groups[key], key=lambda x: (x["ch_start"], x["encoded_power"]))
            count = len(seg_entries)

            # New logic: check whether any populated segment still follows from idx+1 onward
            has_more = any(
                (l, b) in groups for l, b in order[idx + 1:]
            )

            header_flags = f"CLM_DATA_FLAG_WIDTH_{bw} | CLM_DATA_FLAG_MEAS_COND"
            if has_more:
                header_flags += " | CLM_DATA_FLAG_MORE"
            if level != "base":
                header_flags += " | CLM_DATA_FLAG_FLAG2"

            segment = {
                "header_flags": header_flags,
                "count": count,
                "entries": seg_entries
            }
            if level == "ext":
                segment["flag2"] = "CLM_DATA_FLAG2_RATE_TYPE_EXT"
            elif level == "ext4":
                segment["flag2"] = "CLM_DATA_FLAG2_RATE_TYPE_EXT4"
            segments.append(segment)

        return segments

    def render_from_template_string(self, template_path, context):
        from jinja2 import Environment, FileSystemLoader
        import os

        # Resolve the template directory
        template_dir = os.path.dirname(resource_path(template_path))
        loader = FileSystemLoader(template_dir)
        env = Environment(loader=loader)
        filename = os.path.basename(template_path)
        template = env.get_template(filename)
        return template.render(**context)

    def log_changes_to_file(self, changes, locale_id, total_entries):
        """Write a change summary to a log file."""
        log_dir = get_log_dir()  # ✅ single source for the log directory

        # Build a unique file name from a timestamp
        timestamp_str = datetime.now().strftime("%Y%m%d_%H%M%S")
        log_path = log_dir / f"parse_{locale_id}_{timestamp_str}.log"
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

        with open(log_path, 'w', encoding='utf-8') as f:
            # Overwrite with the latest changes
            f.write(f"========================================\n")
            f.write(f"CLM 变更日志\n")
            f.write(f"========================================\n")
            f.write(f"时间: {timestamp}\n")
            f.write(f"地区码: {locale_id}\n")
            f.write(f"总 TX 条目数: {total_entries}\n")
            f.write(f"\n")

            if not any(changes.values()):
                f.write(" 本次运行无任何变更,所有文件已是最新状态。\n")
            else:
                if changes['added_ranges']:
                    f.write(f" 新增 RANGE ({len(changes['added_ranges'])}):\n")
                    for r in sorted(changes['added_ranges']):
                        f.write(f"   → {r}\n")
                    f.write(f"\n")
                if changes['removed_ranges']:
                    f.write(f" 删除 RANGE ({len(changes['removed_ranges'])}):\n")
                    for r in sorted(changes['removed_ranges']):
                        f.write(f"   → {r}\n")
                    f.write(f"\n")
                if changes['modified_ranges']:
                    f.write(f" 修改 RANGE ({len(changes['modified_ranges'])}):\n")
                    for r in sorted(changes['modified_ranges']):
                        f.write(f"   → {r}\n")
                    f.write(f"\n")

                other_adds = changes['other_additions']
                other_dels = changes['other_deletions']
                if other_adds or other_dels:
                    f.write(f" 其他变更:\n")
                    for line in other_adds[:10]:
                        f.write(f"   add: {line}\n")
                    for line in other_dels[:10]:
                        f.write(f"   del: {line}\n")
                    if len(other_adds) > 10 or len(other_dels) > 10:
                        f.write(f"   ... 还有 {len(other_adds) + len(other_dels) - 20} 处未显示\n")
                    f.write(f"\n")

            f.write(f"输出目录: {log_dir}\n")
            f.write(f"备份文件: {Path(self.target_c_file).with_suffix('.c.bak')}\n")
            f.write(f"========================================\n")

        self.logger.info(f" 已保存变更日志 → {log_path}")

    def save_channel_set_map_to_config(self):
        """Precisely update channel_set_map and the related fields in config.json
        without changing the overall structure of the file."""
        try:
            # 1. Read the full config from disk
            with open(self.config_file_path, 'r', encoding='utf-8') as f:
                config = json.load(f)

            # 2. Update only the fields we own (equivalent to a direct assignment)
            valid_keys = [k for k in self.channel_set_map.keys()
                          if re.match(r'RANGE_[\dA-Z]+_\d+M_\d+_\d+', k)]
            config["channel_set_map"] = {k: v for k, v in self.channel_set_map.items() if k in valid_keys}
            config["used_ranges_by_band"] = {
                band: sorted(v) for band, v in self.used_ranges_by_band.items()
            }
            config["used_ranges_count_per_band"] = {
                band: len(v) for band, v in self.used_ranges_by_band.items()
            }

            # 3. Write the file back
            with open(self.config_file_path, 'w', encoding='utf-8') as f:
                json.dump(config, f, indent=4, ensure_ascii=False)

            self.logger.info("✅ 成功更新配置文件(仅修改目标字段)")
            self.logger.info(f" 使用宏数量: {len(self.all_used_ranges)}")
        except Exception as e:
            self.logger.error(f"❌ 写入失败: {e}")
            raise

    # Main conversion entry point
    def convert(self, file_path):
        self.logger.info(" 正在执行 convert()...")
        self.logger.info(" ...")

        c_source = Path(self.target_c_file)
        if not c_source.exists():
            raise FileNotFoundError(f"目标 C 文件不存在: {c_source}")

        ext = os.path.splitext(file_path)[-1].lower()
        if ext == ".xlsx":
            wb = load_workbook(file_path, data_only=True)
            sheets = [{"sheet": ws, "format": "xlsx"} for ws in wb.worksheets]
        elif ext == ".xls":
            wb = xlrd.open_workbook(file_path)
            sheets = [{"sheet": ws, "format": "xls"} for ws in wb.sheets()]
        else:
            raise ValueError("仅支持 .xls 或 .xlsx 文件")

        # Clear the results of the previous run
        self.per_sheet_results.clear()

        for i, ws_obj in enumerate(sheets):
            sheet_name = wb.sheet_names()[i] if ext == ".xls" else ws_obj["sheet"].title
            config = self.match_sheet_to_config(sheet_name)
            if config:
                result = self.convert_sheet_with_config(ws_obj, sheet_name, config)
                if result:
                    self.per_sheet_results.append(result)
            else:
                self.logger.info(f"跳过未匹配的 sheet: {sheet_name}")

        self.generate_per_band_output()

        # ✅ Mark that parsing has finished
        self.last_config = {
            "success": True,
            "timestamp": datetime.now().isoformat(),
            "per_sheet_results": self.per_sheet_results,
            "used_ranges_by_band": self.used_ranges_by_band,
            "locale_display_name": self.locale_display_name,
            "output_dir": str(get_output_dir()),  # if this helper is available
        }


if __name__ == "__main__":
    # === 1. Make sure the directories exist ===
    ensure_directories()

    # === Build the log path dynamically ===
    LOG_DIR = get_log_dir()
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    LOG_FILE = LOG_DIR / f"excel_to_clm_{timestamp}.log"

    # === 2. Configure the logging system ===
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s [%(levelname)s] %(name)s: %(message)s',
        handlers=[
            logging.FileHandler(LOG_FILE, encoding='utf-8'),
            logging.StreamHandler(sys.stdout)
        ],
        force=True
    )

    # === 3. Get a logger and print the welcome banner ===
    logger = logging.getLogger(__name__)
    logger.info("=" * 60)
    logger.info("🔧 CLM 转换工具 启动")
    logger.info(f"📄 日志路径: {LOG_FILE}")
    logger.info(f"📦 当前工作目录: {os.getcwd()}")
    logger.info("=" * 60)

    # === 4. Prepare the arguments and run ===
    config_path = "config/config.json"
    display_name = None
    input_file = "input/Archer BE900US 2.xlsx"

    try:
        converter = ExcelToCLMConverter(
            config_path=config_path,
            locale_display_name=display_name
        )
        # Explicitly rebind the logger (optional)
        converter.logger = logging.getLogger("converter")

        logger.info(f"🔍 开始处理文件: {input_file}")
        converter.convert(input_file)
        logger.info("✅ 处理完成!")
    except Exception as e:
        logger.error(f"❌ 程序异常终止: {e}", exc_info=True)
        raise
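
A few standalone sketches may help when reading the listing above; none of them are part of excel_to_clm.py, and all example values are invented for illustration.

First, the shape of the "channel_segments" section that get_valid_channels_for_bandwidth() reads from config.json. The structure (band → bandwidth → list of center channels) follows the lookup in that method; the channel numbers below are hypothetical and not taken from any real config.

    # Hypothetical "channel_segments" shape; values are illustrative only.
    channel_segments = {
        "2g": {"20": [1, 6, 11]},
        "5g": {"20": [36, 40, 44, 48], "80": [42, 58]},
    }

    band_key, bw_clean = "5g", "80"
    valid_chs = sorted(channel_segments.get(band_key, {}).get(bw_clean, []))
    print(valid_chs)  # [42, 58]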
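Second, the grouping idea behind compress_tx_limit_entries(): entries that share (encoded_power, rate_set_macro) are bucketed together and their channels collapsed into consecutive runs. The merge helper below is a plain consecutive-run merge written for this sketch, not the converter's merge_consecutive_channels(), and the entries are made up.

    from collections import defaultdict

    def merge_runs(channels):
        # Collapse a sorted channel list into (start, end) runs of consecutive numbers.
        runs, start, prev = [], None, None
        for ch in sorted(channels):
            if start is None:
                start = prev = ch
            elif ch == prev + 1:
                prev = ch
            else:
                runs.append((start, prev))
                start = prev = ch
        if start is not None:
            runs.append((start, prev))
        return runs

    raw = [  # hypothetical entries
        {"ch": 1, "encoded_power": 60, "rate_set_macro": "RATE_SET_OFDM"},
        {"ch": 2, "encoded_power": 60, "rate_set_macro": "RATE_SET_OFDM"},
        {"ch": 3, "encoded_power": 60, "rate_set_macro": "RATE_SET_OFDM"},
        {"ch": 6, "encoded_power": 60, "rate_set_macro": "RATE_SET_OFDM"},
    ]
    groups = defaultdict(list)
    for e in raw:
        groups[(e["encoded_power"], e["rate_set_macro"])].append(e["ch"])
    for key, chs in groups.items():
        print(key, merge_runs(chs))  # (60, 'RATE_SET_OFDM') [(1, 3), (6, 6)]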
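Third, the flag-composition rule in build_ht_structure(): every populated (level, bandwidth) segment gets a width flag plus CLM_DATA_FLAG_MEAS_COND, CLM_DATA_FLAG_MORE is added as long as a later segment still follows, and non-base rate levels additionally carry CLM_DATA_FLAG_FLAG2. The two segments below are example data.

    # Standalone illustration of build_ht_structure's header_flags logic.
    order = [("base", "20"), ("ext", "40")]
    groups = {("base", "20"): [{"ch_start": 1}], ("ext", "40"): [{"ch_start": 3}]}

    for idx, (level, bw) in enumerate(order):
        if (level, bw) not in groups:
            continue
        flags = f"CLM_DATA_FLAG_WIDTH_{bw} | CLM_DATA_FLAG_MEAS_COND"
        if any(key in groups for key in order[idx + 1:]):  # a later segment still follows
            flags += " | CLM_DATA_FLAG_MORE"
        if level != "base":                                # ext / ext4 levels carry FLAG2
            flags += " | CLM_DATA_FLAG_FLAG2"
        print(level, bw, "->", flags)

    # Output:
    # base 20 -> CLM_DATA_FLAG_WIDTH_20 | CLM_DATA_FLAG_MEAS_COND | CLM_DATA_FLAG_MORE
    # ext 40 -> CLM_DATA_FLAG_WIDTH_40 | CLM_DATA_FLAG_MEAS_COND | CLM_DATA_FLAG_FLAG2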
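Finally, the fallback CHANNEL_SET id assignment in generate_per_band_output(): a fallback RANGE macro that is not yet in channel_set_map is appended with the next free id. The map contents here are hypothetical.

    channel_set_map = {"RANGE_2G_20M_1_11": 1, "RANGE_5G_20M_36_48": 2}
    fallback_macro = "RANGE_2G_20M_1_13"
    if fallback_macro not in channel_set_map:
        channel_set_map[fallback_macro] = max(channel_set_map.values(), default=0) + 1
    print(channel_set_map[fallback_macro])  # 3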