# power/power_sync.py
import json
import os
import re
import logging
import sys
from pathlib import Path
from shutil import copy2
from datetime import datetime
from utils import resource_path
from typing import Dict, List, Tuple, Any
# -------------------------------
# Logging configuration
# -------------------------------
PROJECT_ROOT = Path(__file__).parent.parent.resolve()
LOG_DIR = PROJECT_ROOT / "output" / "log"
LOG_DIR.mkdir(parents=True, exist_ok=True)
LOG_FILE = LOG_DIR / f"power_sync_{datetime.now().strftime('%Y%m%d_%H%M%S')}.log"
class PowerTableSynchronizer:
def __init__(self, c_file_path=None, dry_run=False, config_path="config/config.json"):
self.logger = logging.getLogger(__name__)
        # === Step 1: resolve all paths via resource_path ===
        self.config_file_path = resource_path(config_path)
        self.logger.info(f"Config file: {self.config_file_path}")
        if not os.path.exists(self.config_file_path):
            raise FileNotFoundError(f"Config file not found: {self.config_file_path}")
        try:
            with open(self.config_file_path, 'r', encoding='utf-8') as f:
                self.config = json.load(f)
            self.logger.info(f"Config file loaded: {self.config_file_path}")
        except json.JSONDecodeError as e:
            raise ValueError(f"Malformed config file, JSON parsing failed: {self.config_file_path}") from e
        except Exception as e:
            raise RuntimeError(f"Unexpected error while reading config file: {e}") from e
self.dry_run = dry_run
        # === Step 2: resolve the target C file ===
        if c_file_path is None:
            if "target_c_file" not in self.config:
                raise KeyError("config file is missing the 'target_c_file' field")
            internal_c_path = self.config["target_c_file"]
            self.logger.info(f"Using built-in C file: {internal_c_path}")
            # Wrap in Path so .exists()/.read_text() work regardless of what resource_path returns
            self.c_file_path = Path(resource_path(internal_c_path))
            self._is_internal_c_file = True
        else:
            self.c_file_path = Path(c_file_path)
            self._is_internal_c_file = False
        if not self.c_file_path.exists():
            raise FileNotFoundError(f"C source file not found: {self.c_file_path}")
        # === Step 3: initialize the data containers ===
        self.locale_enums = {}           # enum_name -> {"macros": [macro], "values": {macro: idx}}
        self.power_tables = {}           # table_name -> [lines]
        self.table_pending_appends = {}  # table_name -> List[str]
        # === Step 4: load the anchor markers ===
        for marker_key in ["STR_POWER_LOCALE_ENUM", "END_POWER_LOCALE_ENUM",
                           "STR_POWER_TABLE", "END_POWER_TABLE"]:
            if marker_key not in self.config:
                raise KeyError(f"config file is missing the '{marker_key}' field")
        self.start_enum_marker = self.config["STR_POWER_LOCALE_ENUM"]
        self.end_enum_marker = self.config["END_POWER_LOCALE_ENUM"]
        self.start_table_marker = self.config["STR_POWER_TABLE"]
        self.end_table_marker = self.config["END_POWER_TABLE"]
        # === Step 5: generated power table file ===
        gen_file = PROJECT_ROOT / "output" / "tx_limit_table.c"
        if not gen_file.exists():
            self.logger.error(f"Generated file not found: {gen_file}")
            raise FileNotFoundError(f"Run excel_to_clm.py first to generate tx_limit_table.c: {gen_file}")
        try:
            self.power = gen_file.read_text(encoding='utf-8')
        except Exception as e:
            self.logger.error(f"Failed to read {gen_file}: {e}")
            raise
        # Load the locale_targets configuration
        if "locale_targets" not in self.config:
            raise KeyError("config file is missing the 'locale_targets' field")
        required_keys = {"enum", "table", "suffix"}
        for i, item in enumerate(self.config["locale_targets"]):
            if not isinstance(item, dict) or not required_keys.issubset(item.keys()):
                raise ValueError(f"locale_targets[{i}] is missing required fields {required_keys}: {item}")
        self.locale_targets = self.config["locale_targets"]
        self.logger.info(f"Loaded {len(self.locale_targets)} locale mapping targets")
def offset_to_lineno(self, content: str, offset: int) -> int:
"""将字符偏移量转换为行号(从1开始)"""
return content.count('\n', 0, offset) + 1
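        # Example (illustrative): offset_to_lineno("a\nb\nc", 2) == 2, since
        # exactly one '\n' precedes offset 2.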
def _extract_brace_content(self, content: str, start_brace_pos: int) -> tuple[str | None, int]:
depth = 0
i = start_brace_pos
while i < len(content):
c = content[i]
if c == '{':
depth += 1
elif c == '}':
depth -= 1
if depth == 0:
                    inner = content[start_brace_pos + 1:i].strip()
                    return inner, i + 1  # return the inner content and the index just past '}'
i += 1
return None, -1
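        # Example (illustrative): for content = "x[] = {1, {2}, 3};" with
        # start_brace_pos at the first '{', this returns ("1, {2}, 3", 17):
        # the stripped inner text and the index just past the matching '}'.
        # Unbalanced braces yield (None, -1).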
    def parse_c_power_definitions(self):
        """Parse `enum locale_xxx_idx` and `static const unsigned char locales_xxx[]` definitions from the C source file."""
        self.logger.info("Parsing power table definitions from the C file...")
self.logger.info("...")
content = self.c_file_path.read_text(encoding='utf-8')
        # --- Parse the ENUM region ---
        try:
            enum_start_idx = content.find(self.start_enum_marker)
            enum_end_idx = content.find(self.end_enum_marker)
            if enum_start_idx == -1 or enum_end_idx == -1:
                raise ValueError("LOCALE ENUM marker block not found")
            enum_block = content[enum_start_idx:enum_end_idx]
            start_line = self.offset_to_lineno(content, enum_start_idx)
            end_line = self.offset_to_lineno(content, enum_end_idx)
            self.logger.info(f"Found ENUM marker range: line {start_line} → line {end_line}")
enum_pattern = re.compile(
r'(enum\s+locale_[a-zA-Z0-9_]+(?:_[a-zA-Z0-9_]+)*_idx\s*\{)([^}]*)\}\s*;',
re.DOTALL | re.IGNORECASE
)
for match in enum_pattern.finditer(enum_block):
enum_decl = match.group(0)
self.logger.debug(f" 解析枚举声明: {enum_decl}")
enum_name_match = re.search(r'locale_[\w\d_]+_idx', enum_decl, re.IGNORECASE)
if not enum_name_match:
continue
enum_name = enum_name_match.group(0)
body = match.group(2)
                # Strip comments before extracting assignments
                body_no_comment = re.sub(r'//.*|/\*.*?\*/', '', body, flags=re.DOTALL)
                # Only extract macros assigned an explicit numeric value
valid_assignments = re.findall(
r'(LOCALE_[A-Za-z0-9_]+)\s*=\s*(-?\b\d+\b)',
body_no_comment
)
macro_list = [m[0] for m in valid_assignments]
value_map = {m: int(v) for m, v in valid_assignments}
self.locale_enums[enum_name] = {
"macros": macro_list,
"values": value_map,
"raw_body": body
}
self.logger.info(f" 解析枚举 {enum_name}: {len(macro_list)} 个宏")
except Exception as e:
self.logger.error(f"解析 ENUM 失败: {e}", exc_info=True)
        # --- Parse the TABLE region ---
        try:
            table_start_idx = content.find(self.start_table_marker)
            table_end_idx = content.find(self.end_table_marker)
            if table_start_idx == -1 or table_end_idx == -1:
                raise ValueError("POWER TABLE marker block not found")
            table_block = content[table_start_idx:table_end_idx]
            start_line = self.offset_to_lineno(content, table_start_idx)
            end_line = self.offset_to_lineno(content, table_end_idx)
            self.logger.info(f"Found TABLE marker range: line {start_line} → line {end_line}")
            # === Enhanced TABLE parsing: extract blocks keyed by /* Locale X */ comments ===
array_matches = list(re.finditer(
                r'''
                ^                            # start of line (with MULTILINE)
                \s*                          # optional leading whitespace
                (?:static\s+)?               # optional static
                (?:const\s+)?                # optional const
                (?:PROGMEM\s+)?              # optional PROGMEM (common in embedded code)
                (?:unsigned\s+char|uint8_t)  # either supported element type
                \s+                          # at least one space between type and name
                ([a-zA-Z_]\w*)               # array name (e.g. locales_2g_ht)
                \s*\[\s*\]                   # match [ ], allowing whitespace
                ''',
table_block,
re.VERBOSE | re.MULTILINE | re.IGNORECASE
))
            if not array_matches:
                self.logger.warning("No power table array definitions found in the TABLE region")
                # === extra debug information ===
                sample = table_block[:1000]
                self.logger.debug(f"First 1000 characters of the TABLE block:\n{sample}")
            else:
                for match in array_matches:
                    table_name = match.group(1)
                    self.logger.info(
                        f"Found array definition: {table_name} @ line {self.offset_to_lineno(table_block, match.start())}")
                    self.logger.debug(f"Regex matched array name: '{table_name}' (raw match: {match.group(0)})")
                    # Precompute the preview: backslashes inside f-string expressions
                    # are a SyntaxError on Python < 3.12
                    following = table_block[match.end():match.end() + 20].replace('\n', '\\n')
                    self.logger.debug(f"match.end() = {match.end()}, following chars = '{following}'")
                    # Locate the opening '{'
                    brace_start = table_block.find('{', match.end())
                    if brace_start == -1:
                        self.logger.warning(f"No opening brace found → skipping array {table_name}")
                        continue
                    else:
                        self.logger.debug(
                            f"Found '{{' at offset {brace_start}, line {self.offset_to_lineno(table_block, brace_start)}")
                    # Extract the content inside the braces
                    inner_content, end_pos = self._extract_brace_content(table_block, brace_start)
                    if inner_content is None:
                        self.logger.warning(f"Failed to extract brace content for {table_name} → inner_content is None")
                        continue
                    else:
                        self.logger.info(f"Extracted brace content for {table_name}, length: {len(inner_content)} characters")
                    # Split into lines
                    lines = inner_content.splitlines()
                    self.logger.info(f"{table_name}: extracted {len(lines)} data lines")
                    # Optional: preview the first few lines (avoids flooding the log)
                    preview_lines = min(10, len(lines))
                    for i in range(preview_lines):
                        self.logger.debug(f"[{i:2d}] {lines[i]}")
                    if len(lines) > 10:
                        self.logger.debug("... more lines follow")
                    # Walk the body line by line, splitting into blocks at /* Locale X */ comments
                    entries = []  # each block: {'locale_tag': 'a_359', 'lines': [...]}
                    current_block = []
                    current_locale = None
                    for line_num, line in enumerate(lines):
                        stripped = line.strip()
                        self.logger.debug(f"[Line {line_num:3d}] |{line}|")  # raw line (whitespace included)
                        self.logger.debug(f"  → stripped: |{stripped}|")
                        # Does this line start a new Locale comment?
                        comment_match = re.match(r'/\*\s*Locale\s+([A-Za-z0-9_-]+)\s*\([^)]+\)\s*\*/', stripped,
                                                 re.IGNORECASE)
                        if comment_match:
                            # Save the previous block
                            if current_locale and current_block:
                                entries.append({
                                    'locale_tag': current_locale,
                                    'lines': [ln.rstrip(',').rstrip() for ln in current_block]
                                })
                            # Start a new block
                            raw_name = comment_match.group(1)        # e.g. A-359
                            normalized = raw_name.replace('-', '_')  # → A_359
                            current_locale = normalized
                            current_block = []
                            # self.logger.info(f"Found new Locale comment: '{raw_name}' → normalized to '{normalized}'")
                            continue
                        # Ignore blank lines and comment-only lines
                        clean_line = re.sub(r'/\*.*?\*/|//.*', '', stripped).strip()
                        if clean_line:
                            current_block.append(stripped)
                            self.logger.debug(f"Added data line: {stripped}")
                        else:
                            if not stripped:
                                self.logger.debug("Ignoring blank line")
                            elif '//' in stripped or ('/*' in stripped and '*/' in stripped):
                                self.logger.debug(f"Ignoring comment-only line: {stripped}")
                            else:
                                self.logger.warning(f"Suspicious unhandled line: {stripped}")  # possibly the start of a multi-line comment
                    # Save the final block
                    if current_locale and current_block:
                        entries.append({
                            'locale_tag': current_locale,
                            'lines': [ln.rstrip(',').rstrip() for ln in current_block]
                        })
                    self.power_tables[table_name] = entries
                    self.logger.info(f"Parsed array {table_name}: {len(entries)} Locale data blocks")
except Exception as e:
self.logger.error(f"解析 TABLE 失败: {e}", exc_info=True)
    def validate_and_repair(self):
        self.logger.info("Validating and repairing the raw data blocks...")
        self.logger.info("...")
        modified = False
        changes = []
        # Extract every raw Locale data block (extract_all_raw_locale_data returns the raw lines)
        tx_power_data = self.extract_all_raw_locale_data()
        for target in self.locale_targets:
            enum_name = target["enum"]
            table_name = target["table"]
            suffix = target["suffix"]
            # Required-field check
            if "assigned_locale" not in target:
                raise KeyError(f"locale_targets entry is missing 'assigned_locale': {target}")
            locale = target["assigned_locale"]
            macro_name = f"LOCALE_{suffix}_IDX_{locale.replace('-', '_')}"
            # Check whether this Locale's data exists in the source file
            if locale not in tx_power_data:
                self.logger.warning(f"Locale data not found in tx_limit_table.c: {locale}")
                continue
            # Raw line list (with indentation, comments, and commas)
            data_lines = tx_power_data[locale]  # ← these are raw string lines
            # --- Handle the ENUM ---
            if enum_name not in self.locale_enums:
                self.logger.warning(f"Enum definition not found: {enum_name}")
                continue
enum_data = self.locale_enums[enum_name]
macros = enum_data["macros"]
values = enum_data["values"]
next_idx = self._get_next_enum_index(enum_name)
if macro_name not in macros:
macros.append(macro_name)
values[macro_name] = next_idx
changes.append(f"ENUM + {macro_name} = {next_idx}")
modified = True
if "pending_updates" not in enum_data:
enum_data["pending_updates"] = []
enum_data["pending_updates"].append((macro_name, next_idx))
            # --- Handle the TABLE ---
            if table_name not in self.power_tables:
                self.logger.warning(f"Power table array not found: {table_name}")
                continue
            self.logger.info(f"Found power table array: {table_name}")
            current_entries = self.power_tables[table_name]  # entries already parsed from the C file
            # Normalize the target locale name for comparison
            target_locale_normalized = locale.replace('-', '_')
            self.logger.debug(f"Target Locale name: {locale} → normalized to {target_locale_normalized}")
            # Check for an existing entry (compare locale_tag only)
            self.logger.debug(f"Existing locale_tags in {table_name}: {[e['locale_tag'] for e in current_entries]}")
            already_exists = any(
                entry['locale_tag'] == target_locale_normalized
                for entry in current_entries
            )
            if already_exists:
                self.logger.warning(f"Locale '{locale}' already exists in {table_name}, skipping")
                continue
            # Record the raw lines as-is; no further cleaning!
            current_entries.append({
                'locale_tag': target_locale_normalized,
                'lines': data_lines  # raw lines kept verbatim (for later display or validation)
            })
            changes.append(f"TABLE + {len(data_lines)} lines → {table_name}")
            modified = True
            # Queue the data block for writing (with its original formatting intact)
            if table_name not in self.table_pending_appends:
                self.table_pending_appends[table_name] = []
            self.table_pending_appends[table_name].append({
                'locale_tag': locale,      # original name
                'data_lines': data_lines   # complete raw lines (indentation, comments, commas)
            })
        if changes:
            self.logger.info(f"{len(changes)} additions required:\n" + "\n".join(f"  → {ch}" for ch in changes))
return modified
    def _get_next_enum_index(self, enum_name):
        """Return the next free index based on the already-parsed values."""
        if enum_name not in self.locale_enums:
            self.logger.warning(f"Enum definition not found: {enum_name}")
            return 0
        value_map = self.locale_enums[enum_name]["values"]  # use the parsed data directly
        if not value_map:
            return 0
        # Only consider non-negative values (skip reserved ones such as CLM_LOC_NONE=-1, CLM_LOC_SAME=-2)
        used_indices = [v for v in value_map.values() if v >= 0]
        if used_indices:
            next_idx = max(used_indices) + 1
        else:
            next_idx = 0  # start from 0 when there are no valid values
        return next_idx
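    # Example (illustrative): with values {"LOCALE_2G_IDX_A": 0, "LOCALE_2G_IDX_B": 1,
    # "CLM_LOC_NONE": -1}, _get_next_enum_index returns 2; negative reserved
    # values never influence the next index.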
    def extract_all_raw_locale_data(self) -> Dict[str, List[str]]:
        """
        Extract every data block that follows a /* Locale XXX */ comment in output/tx_limit_table.c
        (up to the next Locale comment or EOF). Parses line by line and keeps the original
        formatting (indentation, comments, commas) without any cleaning.
        """
        lines = self.power.splitlines()
        locale_data = {}
        current_locale = None
        current_block = []
        for i, line in enumerate(lines):
            stripped = line.strip()
            # Does this line start a new Locale marker?
            match = re.match(r'/\*\s*Locale\s+([A-Za-z0-9_]+)\s*\*/', stripped, re.IGNORECASE)
            if match:
                # Save the previous block (raw lines, no cleaning)
                if current_locale:
                    locale_data[current_locale] = current_block
                    self.logger.debug(f"Extracted Locale {current_locale}, {len(current_block)} lines")
                # Start a new block
                current_locale = match.group(1)
                current_block = []
                self.logger.debug(f"Found Locale: {current_locale}")
                continue
            # Collect the current locale's content verbatim
            if current_locale is not None:
                current_block.append(line.rstrip('\r\n'))  # strip only the newline, keep everything else
        # Handle the final block
        if current_locale:
            locale_data[current_locale] = current_block
            self.logger.debug(f"Extracted final Locale {current_locale}, {len(current_block)} lines")
        self.logger.info(f"Extracted {len(locale_data)} Locale data blocks: {list(locale_data.keys())}")
        return locale_data
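    # Illustrative tx_limit_table.c fragment consumed above (hypothetical values):
    #     /* Locale A_359 */
    #     17, 17, 30,
    # which yields {"A_359": ["    17, 17, 30,"]}, formatting preserved verbatim.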
    def _write_back_in_blocks(self):
        """Write the modified enum and table blocks back to the C file, updating precisely within the anchor blocks."""
        self.logger.info("Writing back the modified data...")
        if self.dry_run:
            self.logger.info("DRY-RUN: skipping file write")
            return
        try:
            content = self.c_file_path.read_text(encoding='utf-8')
            # === Step 1: locate all anchor markers ===
            enum_start = content.find(self.start_enum_marker)
            enum_end = content.find(self.end_enum_marker)
            table_start = content.find(self.start_table_marker)
            table_end = content.find(self.end_table_marker)
            if -1 in (enum_start, enum_end, table_start, table_end):
                missing = []
                if enum_start == -1: missing.append(f"start ENUM: {self.start_enum_marker}")
                if enum_end == -1: missing.append(f"end ENUM: {self.end_enum_marker}")
                if table_start == -1: missing.append(f"start TABLE: {self.start_table_marker}")
                if table_end == -1: missing.append(f"end TABLE: {self.end_table_marker}")
                raise ValueError(f"Anchor markers not found: {missing}")
            enum_block = content[enum_start:enum_end]
            table_block = content[table_start:table_end]
            self.logger.info(f"Enum edit range: line {self.offset_to_lineno(content, enum_start)} → "
                             f"line {self.offset_to_lineno(content, enum_end)}")
            self.logger.info(f"Array edit range: line {self.offset_to_lineno(content, table_start)} → "
                             f"line {self.offset_to_lineno(content, table_end)}")
            replacements = []  # (start, end, replacement)
def remove_comments(text):
text = re.sub(r'//.*$', '', text, flags=re.MULTILINE)
text = re.sub(r'/\*.*?\*/', '', text, flags=re.DOTALL)
return text.strip()
            # === Step 3: update the ENUMs ===
for target in self.locale_targets:
enum_name_key = target["enum"]
enum_data = self.locale_enums.get(enum_name_key)
if not enum_data or "pending_updates" not in enum_data:
continue
insertions = enum_data["pending_updates"]
if not insertions:
continue
pattern = re.compile(
rf'(enum\s+{re.escape(enum_name_key)}\s*\{{)([^}}]*)\}}\s*;',
re.DOTALL | re.IGNORECASE
)
match = pattern.search(enum_block)
if not match:
self.logger.warning(f"未找到枚举: {enum_name_key}")
continue
header_part = match.group(1)
body_content = match.group(2)
lines = [ln for ln in body_content.split('\n') if ln.strip()]
last_line = lines[-1] if lines else ""
indent_match = re.match(r'^(\s*)', last_line)
line_indent = indent_match.group(1) if indent_match else " "
expanded_last = last_line.expandtabs(4)
clean_last = remove_comments(last_line)
first_macro_match = re.search(r'LOCALE_[A-Z0-9_]+', clean_last)
default_indent_len = len(line_indent.replace('\t', ' '))
target_macro_col = default_indent_len
if first_macro_match:
raw_before = last_line[:first_macro_match.start()]
expanded_before = raw_before.expandtabs(4)
target_macro_col = len(expanded_before)
eq_match = re.search(r'=\s*\d+', clean_last)
if eq_match and first_macro_match:
eq_abs_start = first_macro_match.start() + eq_match.start()
raw_eq_part = last_line[:eq_abs_start]
expanded_eq_part = raw_eq_part.expandtabs(4)
target_eq_col = len(expanded_eq_part)
else:
target_eq_col = target_macro_col + 30
new_body = body_content.rstrip()
if not new_body.endswith(','):
new_body += ','
for macro_name, next_idx in insertions:
current_visual_len = len(macro_name.replace('\t', ' '))
padding_to_eq = max(1, target_eq_col - target_macro_col - current_visual_len)
formatted_macro = f"{macro_name}{' ' * padding_to_eq}= {next_idx}"
visible_macros = len(re.findall(r'LOCALE_[A-Z0-9_]+', clean_last))
MAX_PER_LINE = 4
if visible_macros < MAX_PER_LINE and last_line.strip():
insertion = f" {formatted_macro},"
                        updated_last = last_line.rstrip() + insertion
                        # Split new_body (not body_content) so repeated insertions on the same line accumulate
                        new_body = new_body.rsplit(last_line, 1)[0] + updated_last
                        last_line = updated_last
clean_last = remove_comments(last_line)
else:
raw_indent_len = len(line_indent.replace('\t', ' '))
leading_spaces = max(0, target_macro_col - raw_indent_len)
prefix_padding = ' ' * leading_spaces
new_line = f"\n{line_indent}{prefix_padding}{formatted_macro},"
new_body += new_line
last_line = new_line.strip()
clean_last = remove_comments(last_line)
new_enum = f"{header_part}{new_body}\n}};"
full_start = enum_start + match.start()
full_end = enum_start + match.end()
replacements.append((full_start, full_end, new_enum))
self.logger.debug(f"插入 ENUM: {dict(insertions)}")
enum_data.pop("pending_updates", None)
            # === Step 4: update the TABLEs using the pending_appends data ===
seen = set()
table_names = []
for target in self.locale_targets:
name = target["table"]
if name not in seen:
table_names.append(name)
seen.add(name)
for table_name in table_names:
                if table_name not in self.power_tables:
                    self.logger.debug(f"Skipping undefined table: {table_name}")
                    continue
                if table_name not in self.table_pending_appends:
                    self.logger.debug(f"No pending data for: {table_name}")
                    continue
data_to_insert = self.table_pending_appends[table_name]
if not data_to_insert:
continue
pattern = re.compile(
rf'(\b{re.escape(table_name)}\s*\[\s*\]\s*=\s*\{{)(.*?)(\}}\s*;\s*)',
re.DOTALL | re.IGNORECASE
)
match = pattern.search(table_block)
if not match:
self.logger.warning(f"未找到数组定义: {table_name}")
continue
header_part = match.group(1)
body_content = match.group(2)
footer_part = match.group(3)
lines = [ln for ln in body_content.split('\n') if ln.strip()]
last_line = lines[-1] if lines else ""
indent_match = re.match(r'^(\s*)', last_line)
line_indent = indent_match.group(1) if indent_match else " "
new_body = body_content.rstrip()
                # ==== Append each pending locale data block ====
                for item in data_to_insert:
                    locale_tag = item['locale_tag']
                    locale_display = locale_tag.replace('_', '-')
                    macro_suffix = locale_tag
                    # Add the comment marker (matching the original style)
                    new_body += f"\n{line_indent}/* Locale {locale_display} ({macro_suffix}) */"
                    # Emit the raw lines, each prefixed with the block's indentation
                    for raw_line in item['data_lines']:
                        # Skip pure-whitespace lines; keep all other formatting
                        if raw_line.strip():
                            new_body += f"\n{line_indent}{raw_line}"
                # Assemble the new table content
                full_start = table_start + match.start()
                full_end = table_start + match.end()
                new_table = f"{header_part}{new_body}\n{footer_part}"
                replacements.append((full_start, full_end, new_table))
                self.logger.debug(f"Appended {len(data_to_insert)} Locale data blocks to {table_name}")
                # Clear to prevent duplicate writes
                self.table_pending_appends.pop(table_name, None)
            # === Step 5: apply all replacements (in reverse order to keep offsets valid) ===
            if not replacements:
                self.logger.info("No modifications to write")
                return
            replacements.sort(key=lambda x: x[0], reverse=True)  # apply back to front
            final_content = content
            for start, end, r in replacements:
                final_content = final_content[:start] + r + final_content[end:]
            if content == final_content:
                self.logger.info("File content unchanged, nothing to write")
                return
            # Back up the original file
            backup_path = self.c_file_path.with_suffix('.c.bak')
            copy2(self.c_file_path, backup_path)
            self.logger.info(f"Backup created → {backup_path}")
            # Write the new content
            self.c_file_path.write_text(final_content, encoding='utf-8')
            self.logger.info(f"Successfully wrote back C file: {self.c_file_path}")
            self.logger.info(f"{len(replacements)} blocks updated")
        except Exception as e:
            self.logger.error(f"Failed to write back file: {e}", exc_info=True)
            raise
    def run(self):
        self.logger.info("Starting POWER LOCALE synchronization...")
        try:
            self.parse_c_power_definitions()
            was_modified = self.validate_and_repair()
            if was_modified:
                if self.dry_run:
                    self.logger.info("Preview mode: changes detected, but no file will be written")
                else:
                    self._write_back_in_blocks()  # perform the write
                    self.logger.info("Synchronization complete: C file updated")
            else:
                self.logger.info("All locales already exist, nothing to modify")
            return was_modified
        except Exception as e:
            self.logger.error(f"Synchronization failed: {e}", exc_info=True)
            raise
def main():
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s [%(levelname)s] %(name)s: %(message)s',
handlers=[
logging.FileHandler(LOG_FILE, encoding='utf-8'),
logging.StreamHandler(sys.stdout)
],
force=True
)
logger = logging.getLogger(__name__)
    # Fixed configuration
    c_file_path = "input/wlc_clm_data_6726b0.c"  # note: informational only; None is passed below
    dry_run = False
    log_level = "INFO"
    config_path = "config/config.json"
    logging.getLogger().setLevel(log_level)
    print("Starting POWER LOCALE synchronization...")
    print(f"C source file: {c_file_path}")
    if dry_run:
        print("dry-run mode enabled: changes are previewed only, no files are modified")
    try:
        sync = PowerTableSynchronizer(
            c_file_path=None,  # None → use the target_c_file configured in config.json
            dry_run=dry_run,
            config_path=config_path,
        )
        sync.run()
        print("Synchronization complete!")
        print(f"Detailed log saved to: {LOG_FILE}")
    except FileNotFoundError as e:
        logger.error(f"File not found: {e}")
        print("Please check that the file paths are correct.")
        sys.exit(1)
    except PermissionError as e:
        logger.error(f"Permission error: {e}")
        print("Cannot read or write the file; check permissions.")
        sys.exit(1)
    except Exception as e:
        logger.error(f"Program exited abnormally: {e}", exc_info=True)
        sys.exit(1)
if __name__ == '__main__':
main()
# rate_set/rate_sync.py
import json
import os
import re
import logging
import sys
from pathlib import Path
from utils import resource_path
from datetime import datetime
from typing import Dict, List, Tuple, Any
# -------------------------------
# Logging configuration
# -------------------------------
PROJECT_ROOT = Path(__file__).parent.parent.resolve()
LOG_DIR = PROJECT_ROOT / "output" / "log"
LOG_DIR.mkdir(parents=True, exist_ok=True)
LOG_FILE = LOG_DIR / f"rate_sync_{datetime.now().strftime('%Y%m%d_%H%M%S')}.log"
class RateSetSynchronizer:
    MAX_ENUM_PER_LINE = 4          # max macros per enum line
    MAX_DATA_ITEMS_PER_LINE = 4    # max values per data-array line
    MAX_INDEX_ITEMS_PER_LINE = 15  # max values per index-array line
def __init__(self, c_file_path=None, dry_run=False, config_path="config/config.json"):
self.logger = logging.getLogger(f"{__name__}.RateSetSynchronizer")
        # Load configuration
        self.config_file_path = resource_path(config_path)
        if not os.path.exists(self.config_file_path):
            raise FileNotFoundError(f"Config file not found: {self.config_file_path}")
with open(self.config_file_path, 'r', encoding='utf-8') as f:
self.config = json.load(f)
self.dry_run = dry_run
        # C file path
        if c_file_path is None:
            internal_c_path = self.config["target_c_file"]
            # Wrap in Path so .exists()/.read_text() work regardless of what resource_path returns
            self.c_file_path = Path(resource_path(internal_c_path))
        else:
            self.c_file_path = Path(c_file_path)
        if not self.c_file_path.exists():
            raise FileNotFoundError(f"C source file not found: {self.c_file_path}")
        # === Single pair of anchor markers ===
        self.block_start = self.config["STR_RATE_SET_DATA"]
        self.block_end = self.config["END_RATE_SET_DATA"]
        # Default array and enum names (rebound per file in inject_new_data)
        self.data_array_name = "rate_sets_2g_20m"
        self.index_array_name = "rate_sets_index_2g_20m"
        self.enum_name = "rate_set_2g_20m"
        # Scan all .c files (excluding this script itself)
        self.rate_set_dir = Path(__file__).parent
        self.rate_files = [
            f for f in self.rate_set_dir.iterdir()
            if f.is_file() and f.suffix == ".c" and f.name != "rate_sync.py"
        ]
        # Load the filename-to-structure mapping
        self.target_map = self.config.get("target_map")
        if not isinstance(self.target_map, dict):
            raise ValueError("config.json is missing the 'target_map' field, or it is malformed")
        self._validate_target_map()  # ← consistency check
    def _validate_target_map(self):
        """Check target_map consistency: no two full_keys may map to the same array or enum."""
seen_data = {}
seen_index = {}
seen_enum = {}
for key, cfg in self.target_map.items():
d = cfg["data"]
i = cfg["index"]
e = cfg["enum"]
            if d in seen_data:
                raise ValueError(f"data array conflict: '{d}' is used by both '{seen_data[d]}' and '{key}'")
            if i in seen_index:
                raise ValueError(f"index array conflict: '{i}' is used by both '{seen_index[i]}' and '{key}'")
            if e in seen_enum:
                raise ValueError(f"enum name conflict: '{e}' is used by both '{seen_enum[e]}' and '{key}'")
seen_data[d] = key
seen_index[i] = key
seen_enum[e] = key
    def parse_filename(self, filename: str) -> str:
        """
        Extract a band_bw_ext key from the filename, used to look up target_map.
        Examples:
            2G_20M_rate_set.c      → 2G_20M_BASE
            2G_20M_EXT_rate_set.c  → 2G_20M_EXT
            5G_80M_EXT4_rate_set.c → 5G_80M_EXT4
        """
        match = re.match(r'^([A-Z0-9]+)_([0-9]+M)(?:_(EXT\d*))?_rate_set\.c$', filename, re.I)
        if not match:
            raise ValueError(f"Unrecognized filename format: {filename}")
        band, bw, ext = match.groups()
        ext_type = ext.upper() if ext else "BASE"
        return f"{band.upper()}_{bw.upper()}_{ext_type}"
    def extract_sub_rate_sets(self, content: str) -> List[Dict[str, Any]]:
        """
        Extract /*NAME*/ N, headers followed by multi-line WL_RATE_xxx lists.
        Tolerates line wrapping, indentation, commas, blank lines, and interleaved comments.
        Uses a line-scanning state machine rather than one large regex, to avoid overruns.
        """
        self.logger.info("Extracting rate sets...")
        self.logger.info("...")
sub_sets = []
lines = [line.rstrip() for line in content.splitlines()]
i = 0
        # Matches the /*NAME*/ N, header
header_pattern = re.compile(r'/\*\s*([A-Za-z0-9_]+)\s*\*/\s*(\d+)\s*,?')
while i < len(lines):
line = lines[i]
stripped = line.strip()
            # Skip blank lines and line comments
            if not stripped or stripped.startswith("//"):
                i += 1
                continue
            # Look for a header: /*NAME*/ N,
match = header_pattern.search(stripped)
if not match:
i += 1
continue
name = match.group(1)
try:
count = int(match.group(2))
except ValueError:
self.logger.warning(f"⚠️ 计数无效,跳过: {name} = '{match.group(2)}'")
i += 1
continue
self.logger.info(f"🔍 发现块: {name}, 预期数量={count}")
            # Start collecting the body
            body_lines = []
            j = i + 1
            items_collected = 0
            max_lines_to_read = 200  # guard against unbounded reads
            while j < len(lines) and len(body_lines) < max_lines_to_read:
                ln = lines[j].strip()
                # Stop on: a new block / end of structure / a blank line once enough items were seen
                if ln.startswith("/*") or \
                        ln.startswith("}") or \
                        ln.startswith("enum") or \
                        (not ln and len(body_lines) > 0 and items_collected >= count):
                    break
                if ln and not ln.startswith("//"):
                    body_lines.append(lines[j])  # keep original indentation
                    # Count collected macros so the blank-line stop condition can fire
                    items_collected += len(re.findall(r'WL_RATE_[A-Za-z0-9_]+', ln))
                j += 1
            # Join the body and extract every WL_RATE_XXX macro
            body_text = "\n".join(body_lines)
            all_macros = re.findall(r'WL_RATE_[A-Za-z0-9_]+', body_text)
            rate_items = all_macros[:count]  # truncate to the declared count
            if len(rate_items) < count:
                self.logger.warning(f"[{name}] not enough entries: expected {count}, found {len(rate_items)}")
            # Build the result
            raw_block = "\n".join([line] + body_lines)
            sub_sets.append({
                "name": name,
                "count": count,
                "rates": rate_items,
                "raw_block": raw_block,
                "start_line": i,
                "end_line": j - 1
            })
            self.logger.debug(f"Extracted: {name} → {len(rate_items)} rates")
            i = j  # jump to the next block
        self.logger.info(f"Extracted {len(sub_sets)} valid subsets in total")
return sub_sets
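    # Illustrative sub-file fragment accepted above (hypothetical names/values):
    #     /*RATE_SET_2G_20M_SAMPLE*/ 2,
    #         WL_RATE_1M, WL_RATE_2M,
    # which yields {"name": "RATE_SET_2G_20M_SAMPLE", "count": 2,
    # "rates": ["WL_RATE_1M", "WL_RATE_2M"], ...}.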
    def parse_all_structures(self, full_content: str) -> Dict:
        """Parse the enum/data/index structures directly from the full C file."""
        self.logger.info("Parsing all structures...")
        self.logger.info("...")
result = {
'existing_enum': {},
'data_entries': [],
'index_values': [],
'data_len': 0
}
        # === Parse the enum ===
        enum_pattern = rf'enum\s+{re.escape(self.enum_name)}\s*\{{([^}}]+)\}};'
        enum_match = re.search(enum_pattern, full_content, re.DOTALL)
        if enum_match:
            body = enum_match.group(1)
            entries = re.findall(r'(RATE_SET_[^=,\s]+)\s*=\s*(\d+)', body)
            result['existing_enum'] = {k: int(v) for k, v in entries}
            self.logger.info(f"Parsed {len(entries)} existing enum entries")
        else:
            self.logger.warning(f"Enum definition not found: {self.enum_name}")
        # === Parse the data array ===
        data_pattern = rf'static const unsigned char {re.escape(self.data_array_name)}\[\] = \{{([^}}]+)\}};'
        data_match = re.search(data_pattern, full_content, re.DOTALL)
        if not data_match:
            raise ValueError(f"data array not found: {self.data_array_name}")
        data_code = data_match.group(1)
        result['data_entries'] = [item.strip() for item in re.split(r'[,\n]+', data_code) if item.strip()]
        result['data_len'] = len(result['data_entries'])
        # === Parse the index array ===
        index_pattern = rf'static const unsigned short {re.escape(self.index_array_name)}\[\] = \{{([^}}]+)\}};'
        index_match = re.search(index_pattern, full_content, re.DOTALL)
        if not index_match:
            raise ValueError(f"index array not found: {self.index_array_name}")
        index_code = index_match.group(1)
        result['index_values'] = [int(x.strip()) for x in re.split(r'[,\n]+', index_code) if x.strip()]
return result
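    # The patterns above expect declarations shaped like this (illustrative):
    #     enum rate_set_2g_20m { RATE_SET_A = 0, RATE_SET_B = 1 };
    #     static const unsigned char rate_sets_2g_20m[] = { 2, WL_RATE_1M, WL_RATE_2M };
    #     static const unsigned short rate_sets_index_2g_20m[] = { 0 };
    # Note the data/index patterns match literal spacing ("[] = {"), so a
    # reformatted declaration would not be found.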
def build_injection(self, new_subsets: List[Dict], existing_enum: Dict[str, int],
current_data_len: int) -> Tuple[List[str], List[int], List[str]]:
"""
构建要注入的新内容
返回: (new_data, new_indices, new_enums)
"""
new_data = []
new_indices = []
new_enums = []
current_offset = 0 # 当前相对于新块起始的偏移
next_enum_value = max(existing_enum.values(), default=-1) + 1
self.logger.info(f"开始构建注入内容,当前最大枚举值 = {next_enum_value}")
        for subset in new_subsets:
            enum_name = subset["name"]  # ✅ use the full name to avoid prefix collisions
            if enum_name in existing_enum:
                self.logger.info(f"Skipping existing enum entry: {enum_name} = {existing_enum[enum_name]}")
                # Data for an existing subset already lives in the old array (counted
                # in current_data_len), so it must not advance the new block's offset
                continue
            # Append the length followed by all of the rates
            new_data.append(str(subset["count"]))
            new_data.extend(subset["rates"])
            # The index is a global offset measured from the end of the old data
            global_index = current_data_len + current_offset
            new_indices.append(global_index)
            # Enum definition
            new_enums.append(f" {enum_name} = {next_enum_value}")
            self.logger.debug(f"New enum: {enum_name} → value={next_enum_value}, index={global_index}")
            next_enum_value += 1
            current_offset += 1 + subset["count"]
        self.logger.info(f"Build complete: {len(new_data)} data items, {len(new_indices)} indices, {len(new_enums)} enums")
return new_data, new_indices, new_enums
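    # Worked example (hypothetical): with current_data_len=100 and one new subset
    # {"name": "RATE_SET_X", "count": 2, "rates": ["WL_RATE_1M", "WL_RATE_2M"]},
    # this returns new_data=["2", "WL_RATE_1M", "WL_RATE_2M"], new_indices=[100]
    # (the global offset of the length byte), and one " RATE_SET_X = <max+1>" enum entry.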
def format_list(self, items: List[str], indent: str = " ", width: int = 8) -> str:
"""格式化数组为多行字符串"""
lines = []
for i in range(0, len(items), width):
chunk = items[i:i + width]
lines.append(indent + ", ".join(chunk) + ",")
return "\n".join(lines).rstrip(",")
def _safe_write_back(self, old_content: str, new_content: str) -> bool:
"""安全写回文件,带备份"""
if old_content == new_content:
self.logger.info("主文件内容无变化,无需写入")
return False
if self.dry_run:
self.logger.info("DRY-RUN 模式启用,跳过实际写入")
print("[DRY RUN] 差异预览(前 20 行):")
diff = new_content.splitlines()[:20]
for line in diff:
print(f" {line}")
return True
# 创建备份
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
backup = self.c_file_path.with_name(f"{self.c_file_path.stem}_{timestamp}.c.bak")
try:
self.c_file_path.rename(backup)
self.logger.info(f"原文件已备份为: {backup.name}")
except Exception as e:
self.logger.error(f"备份失败: {e}")
raise
# 写入新内容
try:
self.c_file_path.write_text(new_content, encoding='utf-8')
self.logger.info(f"✅ 成功写入更新后的文件: {self.c_file_path.name}")
return True
except Exception as e:
self.logger.error(f"写入失败: {e}", exc_info=True)
raise
def inject_new_data(self) -> bool:
try:
full_content = self.c_file_path.read_text(encoding='utf-8')
except Exception as e:
self.logger.error(f"读取主 C 文件失败: {e}")
raise
self.logger.info(f"正在处理 C 文件: {self.c_file_path.name}")
start_pos = full_content.find(self.block_start)
end_pos = full_content.find(self.block_end)
if start_pos == -1:
raise ValueError(f"未找到起始锚点: {self.block_start}")
if end_pos == -1:
raise ValueError(f"未找到结束锚点: {self.block_end}")
if end_pos <= start_pos:
raise ValueError("结束锚点位于起始锚点之前")
inner_start = start_pos + len(self.block_start)
block_content = full_content[inner_start:end_pos].strip()
all_changes_made = False
        # === Iterate over every rate set sub-file ===
        for file_path in self.rate_files:
            try:
                self.logger.info(f"→ Processing sub-file: {file_path.name}")
                # --- 1. Derive the full_key from the filename ---
                try:
                    full_key = self.parse_filename(file_path.name)
                    self.logger.debug(f"  ├─ parsed key: {full_key}")
                except ValueError as ve:
                    self.logger.warning(f"  └─ skipping unrecognized filename: {ve}")
                    continue
                # --- 2. Look up the target_map entry ---
                target = self.target_map.get(full_key)
                if not target:
                    self.logger.warning(f"  └─ no mapping defined in config.json for {full_key}, skipping")
                    continue
                # --- 3. Bind the current injection targets ---
                self.data_array_name = target["data"]
                self.index_array_name = target["index"]
                self.enum_name = target["enum"]
                self.logger.debug("  ├─ bound targets:")
                self.logger.debug(f"      data:  {self.data_array_name}")
                self.logger.debug(f"      index: {self.index_array_name}")
                self.logger.debug(f"      enum:  {self.enum_name}")
                # --- 4. Parse the current structures in the main file ---
                try:
                    parsed = self.parse_all_structures(full_content)
                except Exception as e:
                    self.logger.error(f"  └─ failed to parse main file structures: {e}")
                    continue
                # --- 5. Extract the rate sets from this sub-file ---
                file_content = file_path.read_text(encoding='utf-8')
                subsets = self.extract_sub_rate_sets(file_content)
                if not subsets:
                    self.logger.info("  └─ no valid subset data")
                    continue
                # --- 6. Build the content to inject ---
                new_data, new_indices, new_enums = self.build_injection(
                    subsets,
                    existing_enum=parsed['existing_enum'],
                    current_data_len=parsed['data_len']
                )
                if not new_data:
                    self.logger.info("  └─ nothing to update")
                    continue
                # --- 7. Write the new content back (targeted insertion) ---
                updated_content = self._write_back_in_blocks(
                    full_content, parsed, new_data, new_indices, new_enums
                )
                if updated_content != full_content:
                    all_changes_made = True
                    full_content = updated_content  # keep the in-memory content current for later files
                    self.logger.info(f"✅ Injected {len(subsets)} entries into {self.enum_name}")
except Exception as e:
self.logger.warning(f"❌ 处理文件失败 [{file_path.name}]: {e}")
if self.logger.isEnabledFor(logging.DEBUG):
self.logger.debug("详细堆栈:", exc_info=True)
continue
# 最终写回磁盘
if all_changes_made:
try:
return self._safe_write_back(self.c_file_path.read_text(encoding='utf-8'), full_content)
except Exception as e:
self.logger.error(f"写入最终文件失败: {e}")
raise
else:
self.logger.info("没有需要更新的内容")
return False
def _write_back_in_blocks(self, full_content: str, parsed: Dict,
new_data: List[str], new_indices: List[int], new_enums: List[str]) -> str:
"""
使用局部块操作策略:只在 /* START */ ... /* END */ 范围内修改内容
避免跨区域误改,无需额外边界校验
"""
self.logger.info("开始执行局部块写入操作...")
# === Step 1: 查找锚点位置并提取 block ===
start_pos = full_content.find(self.block_start)
end_pos = full_content.find(self.block_end)
if start_pos == -1 or end_pos == -1:
raise ValueError(f"未找到锚点标记: {self.block_start} 或 {self.block_end}")
if end_pos <= start_pos:
raise ValueError("结束锚点位于起始锚点之前")
inner_start = start_pos + len(self.block_start)
block_content = full_content[inner_start:end_pos]
replacements = [] # (start_in_block, end_in_block, replacement)
def remove_comments(text: str) -> str:
text = re.sub(r'//.*$', '', text, flags=re.MULTILINE)
text = re.sub(r'/\*.*?\*/', '', text, flags=re.DOTALL)
return text.strip()
        # === Step 2: update the ENUM ===
        if new_enums:
            enum_pattern = rf'(enum\s+{re.escape(self.enum_name)}\s*\{{)([^}}]*)\}}\s*;'
            match = re.search(enum_pattern, block_content, re.DOTALL | re.IGNORECASE)
            if not match:
                raise ValueError(f"Enum definition not found: {self.enum_name}")
header = match.group(1)
body_content = match.group(2)
lines = [ln for ln in body_content.split('\n') if ln.strip()]
last_line = lines[-1] if lines else ""
indent_match = re.match(r'^(\s*)', last_line)
line_indent = indent_match.group(1) if indent_match else " "
clean_last = remove_comments(last_line)
first_macro_match = re.search(r'RATE_SET_[A-Z0-9_]+', clean_last)
eq_match = re.search(r'=\s*\d+', clean_last)
target_eq_col = 30
if first_macro_match and eq_match:
raw_before_eq = last_line[:first_macro_match.start() + eq_match.start()]
expanded_before_eq = raw_before_eq.expandtabs(4)
target_eq_col = len(expanded_before_eq)
new_body = body_content.rstrip()
if not new_body.endswith(','):
new_body += ','
for enum_def in new_enums:
macro_name = enum_def.split('=')[0].strip().split()[-1]
value = enum_def.split('=')[1].strip().rstrip(',')
current_len = len(macro_name.replace('\t', ' '))
padding = max(1, target_eq_col - current_len)
formatted = f"{macro_name}{' ' * padding}= {value}"
visible_macros = len(re.findall(r'RATE_SET_[A-Z0-9_]+', remove_comments(last_line)))
                if visible_macros < self.MAX_ENUM_PER_LINE and last_line.strip():
                    insertion = f" {formatted},"
                    updated_last = last_line.rstrip() + insertion
                    # Split new_body (not body_content) so repeated insertions on the same line accumulate
                    new_body = new_body.rsplit(last_line, 1)[0] + updated_last
                    last_line = updated_last
else:
prefix_padding = ' ' * max(0, len(line_indent.replace('\t', ' ')) - len(line_indent))
new_line = f"\n{line_indent}{prefix_padding}{formatted},"
new_body += new_line
last_line = new_line.strip()
new_enum_code = f"{header}{new_body}\n}};"
replacements.append((match.start(), match.end(), new_enum_code))
self.logger.debug(f"计划更新 enum: 添加 {len(new_enums)} 项")
        # === Step 3: update the DATA array ===
        if new_data:
            data_pattern = rf'(static const unsigned char {re.escape(self.data_array_name)}\[\]\s*=\s*\{{)([^}}]*)(\}}\s*;)'
            match = re.search(data_pattern, block_content, re.DOTALL)
            if not match:
                raise ValueError(f"data array not found: {self.data_array_name}")
header = match.group(1)
body_content = match.group(2).strip()
footer = match.group(3)
lines = body_content.splitlines()
last_line = lines[-1] if lines else ""
indent_match = re.match(r'^(\s*)', last_line)
line_indent = indent_match.group(1) if indent_match else " "
new_body = body_content.rstrip()
if not new_body.endswith(','):
new_body += ','
for i in range(0, len(new_data), self.MAX_DATA_ITEMS_PER_LINE):
chunk = new_data[i:i + self.MAX_DATA_ITEMS_PER_LINE]
line = "\n" + line_indent + ", ".join(chunk) + ","
new_body += line
new_data_code = f"{header}{new_body}\n{footer}"
replacements.append((match.start(), match.end(), new_data_code))
self.logger.debug(f"计划更新 data 数组: 添加 {len(new_data)} 个元素")
        # === Step 4: update the INDEX array ===
        if new_indices:
            index_pattern = rf'(static const unsigned short {re.escape(self.index_array_name)}\[\]\s*=\s*\{{)([^}}]*)(\}}\s*;)'
            match = re.search(index_pattern, block_content, re.DOTALL)
            if not match:
                raise ValueError(f"index array not found: {self.index_array_name}")
header = match.group(1)
body_content = match.group(2).strip()
footer = match.group(3)
lines = body_content.splitlines()
last_line = lines[-1] if lines else ""
indent_match = re.match(r'^(\s*)', last_line)
line_indent = indent_match.group(1) if indent_match else " "
new_body = body_content.rstrip()
if not new_body.endswith(','):
new_body += ','
str_indices = [str(x) for x in new_indices]
chunk_size = self.MAX_INDEX_ITEMS_PER_LINE
for i in range(0, len(str_indices), chunk_size):
chunk = str_indices[i:i + chunk_size]
line = "\n" + line_indent + ", ".join(chunk) + ","
new_body += line
new_index_code = f"{header}{new_body}\n{footer}"
replacements.append((match.start(), match.end(), new_index_code))
self.logger.debug(f"计划更新 index 数组: 添加 {len(new_indices)} 个索引")
        # === Step 5: apply all replacements to block_content, back to front ===
        if not replacements:
            self.logger.info("No changes to write")
            return full_content
        # Reverse order keeps earlier offsets valid
        for start, end, r in sorted(replacements, key=lambda x: x[0], reverse=True):
block_content = block_content[:start] + r + block_content[end:]
        # === Step 6: splice the block back into the full file ===
final_content = (
full_content[:inner_start] +
block_content +
full_content[end_pos:]
)
self.logger.info(f"成功构建新内容,总长度变化: {len(full_content)} → {len(final_content)}")
return final_content
    def run(self):
        self.logger.info("Starting RATE_SET synchronization...")
        try:
            changed = self.inject_new_data()
            if changed:
                print("✅ Synchronization complete")
            else:
                print("✅ No new data, nothing to update")
            return {
                "success": True,
                "changed": changed,
                "file": str(self.c_file_path),
                # Note: the timestamp is recomputed here, so this name can differ
                # slightly from the actual backup written in _safe_write_back
                "backup": f"{self.c_file_path.stem}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.c.bak" if changed and not self.dry_run else None
            }
        except Exception as e:
            self.logger.error(f"Synchronization failed: {e}", exc_info=True)
            print("❌ Synchronization failed, see the log for details.")
            return {"success": False, "error": str(e)}
def main():
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s [%(levelname)s] %(name)s: %(message)s',
handlers=[
logging.FileHandler(LOG_FILE, encoding='utf-8'),
logging.StreamHandler(sys.stdout)
],
force=True
)
    dry_run = False  # set to True for a trial run
    try:
        sync = RateSetSynchronizer(dry_run=dry_run)
        sync.run()
        print("Synchronization complete!")
    except FileNotFoundError as e:
        logging.error(f"File not found: {e}")
        print("❌ File error, please check the paths.")
        sys.exit(1)
    except PermissionError as e:
        logging.error(f"Permission error: {e}")
        print("❌ Insufficient permissions; close any editor holding the file or run as administrator.")
        sys.exit(1)
    except Exception as e:
        logging.error(f"Program exited abnormally: {e}", exc_info=True)
        print("❌ Synchronization failed, see the log for details.")
        sys.exit(1)
if __name__ == '__main__':
main()