# rate_set/rate_sync.py
import json
import os
import re
import logging
import sys
from pathlib import Path
from shutil import copy2
from datetime import datetime
from utils import resource_path
from typing import Dict, List, Tuple, Any
# -------------------------------
# Logging configuration
# -------------------------------
PROJECT_ROOT = Path(__file__).parent.parent.resolve()
LOG_DIR = PROJECT_ROOT / "output" / "log"
LOG_DIR.mkdir(parents=True, exist_ok=True)
LOG_FILE = LOG_DIR / f"rate_sync_{datetime.now().strftime('%Y%m%d_%H%M%S')}.log"
class RateSetSynchronizer:
def __init__(self, c_file_path=None, dry_run=False, config_path="config/config.json"):
self.logger = logging.getLogger(__name__)
        # === Step 1: Load the configuration file ===
        self.config_file_path = resource_path(config_path)
        self.logger.info(f"Config file: {self.config_file_path}")
        if not os.path.exists(self.config_file_path):
            raise FileNotFoundError(f"Config file not found: {self.config_file_path}")
        try:
            with open(self.config_file_path, 'r', encoding='utf-8') as f:
                self.config = json.load(f)
            self.logger.info(f"Config file loaded: {self.config_file_path}")
        except json.JSONDecodeError as e:
            raise ValueError(f"Malformed config file, JSON parsing failed: {self.config_file_path}") from e
        except Exception as e:
            raise RuntimeError(f"Unexpected error while reading the config file: {e}") from e
self.dry_run = dry_run
        # === Step 2: Resolve the target C file ===
        if c_file_path is None:
            if "target_c_file" not in self.config:
                raise KeyError("config file is missing the 'target_c_file' field")
            internal_c_path = self.config["target_c_file"]
            self.logger.info(f"Using built-in C file: {internal_c_path}")
            self.c_file_path = resource_path(internal_c_path)
            self._is_internal_c_file = True
        else:
            self.c_file_path = Path(c_file_path)
            self._is_internal_c_file = False
        if not self.c_file_path.exists():
            raise FileNotFoundError(f"C source file not found: {self.c_file_path}")
        # === Step 3: Initialize data containers ===
        self.rate_subsets = {}        # main_key -> list of subset dicts
        self.pending_injections = {}  # main_key -> injection data to write
        # === Step 4: Load the anchor markers ===
        for marker_key in ["STR_RATE_SET_DATA", "END_RATE_SET_DATA"]:
            if marker_key not in self.config:
                raise KeyError(f"config file is missing the '{marker_key}' field")
        self.start_marker = self.config["STR_RATE_SET_DATA"]
        self.end_marker = self.config["END_RATE_SET_DATA"]
        # === Step 5: Scan all rate_set .c files ===
self.rate_set_dir = Path(__file__).parent
        self.rate_files = [
            f for f in self.rate_set_dir.iterdir()
            if f.is_file() and f.suffix == ".c"
        ]
        if not self.rate_files:
            self.logger.warning("No rate_set .c files found")
        else:
            self.logger.info(f"Found {len(self.rate_files)} rate_set file(s)")
def offset_to_lineno(self, content: str, offset: int) -> int:
"""将字符偏移量转换为行号(从1开始)"""
return content.count('\n', 0, offset) + 1
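    # Example: offset_to_lineno("a\nb\nc", 4) -> 3 (offset 4 lands on "c", which is line 3)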
def filename_to_band_bw(self, filename: str) -> tuple[str, str, str] | None:
"""
从文件名提取 band、bw 和可选后缀,例如:
'2G_20M_rates.c' → ('2g', '20m', '')
'2G_40M_EXT_rate_set.c' → ('2g', '40m', 'ext')
'2G_40M_EXT4_rate_set.c' → ('2g', '40m', 'ext4')
'5G_80M_VHT_rate_set.c' → ('5g', '80m', 'vht')
返回: (band_str, bw_str, suffix_str),全小写
"""
# 正则匹配:^BAND_BW_SUFFIX_rest.c$
pattern = r'^([0-9]+[a-zA-Z])_(\d+M)(?:_([A-Za-z0-9]+))?_.*\.c$'
match = re.match(pattern, filename, re.I)
if not match:
self.logger.warning(f" 文件名不符合规范: {filename}")
return None
band_raw, bw_raw, suffix_raw = match.groups()
band = band_raw.lower()
bw = bw_raw.lower() + 'm' # e.g., 40m
suffix = suffix_raw.lower() if suffix_raw else ''
return band, bw, suffix
def get_main_key(self, band_key: str, bw_key: str, suffix_key: str = '') -> str:
"""
生成唯一的主键用于命名数组。
如果有后缀,则包含;否则只用 band_bw。
"""
if suffix_key:
return f"{band_key}_{bw_key}_{suffix_key}"
return f"{band_key}_{bw_key}"
def extract_sub_rate_sets(self, content: str) -> List[Dict[str, Any]]:
"""
        Extract blocks of the form /*NAME*/ N, rate1, rate2, ...
        Returns: [{'name': '...', 'count': N, 'rates': [...]}]
        """
        # Lazy-match each block up to the next /*...*/ comment (or the end of the content)
        pattern = r'/\*([A-Z0-9_]+)\*/\s+(\d+)\s*,?([\s\S]*?)(?=/\*|$)'
matches = re.findall(pattern, content, re.DOTALL)
sub_sets = []
for name, count_str, body in matches:
try:
count = int(count_str)
except ValueError:
self.logger.warning(f"⚠️ 子集 {name} 的计数不是整数: {count_str}")
continue
rate_items = re.findall(r'WL_RATE_[^,\s}]+', body)
            if len(rate_items) < count:
                self.logger.warning(f"⚠️ Subset {name} declares {count} entries but only {len(rate_items)} were found; using the actual count")
                count = len(rate_items)
sub_sets.append({
"name": name,
"count": count,
"rates": rate_items[:count]
})
self.logger.info(f" 提取到 {len(sub_sets)} 个子速率集")
return sub_sets
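    # A minimal sketch of the input this parser expects (the subset name and rate
    # identifiers are hypothetical), and the result it would produce:
    #
    #   /*RATE_SET_2G_20M_11B*/ 4,
    #       WL_RATE_1M, WL_RATE_2M, WL_RATE_5_5M, WL_RATE_11M,
    #
    #   -> [{'name': 'RATE_SET_2G_20M_11B', 'count': 4,
    #        'rates': ['WL_RATE_1M', 'WL_RATE_2M', 'WL_RATE_5_5M', 'WL_RATE_11M']}]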
def parse_existing_enums(self, content: str, main_key: str) -> set:
"""解析当前 enum 中是否已有同名前缀,用于去重"""
enum_name = f"rate_set_{main_key}"
enum_pattern = rf"enum\s+{re.escape(enum_name)}\s*\{{([^}}]*)\}}"
match = re.search(enum_pattern, content, re.DOTALL)
prefixes = set()
if match:
enum_body = match.group(1)
            # Capture full member names so they can be matched against subset-name prefixes
            found = re.findall(r'RATE_SET_[A-Za-z0-9_]+', enum_body)
prefixes.update(found)
self.logger.debug(f" 已存在枚举前缀: {prefixes}")
return prefixes
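    # Hypothetical shape of an enum this method scans for in the target C file
    # (the enum name follows the code above; the member names are assumptions):
    #
    #   enum rate_set_2g_20m {
    #       RATE_SET_2G_20M_11B = 0,
    #       RATE_SET_2G_20M_11G = 5,
    #   };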
def validate_and_prepare(self):
"""分析所有 rate_set 文件,准备待注入内容"""
self.logger.info("开始验证并准备速率集数据...")
main_content = self.c_file_path.read_text(encoding='utf-8')
changes_made = False
all_changes = []
for file_path in self.rate_files:
self.logger.info(f" 处理文件: {file_path.name}")
key = self.filename_to_band_bw(file_path.name)
if not key:
self.logger.warning(f" 跳过无法识别的文件: {file_path.name}")
continue
band_key, bw_key = key
main_key = self.get_main_key(band_key, bw_key)
content = file_path.read_text(encoding='utf-8')
sub_sets = self.extract_sub_rate_sets(content)
if not sub_sets:
continue
            # Check enum members that already exist (de-duplication)
existing_prefixes = self.parse_existing_enums(main_content, main_key)
new_injections = []
current_offset = 0
            data_entries = []   # length field plus all rates for each new subset
            index_entries = []  # start offset of each subset (pointing at its length field)
            # Walk the subsets to compute offsets (simulating the current array layout)
for sub in sub_sets:
sub_name = sub["name"]
prefix = "_".join(sub_name.split("_")[:5]) # 如 RATE_SET_2G_20M_11b → 前缀
if prefix in existing_prefixes:
self.logger.warning(f"🟡 跳过已存在的子集: {sub_name}")
# 仍需推进偏移(假设它已在数组中)
current_offset += 1 + sub["count"]
continue
                # Append the length field, then the rates
data_entries.append(str(sub["count"]))
data_entries.extend(sub["rates"])
                # Record the start offset (i.e. the position of the length field)
                index_entries.append(str(current_offset))
                new_injections.append(sub)
                all_changes.append(f"➕ add '{sub_name}' ({sub['count']} entries) @ offset {current_offset}")
                changes_made = True
                # Advance the offset: +1 (length field) + N (rates)
current_offset += 1 + sub["count"]
if new_injections:
self.pending_injections[main_key] = {
"band": band_key.upper(),
"bw": bw_key.upper().replace('m', 'M'),
"data": data_entries,
"indices": index_entries,
"total_bytes": current_offset
}
if all_changes:
self.logger.info(f"共检测到 {len(all_changes)} 项新增:\n" + "\n".join(f" → {ch}" for ch in all_changes))
else:
self.logger.info("所有速率集均已存在,无需更新")
return changes_made
def _format_array_block(self, items: List[str], indent: str = " ", max_per_line: int = 8) -> str:
lines = []
for i in range(0, len(items), max_per_line):
chunk = items[i:i + max_per_line]
line = indent + ", ".join(chunk) + ","
lines.append(line)
result = "\n".join(lines).rstrip(',')
return result if result.strip() else indent + "0"
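    # Example: _format_array_block(["4", "WL_RATE_1M", "WL_RATE_2M"]) returns
    #   "    4, WL_RATE_1M, WL_RATE_2M"
    # i.e. items joined 8 per line, indented, with the final trailing comma stripped.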
def _write_back(self):
"""写回修改内容到主 C 文件"""
if self.dry_run:
self.logger.info("DRY-RUN: 跳过写入文件")
return
try:
content = self.c_file_path.read_text(encoding='utf-8')
start_idx = content.find(self.start_marker)
end_idx = content.find(self.end_marker)
if start_idx == -1 or end_idx == -1:
raise ValueError(f"未找到锚点区域:\n{self.start_marker}\n...\n{self.end_marker}")
header = content[:start_idx + len(self.start_marker)]
footer = content[end_idx:]
original_block = content[start_idx:end_idx]
new_block_parts = []
for main_key, data in self.pending_injections.items():
data_var = f"rate_sets_{main_key}"
index_var = f"rate_sets_index_{main_key}"
data_init = self._format_array_block(data["data"])
index_init = self._format_array_block(data["indices"])
block = (
f"\n/** {data['band']} {data['bw']} rate set */\n"
f"static const unsigned char {data_var}[] = {{\n{data_init}\n}};\n\n"
f"static const unsigned short {index_var}[] = {{\n{index_init}\n}};"
)
new_block_parts.append(block)
            # Rebuild only the anchored region: anything previously between the markers
            # is replaced by the newly generated blocks
            final_content = header + "".join(new_block_parts) + "\n" + footer
if content == final_content:
self.logger.info("文件内容无变化,无需写入")
return
# 备份
backup_path = self.c_file_path.with_suffix('.c.bak')
copy2(self.c_file_path, backup_path)
self.logger.info(f"已备份原文件 → {backup_path}")
# 写入
self.c_file_path.write_text(final_content, encoding='utf-8')
self.logger.info(f"成功写回 C 文件: {self.c_file_path}")
except Exception as e:
self.logger.error(f"写回失败: {e}", exc_info=True)
raise
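    # Sketch of what _write_back injects between the anchors, assuming a single
    # '2g_20m' key with one 4-rate subset (array names follow the f-strings above;
    # the rate values are illustrative):
    #
    #   /** 2G 20M rate set */
    #   static const unsigned char rate_sets_2g_20m[] = {
    #       4, WL_RATE_1M, WL_RATE_2M, WL_RATE_5_5M, WL_RATE_11M
    #   };
    #
    #   static const unsigned short rate_sets_index_2g_20m[] = {
    #       0
    #   };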
def run(self):
"""执行完整同步流程"""
self.logger.info("开始同步 RATE_SET 数据...")
try:
has_changes = self.validate_and_prepare()
if has_changes:
                if self.dry_run:
                    self.logger.info("Preview mode: changes detected, but no file will be written")
                else:
                    self._write_back()
                    self.logger.info("Synchronization complete: C file updated successfully")
            else:
                self.logger.info("All rate sets already exist; nothing to modify")
return has_changes
except Exception as e:
self.logger.error(f"同步失败: {e}", exc_info=True)
raise
def main():
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s [%(levelname)s] %(name)s: %(message)s',
handlers=[
logging.FileHandler(LOG_FILE, encoding='utf-8'),
logging.StreamHandler(sys.stdout)
],
force=True
)
logger = logging.getLogger(__name__)
    # Fixed parameters (could also be exposed via argparse; see the commented sketch below)
dry_run = False
config_path = "config/config.json"
print("开始同步 RATE_SET 定义...")
if dry_run:
print("启用 dry-run 模式:仅预览变更,不修改文件")
try:
sync = RateSetSynchronizer(
c_file_path=None,
dry_run=dry_run,
config_path=config_path,
)
sync.run()
print("同步完成!")
print(f"详细日志已保存至: {LOG_FILE}")
except FileNotFoundError as e:
logger.error(f"文件未找到: {e}")
print("请检查文件路径是否正确。")
sys.exit(1)
except PermissionError as e:
logger.error(f"权限错误: {e}")
print("无法读取或写入文件,请检查权限。")
sys.exit(1)
except Exception as e:
logger.error(f"程序异常退出: {e}", exc_info=True)
sys.exit(1)
if __name__ == '__main__':
main()
C:\Users\admin\PyCharmMiscProject\.venv\Scripts\python.exe F:\excle_to_clm\rate_set\rate_sync.py
Starting RATE_SET definition sync...
2025-10-24 16:13:49,304 [INFO] root: Resource path: F:\excle_to_clm
2025-10-24 16:13:49,304 [INFO] __main__: Config file: F:\excle_to_clm\config\config.json
2025-10-24 16:13:49,305 [INFO] __main__: Config file loaded: F:\excle_to_clm\config\config.json
2025-10-24 16:13:49,305 [INFO] __main__: Using built-in C file: input/wlc_clm_data_6726b0.c
2025-10-24 16:13:49,305 [INFO] root: Resource path: F:\excle_to_clm
2025-10-24 16:13:49,305 [INFO] __main__: Found 6 rate_set file(s)
2025-10-24 16:13:49,305 [INFO] __main__: Starting RATE_SET data synchronization...
2025-10-24 16:13:49,305 [INFO] __main__: Validating and preparing rate set data...
2025-10-24 16:13:49,310 [INFO] __main__: Processing file: 2G_20M_EXT4_rate_set.c
2025-10-24 16:13:49,311 [ERROR] __main__: Synchronization failed: too many values to unpack (expected 2)
Traceback (most recent call last):
File "F:\excle_to_clm\rate_set\rate_sync.py", line 303, in run
has_changes = self.validate_and_prepare()
File "F:\excle_to_clm\rate_set\rate_sync.py", line 179, in validate_and_prepare
band_key, bw_key = key
^^^^^^^^^^^^^^^^
ValueError: too many values to unpack (expected 2)
2025-10-24 16:13:49,314 [ERROR] __main__: Program exited abnormally: too many values to unpack (expected 2)
Traceback (most recent call last):
File "F:\excle_to_clm\rate_set\rate_sync.py", line 348, in main
sync.run()
~~~~~~~~^^
File "F:\excle_to_clm\rate_set\rate_sync.py", line 303, in run
has_changes = self.validate_and_prepare()
File "F:\excle_to_clm\rate_set\rate_sync.py", line 179, in validate_and_prepare
band_key, bw_key = key
^^^^^^^^^^^^^^^^
ValueError: too many values to unpack (expected 2)
Process finished with exit code 1