Sharing: sp_get_table_max, finding the tables with the most rows on an instance

This article presents a SQL Server stored procedure, sp_get_table_max, that lists the tables with the largest row counts across the user databases on an instance. By combining dynamic SQL with a temporary table, the procedure collects per-table row counts from every matching database in a single call, so you can quickly locate the tables holding the most data.
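The core of the procedure is a per-database row-count query against the sys.dm_db_partition_stats DMV. As a minimal standalone sketch, run inside a single database (the procedure below wraps essentially this query in dynamic SQL so it can be repeated across databases):

-- Row count of every user table in the current database, largest first.
-- index_id < 2 keeps only the heap (0) or clustered index (1) rows,
-- so each table is counted once.
SELECT  o.name AS TableName,
        SCHEMA_NAME(o.schema_id) AS SchemaName,
        s.row_count AS RowsCount
FROM    sys.objects o
        JOIN sys.dm_db_partition_stats s
          ON o.[object_id] = s.[object_id]
WHERE   o.type = 'U'
        AND s.index_id < 2
        AND o.is_ms_shipped = 0
ORDER BY s.row_count DESC;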
USE master
GO
IF OBJECT_ID('sp_get_table_max', 'P') IS NOT NULL
    DROP PROCEDURE sp_get_table_max
GO
CREATE PROCEDURE sp_get_table_max
(
    @db  NVARCHAR(128) = '',   -- database name prefix filter ('' = all user databases)
    @tb  NVARCHAR(128) = '',   -- table name prefix filter ('' = all tables)
    @top INT           = 100   -- number of result rows to return
)
AS
SET NOCOUNT ON;

-- Results from every matching database are collected here. The DB_NAME
-- default is evaluated at insert time, i.e. after the USE [...] in the
-- dynamic SQL, so each row records its source database.
CREATE TABLE #TableSpace
(
    DB_NAME     VARCHAR(128) DEFAULT (DB_NAME()),
    TableName   VARCHAR(128),
    SCHEMA_NAME VARCHAR(128),
    RowsCount   CHAR(32)
)

-- Build one USE + INSERT batch per matching user database
-- (database_id >= 5 skips master, tempdb, model and msdb).
-- index_id < 2 limits the count to the heap (0) or clustered index (1),
-- so each table is counted once; note that a partitioned table still
-- yields one row per partition.
DECLARE @sql NVARCHAR(MAX)
SET @sql = ''
SELECT @sql = @sql + REPLACE('
USE [@dbname]
INSERT INTO #TableSpace ( TableName, SCHEMA_NAME, RowsCount )
SELECT  b.name AS tablename,
        SCHEMA_NAME(b.schema_id) AS SCHEMA_NAME,
        c.row_count AS datacount
FROM    sys.indexes a
        JOIN sys.objects b
          ON a.[object_id] = b.[object_id]
        JOIN sys.dm_db_partition_stats c
          ON b.[object_id] = c.[object_id]
         AND a.index_id = c.index_id
WHERE   b.type = ''U''
        AND b.name LIKE ''' + @tb + '%''
        AND a.index_id < 2
        AND b.is_ms_shipped = 0
', '@dbname', name)
FROM sys.databases
WHERE name LIKE @db + '%' AND database_id >= 5

PRINT @sql    -- for debugging; note PRINT truncates NVARCHAR(MAX) at 4000 characters
EXEC (@sql)

-- Return the @top largest tables, each with a ready-to-run
-- "SELECT TOP 10 *" statement against the fully qualified table name.
SELECT TOP (@top)
       'SELECT TOP 10 * FROM [' + DB_NAME + '].[' + SCHEMA_NAME + '].[' + TableName + ']' AS [SQL],
       *
FROM #TableSpace
ORDER BY CAST(RowsCount AS BIGINT) DESC

DROP TABLE #TableSpace
GO
-- Mark the procedure as a system object so it can be executed from the
-- context of any database, not just master.
EXEC sp_MS_marksystemobject 'sp_get_table_max'
GO
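Once installed and marked as a system object, the procedure can be invoked from any database context. A couple of usage examples for illustration (the 'Sales' database prefix and 'Order' table prefix are hypothetical placeholders):

-- Top 100 tables across all user databases on the instance:
EXEC sp_get_table_max

-- Top 20 tables whose names start with 'Order', restricted to
-- databases whose names start with 'Sales':
EXEC sp_get_table_max @db = 'Sales', @tb = 'Order', @top = 20

The first column of the result set is a generated "SELECT TOP 10 *" statement for each table, which can be copied and executed directly to inspect the data.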