array_merge(): Argument #2 is not an array

This post shows how to fix the PHP error `array_merge(): Argument #2 is not an array` by casting the offending argument to an array. A before/after code example is provided, along with a short note on what the cast actually does.

The code reports: `array_merge(): Argument #2 is not an array`

```php
// Original code
array_merge($shownFilms['movieList'], $hotMovies['movieList']);

// After the fix
array_merge($shownFilms['movieList'], (array) $hotMovies['movieList']);
```

The `(array)` cast coerces the second argument into an array before it reaches `array_merge()`: `null` becomes an empty array and a scalar becomes a single-element array, so the call always receives two arrays and the warning disappears.
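Below is a minimal, self-contained sketch of the same fix; the `$shownFilms` / `$hotMovies` data is made up for illustration, and the `?? []` fallback additionally guards against the key being missing entirely. Note that on PHP 8+ the original call would not just emit a warning but throw a `TypeError`, so the cast (or an upfront check) matters even more there.

```php
<?php
// Hypothetical data: one source returns a proper list, the other returns null
// (e.g. an upstream API had no results), which is what triggers the warning.
$shownFilms = ['movieList' => [['title' => 'Film A'], ['title' => 'Film B']]];
$hotMovies  = ['movieList' => null];

// (array) null        => []            (empty array)
// (array) 'a string'  => ['a string']  (single-element array)
$merged = array_merge(
    (array) ($shownFilms['movieList'] ?? []),
    (array) ($hotMovies['movieList'] ?? [])
);

var_dump(count($merged)); // int(2) — only the two real entries survive
```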
