# app_level_leave_one_out_core_app_comparison.py
# Purpose: compare XGBoost, MLP, and TabNet on leave-one-core-app-out cross-validation
# Core logic:
# 1. Extract the core app name (numeric suffixes merged, character suffixes kept distinct)
# 2. Share a single feature-preprocessing pipeline (categorical encoding, missing-value imputation, scaling)
# 3. Run leave-one-out validation for each model
# 4. Compare their classification performance
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedGroupKFold
from sklearn.preprocessing import LabelEncoder, RobustScaler
from sklearn.metrics import classification_report, accuracy_score, confusion_matrix, ConfusionMatrixDisplay
import xgboost as xgb
import matplotlib as mpl
import matplotlib.font_manager as fm
import os
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping
from pytorch_tabnet.tab_model import TabNetClassifier
pd.set_option('future.no_silent_downcasting', True)
# ---------- Global parameters ----------
SELECTED_TOP_N = 100  # number of top features to keep
RESULTS_DIR = "app_loo_cv_results_core_comparison"  # output directory
# MLP hyperparameters (tunable)
MLP_HIDDEN1 = 128  # units in the first hidden layer
MLP_HIDDEN2 = 64   # units in the second hidden layer
MLP_DROPOUT = 0.3  # dropout rate
MLP_EPOCHS = 100   # maximum training epochs
MLP_BATCH = 32     # batch size
# ----------------------------------------
# Create the results directory if it does not exist
os.makedirs(RESULTS_DIR, exist_ok=True)
def set_chinese_font():
    """Configure a Chinese-capable font (so every text element renders correctly)."""
    common_fonts = ['SimHei', 'Microsoft YaHei', 'KaiTi', 'SimSun', 'FangSong']
    available_fonts = {f.name for f in fm.fontManager.ttflist}
    # Prefer a commonly available Chinese font
    font = next((f for f in common_fonts if f in available_fonts), None)
    if not font:
        # Fall back to any font whose name contains "CJK" (typical on Linux/macOS)
        cjk_fonts = [f for f in fm.fontManager.ttflist if 'CJK' in f.name]
        font = cjk_fonts[0].name if cjk_fonts else None
    if not font:
        # Last resort: register Droid Sans Fallback (common on Linux)
        droid_font_path = "/usr/share/fonts/truetype/droid/DroidSansFallbackFull.ttf"
        if os.path.exists(droid_font_path):
            fm.fontManager.addfont(droid_font_path)
            font = "Droid Sans Fallback"
    # Report the selected font (for debugging)
    print(f"\nUsing Chinese font: {font}")
    if font:
        # Global font family (covers all text elements)
        mpl.rcParams['font.family'] = font
        # Also set sans-serif explicitly (Matplotlib's default family)
        mpl.rcParams['font.sans-serif'] = [font]
    # Render the minus sign correctly with CJK fonts
    mpl.rcParams['axes.unicode_minus'] = False
    # Save figures at high DPI (avoids blur)
    plt.rcParams['savefig.dpi'] = 300
    plt.rcParams['figure.autolayout'] = True
set_chinese_font()
# ========== 1. Label mapping & retained classes ==========
category_mapping = {
    "001": "游戏", "002": "云游戏", "101": "语音通话", "102": "视频通话",
    "202": "视频媒体", "301": "直播", "501": "文件下载", "502": "文件上传"
}
keep_ids = list(category_mapping.keys())  # keep only these second-level labels
# ========== 2. Data loading, filtering & core-app extraction ==========
def extract_app_core(app_name: str) -> str:
    """
    Extract the core app name:
    - numeric suffixes are stripped (e.g. baiduNetDisk_upload-1 -> baiduNetDisk_upload)
    - character suffixes are kept (e.g. wtfast-fact -> wtfast-fact)
    - names without a suffix are returned unchanged (e.g. test -> test)
    """
    parts = app_name.split('-')
    if len(parts) < 2:  # no separator: return as-is
        return app_name
    # Check whether the last part is purely numeric
    if parts[-1].isdigit():
        return '-'.join(parts[:-1])  # strip the numeric suffix
    else:
        return app_name  # keep the character suffix
# Load the data
df = pd.read_csv("../data/900_gather_feature_results.csv")
df['second_id'] = df['second_id'].astype(str).str.zfill(3)  # second-level labels as 3-digit strings
df = df[df['second_id'].isin(keep_ids)].reset_index(drop=True)  # drop rows with other labels
# Extract core app names
df['app_core'] = df['app_name'].apply(extract_app_core)
print(f"Data shape after filtering: {df.shape}")
print(f"Second-level label distribution:\n{df['second_id'].value_counts()}\n")
print(f"Core-app distribution (numeric suffixes merged):\n{df['app_core'].value_counts()}\n")
# ========== 3. Features / labels / groups ==========
# Features: drop ID, label, and timestamp columns
X = df.drop(columns=["first_id", "second_id", "app_name", "app_core", "timestamp"])
# Labels: the raw 3-digit strings (encoded below)
y = df["second_id"]
# Label encoding (string -> integer)
label_encoder = LabelEncoder()
y_encoded = label_encoder.fit_transform(y)
encoded_classes = label_encoder.classes_  # encoding order = sorted(keep_ids)
print(f"LabelEncoder class order: {encoded_classes}\n")
# ========== 4. Feature-importance computation (XGBoost-based) ==========
def preprocess_data(X_train, X_test, y_train, y_test):
    """Shared feature preprocessing (used by XGBoost, MLP, and TabNet)."""
    # 1. Encode categorical features (fitted independently within each fold)
    cat_cols = X_train.select_dtypes(include=["object"]).columns
    for col in cat_cols:
        le = LabelEncoder()
        X_train[col] = le.fit_transform(X_train[col])
        X_test[col] = le.transform(X_test[col])  # apply the training-set encoder to the test set
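        # Caveat (pipeline assumption): LabelEncoder.transform raises ValueError
        # when the test fold contains a category never seen in training. With
        # leave-one-app-out splits this can happen; a defensive variant would map
        # unseen categories to a sentinel value before transforming.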
    # 2. Impute missing/invalid values with the training-set median
    for col in X_train.columns:
        med = X_train[col].median()
        X_train[col] = X_train[col].fillna(med).replace([np.inf, -np.inf], med)
        X_test[col] = X_test[col].fillna(med).replace([np.inf, -np.inf], med)
    # 3. Scale with a RobustScaler fitted on the training set only
    scaler = RobustScaler()
    X_tr = scaler.fit_transform(X_train)
    X_te = scaler.transform(X_test)
    return X_tr, X_te, y_train, y_test
def train_xgb(X_train, X_test, y_train, y_test, params):
    """Train XGBoost on the shared preprocessing output."""
    X_tr, X_te, y_tr, y_te = preprocess_data(X_train, X_test, y_train, y_test)
    # Train with the native xgboost API
    dtrain = xgb.DMatrix(X_tr, label=y_tr, feature_names=X_train.columns.tolist())
    dtest = xgb.DMatrix(X_te, label=y_te, feature_names=X_test.columns.tolist())
    model = xgb.train(params, dtrain, num_boost_round=100)
    y_pred = model.predict(dtest).astype(int)  # multi:softmax yields class indices
    imp = model.get_score(importance_type='gain')
    return y_pred, imp
# Base XGBoost parameters
xgb_params = {
    "objective": "multi:softmax",  # predict() then returns class indices directly
    "num_class": len(encoded_classes),
    "max_depth": 3,
    "eta": 0.2,
    "eval_metric": "merror",
    "verbosity": 0,
    "seed": 42  # the native train() API uses "seed", not "random_state"
}
# Compute feature importance via stratified group K-fold
sgkf = StratifiedGroupKFold(n_splits=4, shuffle=True, random_state=36)
fold_importance = []
for fold, (tr_idx, te_idx) in enumerate(sgkf.split(X, y_encoded, groups=df["app_core"]), 1):
    X_tr = X.iloc[tr_idx].copy()
    X_te = X.iloc[te_idx].copy()
    y_tr, y_te = y_encoded[tr_idx], y_encoded[te_idx]
    _, imp = train_xgb(X_tr, X_te, y_tr, y_te, xgb_params)
    fold_importance.append(imp)
# Average importance across folds and keep the Top-N features
avg_imp = {}
for d in fold_importance:
    for feat, val in d.items():
        avg_imp[feat] = avg_imp.get(feat, []) + [val]
for feat in avg_imp:
    avg_imp[feat] = np.mean(avg_imp[feat])
sorted_features = sorted(avg_imp.items(), key=lambda x: x[1], reverse=True)
selected_features = [f for f, _ in sorted_features[:SELECTED_TOP_N]]
print(f"Selected Top-{SELECTED_TOP_N} features: {selected_features[:5]}...\n")
# ========== 5. Deep-learning models ==========
def build_mlp(input_dim, num_classes):
    """Build a multilayer perceptron for multi-class classification."""
    model = Sequential([
        Dense(MLP_HIDDEN1, activation='relu', input_shape=(input_dim,)),
        Dropout(MLP_DROPOUT),
        Dense(MLP_HIDDEN2, activation='relu'),
        Dropout(MLP_DROPOUT),
        Dense(num_classes, activation='softmax')  # softmax output for multi-class
    ])
    model.compile(
        optimizer=Adam(learning_rate=0.001),
        loss='sparse_categorical_crossentropy',  # labels are integer-encoded
        metrics=['accuracy']
    )
    return model
def train_mlp(X_train, X_test, y_train, y_test):
    """Train the MLP on the shared preprocessing output."""
    X_tr, X_te, y_tr, y_te = preprocess_data(X_train, X_test, y_train, y_test)
    # Build the model
    input_dim = X_tr.shape[1]
    num_classes = len(np.unique(y_tr))
    model = build_mlp(input_dim, num_classes)
    # Early stopping to curb overfitting
    early_stopping = EarlyStopping(
        monitor='val_loss',
        patience=5,  # stop after 5 epochs without improvement
        restore_best_weights=True  # restore the best weights afterwards
    )
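    # Caveat: the held-out fold doubles as the validation set below, so early
    # stopping effectively peeks at the test data; a stricter protocol would
    # carve a validation split out of the training data instead.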
    # Train the model
    model.fit(
        X_tr, y_tr,
        epochs=MLP_EPOCHS,
        batch_size=MLP_BATCH,
        validation_data=(X_te, y_te),
        callbacks=[early_stopping],
        verbose=0
    )
    # Predict the most probable class
    y_pred_prob = model.predict(X_te, verbose=0)
    y_pred = np.argmax(y_pred_prob, axis=1)
    return y_pred
def build_tabnet() -> TabNetClassifier:
    """
    Build a TabNet classifier (hyperparameters tuned for traffic features).
    Parameter notes:
    - n_d/n_a: decision/attention widths (control model capacity)
    - n_steps: number of attention steps (more steps capture more feature interactions)
    - gamma: feature-reuse coefficient (controls attention diversity)
    """
    model = TabNetClassifier(
        n_d=64,         # decision width (kept equal to the attention width)
        n_a=64,         # attention width
        n_steps=5,      # number of attention steps
        gamma=1.5,      # feature-reuse coefficient
        cat_idxs=[],    # indices of categorical features (none after preprocessing)
        cat_dims=[],    # cardinalities of categorical features
        cat_emb_dim=1,  # embedding size for categorical features (unused here)
        verbose=0       # suppress per-epoch output
    )
    # Note: input and output dimensions are inferred from the data in fit(), so
    # they are not constructor arguments (passing output_dim would raise a TypeError).
    return model
def train_tabnet(X_train: pd.DataFrame, X_test: pd.DataFrame, y_train: np.ndarray, y_test: np.ndarray) -> np.ndarray:
    """Train TabNet on the shared preprocessing output."""
    # 1. Shared preprocessing (categorical encoding, imputation, scaling)
    X_tr, X_te, y_tr, y_te = preprocess_data(X_train, X_test, y_train, y_test)
    # 2. Cast to the dtypes TabNet expects (float32 features, int64 labels)
    X_tr = X_tr.astype(np.float32)
    X_te = X_te.astype(np.float32)
    y_tr = y_tr.astype(np.int64)
    y_te = y_te.astype(np.int64)
    # 3. Build and train the model
    model = build_tabnet()
    # TabNet's built-in early stopping (patience=5) guards against overfitting
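    # Caveat: as with the MLP, the held-out fold serves as the eval_set here,
    # so early stopping peeks at the test data.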
    model.fit(
        X_train=X_tr,
        y_train=y_tr,
        eval_set=[(X_te, y_te)],  # validation set as a list of tuples
        max_epochs=100,
        batch_size=32,
        patience=5,   # early-stopping patience
        eval_metric=['accuracy'],
        verbose=0
    )
    # 4. Predict (returns integer-encoded classes)
    y_pred = model.predict(X_te)
    return y_pred
# ========== 6. Leave-one-core-app-out cross-validation (XGBoost vs MLP vs TabNet) ==========
unique_app_cores = df['app_core'].unique()
print(f"\nNumber of distinct core apps to hold out: {len(unique_app_cores)}\n")
# ------------------------------
# 6.1 XGBoost cross-validation
# ------------------------------
print("\n" + "="*60)
print("Starting XGBoost cross-validation")
print("="*60)
xgb_results = {
    "true": [], "pred": [], "cores": [], "indices": []
}
for app_idx, test_core in enumerate(unique_app_cores, 1):
    print(f"\n{'='*20} XGBoost - held-out core app {app_idx}/{len(unique_app_cores)}: {test_core} {'='*20}")
    # Train/test split
    train_mask = df['app_core'] != test_core
    test_mask = df['app_core'] == test_core
    X_train = X[selected_features][train_mask].copy()
    X_test = X[selected_features][test_mask].copy()
    y_train = y_encoded[train_mask]
    y_test = y_encoded[test_mask]
    print(f"Training samples: {X_train.shape[0]}, test samples: {X_test.shape[0]}")
    # Train and predict
    y_pred, _ = train_xgb(X_train, X_test, y_train, y_test, xgb_params)
    # Store results
    xgb_results["true"].extend(label_encoder.inverse_transform(y_test))
    xgb_results["pred"].extend(label_encoder.inverse_transform(y_pred))
    xgb_results["cores"].extend([test_core] * len(y_test))
    xgb_results["indices"].extend(df[test_mask].index.tolist())
# ------------------------------
# 6.2 MLP cross-validation
# ------------------------------
print("\n" + "="*60)
print("Starting MLP cross-validation")
print("="*60)
mlp_results = {
    "true": [], "pred": [], "cores": [], "indices": []
}
for app_idx, test_core in enumerate(unique_app_cores, 1):
    print(f"\n{'='*20} MLP - held-out core app {app_idx}/{len(unique_app_cores)}: {test_core} {'='*20}")
    # Train/test split (identical to the XGBoost split)
    train_mask = df['app_core'] != test_core
    test_mask = df['app_core'] == test_core
    X_train = X[selected_features][train_mask].copy()
    X_test = X[selected_features][test_mask].copy()
    y_train = y_encoded[train_mask]
    y_test = y_encoded[test_mask]
    print(f"Training samples: {X_train.shape[0]}, test samples: {X_test.shape[0]}")
    # Train and predict
    y_pred = train_mlp(X_train, X_test, y_train, y_test)
    # Store results
    mlp_results["true"].extend(label_encoder.inverse_transform(y_test))
    mlp_results["pred"].extend(label_encoder.inverse_transform(y_pred))
    mlp_results["cores"].extend([test_core] * len(y_test))
    mlp_results["indices"].extend(df[test_mask].index.tolist())
# ------------------------------
# 6.3 TabNet cross-validation
# ------------------------------
print("\n" + "="*60)
print("Starting TabNet cross-validation")
print("="*60)
tabnet_results = {
    "true": [], "pred": [], "cores": [], "indices": []
}
for app_idx, test_core in enumerate(unique_app_cores, 1):
    print(f"\n{'='*20} TabNet - held-out core app {app_idx}/{len(unique_app_cores)}: {test_core} {'='*20}")
    # 1. Train/test split (identical to the XGBoost split)
    train_mask = df['app_core'] != test_core
    test_mask = df['app_core'] == test_core
    X_train = X[selected_features][train_mask].copy()
    X_test = X[selected_features][test_mask].copy()
    y_train = y_encoded[train_mask]
    y_test = y_encoded[test_mask]
    print(f"Training samples: {X_train.shape[0]}, test samples: {X_test.shape[0]}")
    # 2. Train and predict
    y_pred = train_tabnet(X_train, X_test, y_train, y_test)
    # 3. Store results (mapped back to string labels)
    tabnet_results["true"].extend(label_encoder.inverse_transform(y_test))
    tabnet_results["pred"].extend(label_encoder.inverse_transform(y_pred))
    tabnet_results["cores"].extend([test_core] * len(y_test))
    tabnet_results["indices"].extend(df[test_mask].index.tolist())
# ========== 7. Result aggregation & comparison ==========
class_names = [category_mapping[c] for c in encoded_classes]
def save_results(model_name, results, save_dir):
    """Persist per-sample results, per-app accuracy, and the confusion matrix for one model."""
    # Per-sample results
    df_res = df.loc[results["indices"], ['app_name', 'app_core', 'second_id']].reset_index(drop=True)
    df_res['true_label'] = [category_mapping[c] for c in results["true"]]
    df_res['pred_label'] = [category_mapping[c] for c in results["pred"]]
    df_res['correct'] = (np.array(results["true"]) == np.array(results["pred"])).astype(int)
    df_res.rename(columns={'second_id': 'true_second_id'}, inplace=True)
    df_res.to_csv(os.path.join(save_dir, f'{model_name}_cv_sample_results.csv'), index=False, encoding='utf-8-sig')
    # Core-app-level accuracy
    app_acc = df_res.groupby('app_core').agg(
        total=('correct', 'count'),
        correct=('correct', 'sum'),
        accuracy=('correct', 'mean')
    ).reset_index()
    app_acc['accuracy'] = app_acc['accuracy'].round(4)
    app_acc.to_csv(os.path.join(save_dir, f'{model_name}_app_core_accuracy.csv'), index=False, encoding='utf-8-sig')
    # Confusion matrix
    cm = confusion_matrix(results["true"], results["pred"], labels=encoded_classes)
    fig, ax = plt.subplots(figsize=(10, 8))  # pass ax so plot() draws on this figure
    disp = ConfusionMatrixDisplay(cm, display_labels=class_names)
    disp.plot(ax=ax, cmap=plt.cm.Blues, xticks_rotation=45, text_kw={'fontfamily': mpl.rcParams['font.family'], 'fontsize': 10})
    plt.xticks(fontfamily=mpl.rcParams['font.family'], fontsize=10)
    plt.yticks(fontfamily=mpl.rcParams['font.family'], fontsize=10)
    plt.title(f'{model_name} Confusion Matrix', fontfamily=mpl.rcParams['font.family'], fontsize=12)
    plt.tight_layout()
    plt.savefig(os.path.join(save_dir, f'{model_name}_confusion_matrix.png'), dpi=300, bbox_inches='tight')
    plt.close(fig)
    # Overall report
    print(f"\n{model_name} overall classification report:")
    print(classification_report(results["true"], results["pred"], target_names=class_names))
# Save and print XGBoost results
save_results("XGBoost", xgb_results, RESULTS_DIR)
# Save and print MLP results
save_results("MLP", mlp_results, RESULTS_DIR)
# Save and print TabNet results (reuses save_results)
save_results("TabNet", tabnet_results, RESULTS_DIR)
# Build the three-model comparison table (XGBoost vs MLP vs TabNet)
def generate_triple_comparison(xgb_report, mlp_report, tabnet_report):
comparison_df = pd.DataFrame({
'Metric': ['Precision (Macro)', 'Recall (Macro)', 'F1-Score (Macro)', 'Total Samples'],
'XGBoost': [
round(xgb_report['macro avg']['precision'], 4),
round(xgb_report['macro avg']['recall'], 4),
round(xgb_report['macro avg']['f1-score'], 4),
xgb_report['weighted avg']['support']
],
'MLP': [
round(mlp_report['macro avg']['precision'], 4),
round(mlp_report['macro avg']['recall'], 4),
round(mlp_report['macro avg']['f1-score'], 4),
mlp_report['weighted avg']['support']
],
'TabNet': [
round(tabnet_report['macro avg']['precision'], 4),
round(tabnet_report['macro avg']['recall'], 4),
round(tabnet_report['macro avg']['f1-score'], 4),
tabnet_report['weighted avg']['support']
]
})
return comparison_df
# Build the classification reports (dict form) and the comparison table
xgb_report = classification_report(xgb_results["true"], xgb_results["pred"], target_names=class_names, output_dict=True)
mlp_report = classification_report(mlp_results["true"], mlp_results["pred"], target_names=class_names, output_dict=True)
tabnet_report = classification_report(tabnet_results["true"], tabnet_results["pred"], target_names=class_names, output_dict=True)
triple_comparison_df = generate_triple_comparison(xgb_report, mlp_report, tabnet_report)
print("\n" + "="*60)
print("XGBoost vs MLP vs TabNet macro-metric comparison")
print("="*60)
print(triple_comparison_df)
triple_comparison_df.to_csv(os.path.join(RESULTS_DIR, 'triple_model_comparison.csv'), index=False, encoding='utf-8-sig')
# TODO: restructure the script so each model is encapsulated behind a common
# interface and the user can choose which models to train (see the sketch below).
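# A minimal sketch of that restructuring, assuming the functions and globals
# defined above. MODEL_REGISTRY, MODELS_TO_RUN, and run_loo_cv are hypothetical
# names introduced here for illustration, not part of the original pipeline.
MODEL_REGISTRY = {
    "XGBoost": lambda Xtr, Xte, ytr, yte: train_xgb(Xtr, Xte, ytr, yte, xgb_params)[0],
    "MLP": train_mlp,
    "TabNet": train_tabnet,
}
MODELS_TO_RUN = ["XGBoost", "MLP", "TabNet"]  # edit this list to pick models

def run_loo_cv(model_name, train_fn):
    """Run one leave-one-core-app-out pass; returns the results dict used above."""
    results = {"true": [], "pred": [], "cores": [], "indices": []}
    for app_idx, test_core in enumerate(unique_app_cores, 1):
        print(f"{model_name} - held-out core app {app_idx}/{len(unique_app_cores)}: {test_core}")
        train_mask = df['app_core'] != test_core
        test_mask = df['app_core'] == test_core
        y_pred = train_fn(X[selected_features][train_mask].copy(),
                          X[selected_features][test_mask].copy(),
                          y_encoded[train_mask], y_encoded[test_mask])
        results["true"].extend(label_encoder.inverse_transform(y_encoded[test_mask]))
        results["pred"].extend(label_encoder.inverse_transform(y_pred))
        results["cores"].extend([test_core] * int(test_mask.sum()))
        results["indices"].extend(df[test_mask].index.tolist())
    return results

# Example usage (commented out because it would repeat the runs above):
# for name in MODELS_TO_RUN:
#     save_results(name, run_loo_cv(name, MODEL_REGISTRY[name]), RESULTS_DIR)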