day 18

常见的特征筛选算法
1.方差筛选
2.皮尔逊相关系数筛选
3.lasso筛选
4.树模型重要性
5.shap重要性
6.递归特征消除RFE

作业:对心脏病数据集完成特征筛选,对比精度

import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression, LassoCV
from sklearn.feature_selection import VarianceThreshold, SelectKBest, f_classif, RFE
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
import shap
# Load the heart-disease dataset; expects heart.csv in the working directory
# with a binary 'target' column (1 = disease, 0 = healthy).
data=pd.read_csv('heart.csv')
data.head()  # notebook remnant; has no effect when run as a script
# Separate features and target variable
X = data.drop('target', axis=1)
y = data['target']

# Standardize features (zero mean, unit variance) so scale-sensitive methods
# below (Lasso, logistic regression) are not dominated by wide-range columns.
# NOTE(review): fitting the scaler on the full dataset before the split leaks
# test-set statistics into training — acceptable for a homework comparison,
# but fit on X_train only for a rigorous evaluation.
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# Split into train/test sets (80/20, fixed seed for reproducibility)
X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.2, random_state=42)


# 1. Variance threshold: drop near-constant features (variance <= 0.1).
# Note the features were standardized above, so this mostly acts as a sanity
# filter rather than removing many columns.
var_selector = VarianceThreshold(threshold=0.1)
X_tr_var = var_selector.fit_transform(X_train)
X_te_var = var_selector.transform(X_test)

clf_var = LogisticRegression()
clf_var.fit(X_tr_var, y_train)
acc_var = accuracy_score(y_test, clf_var.predict(X_te_var))


# 2. Univariate selection: keep the 10 features with the highest ANOVA F-score
# against the target (f_classif; a close stand-in for Pearson-correlation
# ranking on standardized data).
k_best = SelectKBest(score_func=f_classif, k=10)
X_tr_best = k_best.fit_transform(X_train, y_train)
X_te_best = k_best.transform(X_test)

clf_best = LogisticRegression()
clf_best.fit(X_tr_best, y_train)
acc_pearson = accuracy_score(y_test, clf_best.predict(X_te_best))


# 3. Lasso selection: fit an L1-penalized regression (alpha chosen by CV) and
# keep only the features whose coefficient survives the shrinkage (non-zero).
lasso_cv = LassoCV()
lasso_cv.fit(X_train, y_train)
keep_mask = lasso_cv.coef_ != 0  # boolean mask over feature columns

clf_lasso = LogisticRegression()
clf_lasso.fit(X_train[:, keep_mask], y_train)
acc_lasso = accuracy_score(y_test, clf_lasso.predict(X_test[:, keep_mask]))


# 4. Tree-based importance: train a random forest and keep the 10 features
# with the largest impurity-based importance.
# Fix: pin random_state so the forest (and therefore the selected features,
# this section's accuracy, and the SHAP section that reuses `rf`) is
# reproducible — consistent with random_state=42 used in train_test_split.
rf = RandomForestClassifier(random_state=42)
rf.fit(X_train, y_train)
feature_importances = rf.feature_importances_
sorted_indices = np.argsort(feature_importances)[::-1]  # descending importance
top_n = 10
selected_indices = sorted_indices[:top_n]
X_train_tree = X_train[:, selected_indices]
X_test_tree = X_test[:, selected_indices]

model_tree = LogisticRegression()
model_tree.fit(X_train_tree, y_train)
y_pred_tree = model_tree.predict(X_test_tree)
acc_tree = accuracy_score(y_test, y_pred_tree)


# 5. SHAP importance selection.
# For a classifier, shap_values.values has shape (n_samples, n_features, n_classes).
# Averaging |SHAP| over axis 0 gives (n_features, n_classes); the ORIGINAL code
# then averaged over axis 0 AGAIN, collapsing the FEATURE axis and leaving a
# per-class array — argsort on that selects meaningless columns, which is why
# the SHAP accuracy collapsed to ~0.59. Fix: average over the class axis.
explainer = shap.Explainer(rf)
shap_values = explainer(X_train)
shap_importance = np.abs(shap_values.values).mean(axis=0)  # (n_features, n_classes) or (n_features,)
if len(shap_importance.shape) > 1:
    shap_importance = shap_importance.mean(axis=-1)  # average over classes, keep features
top_n_shap = 10
shap_selected_indices = np.argsort(shap_importance)[::-1][:top_n_shap]
X_train_shap = X_train[:, shap_selected_indices]
X_test_shap = X_test[:, shap_selected_indices]

model_shap = LogisticRegression()
model_shap.fit(X_train_shap, y_train)
y_pred_shap = model_shap.predict(X_test_shap)
acc_shap = accuracy_score(y_test, y_pred_shap)


# 6. Recursive feature elimination (RFE): repeatedly drop the weakest feature
# (smallest |coef|) until 10 remain.
# Fix: this section was missing entirely — acc_rfe was printed below but never
# computed, so the script crashed with NameError before printing anything.
selector_rfe = RFE(estimator=LogisticRegression(max_iter=1000), n_features_to_select=10)
X_train_rfe = selector_rfe.fit_transform(X_train, y_train)
X_test_rfe = selector_rfe.transform(X_test)

model_rfe = LogisticRegression()
model_rfe.fit(X_train_rfe, y_train)
acc_rfe = accuracy_score(y_test, model_rfe.predict(X_test_rfe))

# Report the test accuracy of each feature-selection method
print("方差筛选后的精度:", acc_var)
print("皮尔逊相关系数筛选后的精度:", acc_pearson)
print("Lasso 筛选后的精度:", acc_lasso)
print("树模型重要性筛选后的精度:", acc_tree)
print("SHAP 重要性筛选后的精度:", acc_shap)
print("递归特征消除筛选后的精度:", acc_rfe)

对比:

方差筛选后的精度: 0.8524590163934426
皮尔逊相关系数筛选后的精度: 0.8688524590163934
Lasso 筛选后的精度: 0.8688524590163934
树模型重要性筛选后的精度: 0.8524590163934426
SHAP 重要性筛选后的精度: 0.5901639344262295
递归特征消除筛选后的精度: 0.8852459016393442

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值