Python Check-in DAY19

## Import the required libraries

```python
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import random
import numpy as np
import time
import shap
from sklearn.svm import SVC  # support vector machine classifier
# from sklearn.neighbors import KNeighborsClassifier  # K-nearest neighbors classifier
# from sklearn.linear_model import LogisticRegression  # logistic regression classifier
import xgboost as xgb  # XGBoost classifier
import lightgbm as lgb  # LightGBM classifier
from sklearn.ensemble import RandomForestClassifier  # random forest classifier
# from catboost import CatBoostClassifier  # CatBoost classifier
# from sklearn.tree import DecisionTreeClassifier  # decision tree classifier
# from sklearn.naive_bayes import GaussianNB  # Gaussian naive Bayes classifier
from skopt import BayesSearchCV
from skopt.space import Integer
from deap import base, creator, tools, algorithms
from sklearn.model_selection import StratifiedKFold, cross_validate  # stratified K-fold and cross-validation utilities
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score  # classification performance metrics
from sklearn.metrics import classification_report, confusion_matrix  # classification report and confusion matrix
from sklearn.metrics import make_scorer  # wrap a custom metric function as a scorer
import warnings
warnings.filterwarnings("ignore")  # suppress all warnings

# Clustering
from sklearn.cluster import KMeans, DBSCAN, AgglomerativeClustering
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.metrics import silhouette_score, calinski_harabasz_score, davies_bouldin_score

# 3D visualization
from mpl_toolkits.mplot3d import Axes3D

# Configure a Chinese-capable font and correct minus-sign rendering
plt.rcParams['font.sans-serif'] = ['STHeiti']
plt.rcParams['axes.unicode_minus'] = False  # must be False for minus signs to render correctly
plt.rcParams['figure.dpi'] = 100
```

```python
# Read the data and inspect basic information
data = pd.read_csv(r'data.csv')
data.info()  # info() prints directly and returns None, so don't embed it in an f-string
print(data.isnull().sum())
```

```python
# Plots
# plt.figure(figsize=(6,4))
# sns.boxplot(x=data['Annual Income'])
# plt.title('年收入箱线图')
# plt.xlabel('annual income')
# plt.tight_layout()
# plt.show()

# plt.figure(figsize=(6,4))
# sns.boxplot(
#     x='Credit Default',
#     y='Annual Income',
#     data=data.dropna(),
# )
# plt.title('年收入分类箱线图')
# plt.xlabel('credit default')
# plt.ylabel('annual income')  # the y axis is income, not a count
# plt.xticks([0,1],['n','y'])
# plt.tight_layout()
# plt.show()

# plt.figure(figsize=(6,4))
# sns.histplot(
#     x='Annual Income',
#     hue='Credit Default',
#     hue_order=[0,1],
#     data=data.dropna(),
#     element='bars',
#     kde=True
# )
# plt.title('年收入直方图')  # this plot is a histogram, not a boxplot
# plt.xlabel('Annual income')
# plt.ylabel('count')
# plt.legend(labels=['n','y'])
# plt.tight_layout()
# plt.show()
```

```python
features = ['Annual Income','Current Credit Balance','Years of Credit History','Credit Score']

# Boxplot subplots
# fig,axes = plt.subplots(2,2,figsize=(6,4))
# for i,feature in enumerate(features):
#     row,col = i//2,i%2
#     sns.boxenplot(x=data[feature],ax=axes[row,col])
#     axes[row,col].set_title(f'boxplot of {feature}')
#     axes[row,col].set_xlabel(feature)
# plt.tight_layout()
# plt.show()

# Boxplot subplots by class
# fig,axes = plt.subplots(2,2,figsize=(6,4))
# for i,feature in enumerate(features):
#     row,col = i//2,i%2
#     sns.boxenplot(
#         x='Credit Default',
#         y=feature,
#         data=data.dropna(),
#         ax=axes[row,col]
#     )
#     axes[row,col].set_title(f'boxplot of {feature}')
#     axes[row,col].set_xlabel('Credit Default')
#     axes[row,col].set_ylabel(feature)  # the y axis is the feature value, not a count
# plt.tight_layout()
# plt.show()

# Histogram subplots by class
# fig,axes = plt.subplots(2,2,figsize=(6,4))
# for i,feature in enumerate(features):
#     row,col = i//2,i%2
#     sns.histplot(
#         x=feature,
#         hue='Credit Default',
#         hue_order=[0,1],
#         data=data.dropna(),
#         element='bars',
#         kde=True,
#         ax=axes[row,col]
#     )
#     axes[row,col].set_title(f'histplot of {feature}')
#     axes[row,col].set_xlabel(feature)
#     axes[row,col].set_ylabel('count')
#     axes[row,col].legend(labels=['n','y'])
# plt.tight_layout()
# plt.show()
```

```python
# Fill missing values: mean for numeric columns, mode for categorical columns
for i in data.columns:
    if data[i].dtype != 'object':
        if data[i].isnull().sum() > 0:
            data[i].fillna(data[i].mean(), inplace=True)
    else:
        if data[i].isnull().sum() > 0:
            data[i].fillna(data[i].mode()[0], inplace=True)

# Ordinal-encode job tenure (longer tenure maps to a smaller code)
mapping = {'10+ years': 0,
           '9 years': 1,
           '8 years': 2,
           '7 years': 3,
           '6 years': 4,
           '5 years': 5,
           '4 years': 6,
           '3 years': 7,
           '2 years': 8,
           '1 year': 9,
           '< 1 year': 10}
data['Years in current job'] = data['Years in current job'].map(mapping)

# One-hot encode, then convert the newly created dummy columns from bool to int
dummies_list = []
data2 = pd.read_csv(r'data.csv')  # re-read the raw data to diff the column sets
data = pd.get_dummies(data=data, drop_first=True)
for i in data.columns:
    if i not in data2.columns:
        dummies_list.append(i)
for i in dummies_list:
    data[i] = data[i].astype(int)
data.info()
print(data.columns)
```
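The column-diff loop above works, but pandas can emit integer dummies directly via the `dtype` argument of `get_dummies`, which also removes the need to re-read `data.csv`. A minimal alternative sketch, assuming a pandas version where `get_dummies` accepts `dtype` (it has for many releases):

```python
# Equivalent one-step encoding (alternative to the loop above): emit 0/1 integer dummies directly
data = pd.get_dummies(data=data, drop_first=True, dtype=int)
```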

```python
# Correlation heatmap
# continuous_list = ['Annual Income', 'Years in current job', 'Tax Liens',
#                    'Number of Open Accounts', 'Years of Credit History',
#                    'Maximum Open Credit', 'Number of Credit Problems',
#                    'Months since last delinquent', 'Bankruptcies', 'Current Loan Amount',
#                    'Current Credit Balance', 'Monthly Debt', 'Credit Score']
# corr_matrix = data[continuous_list].corr()  # renamed: don't shadow the imported confusion_matrix function
# plt.figure(figsize=(12,10))
# sns.heatmap(corr_matrix, annot=True, cmap='coolwarm', vmin=-1, vmax=1)
# plt.xticks(rotation=45, ha='right')
# plt.tight_layout()
# plt.show()
```

```python
# Train/test split
from sklearn.model_selection import train_test_split
x = data.drop('Credit Default', axis=1)
y = data['Credit Default']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)
print(f'train: {x_train.shape}\ntest: {x_test.shape}')
```

```python
# Model training
## SVM
# start_time = time.time()
# svm_model = SVC(random_state=42, class_weight='balanced')
# svm_model.fit(x_train, y_train)
# svm_pred = svm_model.predict(x_test)
# end_time = time.time()
# print(f'训练与预测耗时:{end_time-start_time:.4f}')
# print('\nSVM分类报告')
# print(classification_report(y_test, svm_pred))
# print('\nSVM混淆矩阵')
# print(confusion_matrix(y_test, svm_pred))

## Random forest
# start_time = time.time()
# rf_model = RandomForestClassifier(random_state=42, class_weight='balanced')
# rf_model.fit(x_train, y_train)
# rf_pred = rf_model.predict(x_test)
# end_time = time.time()  # was missing, so the printed time came from the SVM block
# print(f'训练与预测耗时:{end_time-start_time:.4f}')
# print('\n随机森林分类报告')
# print(classification_report(y_test, rf_pred))
# print('\n随机森林混淆矩阵')
# print(confusion_matrix(y_test, rf_pred))
```

```python
# Define Youden's J statistic: sensitivity + specificity - 1
def youden_score(y_true, y_pred):
    tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
    sensitivity = tp / (tp + fn)
    specificity = tn / (tn + fp)
    return sensitivity + specificity - 1

youden_scorer = make_scorer(youden_score)  # wrap it so searches and CV can optimize it
```
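Once wrapped by `make_scorer`, the metric plugs into any scikit-learn cross-validation utility. A minimal sanity-check sketch using the `cross_validate` and `StratifiedKFold` imports from the top of the script; the baseline `RandomForestClassifier` here is just an illustrative choice, not part of the original run:

```python
# Hypothetical usage: estimate Youden's J for a baseline model via 5-fold stratified CV
cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
cv_res = cross_validate(
    RandomForestClassifier(random_state=42, class_weight='balanced'),
    x_train, y_train,
    cv=cv,
    scoring={'youden': youden_scorer, 'accuracy': 'accuracy'},
)
print(f"CV Youden's J: {cv_res['test_youden'].mean():.3f} ± {cv_res['test_youden'].std():.3f}")
```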

```python
# Class-weighted random forest with grid search after SMOTE oversampling
# SMOTE
# from imblearn.over_sampling import SMOTE
# smote = SMOTE(random_state=42)
# x_train_smote, y_train_smote = smote.fit_resample(x_train, y_train)

# Grid search with stratified cross-validation
# from sklearn.model_selection import GridSearchCV
# cv_strategy = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
# param_grid = {
#     'n_estimators': [5, 10, 15],
#     'max_depth': [None, 5, 10],
#     'min_samples_split': [2, 3, 4],
#     'min_samples_leaf': [2, 3, 4]
# }
# grid_search = GridSearchCV(
#     estimator=RandomForestClassifier(random_state=42, class_weight='balanced'),
#     param_grid=param_grid,
#     cv=cv_strategy,
#     n_jobs=-1,
#     scoring=youden_scorer
# )
# start_time = time.time()
# grid_search.fit(x_train_smote, y_train_smote)
# end_time = time.time()
# best_model = grid_search.best_estimator_
# best_pred = best_model.predict(x_test)
# print(f'网格搜索耗时:{end_time-start_time:.4f}秒')
# print('最佳参数:', grid_search.best_params_)
# print('\n带权重网格搜索优化后的随机森林在测试集上的分类报告')
# print(classification_report(y_test, best_pred))
# print('网格搜索优化后的随机森林在测试集上的混淆矩阵')
# print(confusion_matrix(y_test, best_pred))
```

```python
# Class-weighted random forest with Bayesian optimization after SMOTE oversampling
# SMOTE
# from imblearn.over_sampling import SMOTE
# smote = SMOTE(random_state=42)
# x_train_smote, y_train_smote = smote.fit_resample(x_train, y_train)

# Bayesian optimization with stratified cross-validation
# (BayesSearchCV is already imported from skopt at the top; GridSearchCV is not needed here)
# cv_strategy = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
# search_space = {
#     'n_estimators': Integer(1, 5),
#     'max_depth': Integer(1, 5),
#     'min_samples_split': (2, 6),   # plain int tuples are treated as Integer dimensions by skopt
#     'min_samples_leaf': (1, 5)
# }
# bayes_search = BayesSearchCV(
#     estimator=RandomForestClassifier(random_state=42, class_weight='balanced'),
#     search_spaces=search_space,
#     cv=cv_strategy,
#     n_jobs=-1,
#     scoring=youden_scorer
# )
# start_time = time.time()
# bayes_search.fit(x_train_smote, y_train_smote)
# end_time = time.time()
# best_model = bayes_search.best_estimator_
# best_pred = best_model.predict(x_test)
# print(f'贝叶斯优化耗时:{end_time-start_time:.4f}秒')
# print('最佳参数', bayes_search.best_params_)
# print('\n贝叶斯优化后的随机森林在测试集上的分类报告')
# print(classification_report(y_test, best_pred))
# print('\n贝叶斯优化后的随机森林在测试集上的混淆矩阵')
# print(confusion_matrix(y_test, best_pred))
```

```python
# SHAP analysis
# start_time = time.time()
# explainer = shap.TreeExplainer(best_model)
# shap_values = explainer.shap_values(x_test)
# end_time = time.time()
# print(f'shap分析耗时: {end_time - start_time:.4f} 秒')
# print('shap_values shape:', shap_values.shape)
# print('shap_values[0,:,:] shape:', shap_values[0,:,:].shape)  # shap_values[0] is equivalent: contributions of all features to all classes for the first sample; trailing slices may be omitted, the leading index may not
# print('shap_values[:,:,0] shape:', shap_values[:,:,0].shape)
# print('x_test shape:', x_test.shape)

# SHAP feature-importance bar plot (summary plot, bar)
# print('--- SHAP 特征重要性条形图 ---')
# shap.summary_plot(
#     shap_values[:,:,0],
#     x_test,
#     plot_type='bar',
#     show=False
# )
# plt.title('SHAP特征重要性条形图')
# plt.tight_layout()
# plt.show()

# SHAP feature-importance beeswarm/violin plot
# print('--- SHAP 特征重要性蜂巢图 ---')
# shap.summary_plot(
#     shap_values[:,:,0],
#     x_test,
#     plot_type='violin',  # fixed: the keyword is plot_type, not plt_type
#     show=False
# )
# plt.tight_layout()
# plt.show()
```

```python
# Standardize the features; clustering derives new structure from the independent variables alone, with no reference to the target
scaler = StandardScaler()
x_scaled = scaler.fit_transform(x)

# KMeans++ clustering: scan k and collect evaluation metrics
k_range = range(2, 5)
inertia_value = []
silhouette_scores = []
ch_scores = []
db_scores = []
start_time = time.time()
for k in k_range:
    kmeans = KMeans(n_clusters=k, random_state=42)
    kmeans_label = kmeans.fit_predict(x_scaled)  # cluster assignment for each data point
    inertia_value.append(kmeans.inertia_)  # inertia (elbow method)
    silhouette = silhouette_score(x_scaled, kmeans_label)  # silhouette coefficient
    silhouette_scores.append(silhouette)
    ch = calinski_harabasz_score(x_scaled, kmeans_label)  # Calinski-Harabasz index
    ch_scores.append(ch)
    db = davies_bouldin_score(x_scaled, kmeans_label)  # Davies-Bouldin index
    db_scores.append(db)  # fixed: was appending ch, so db_scores duplicated ch_scores
    # print(f'k={k}\n 惯性:{kmeans.inertia_:.2f}\n轮廓系数:{silhouette:.3f}\n CH系数:{ch:.2f}\n DB{db:.3f}')
end_time = time.time()
print(f'聚类分析耗时:{end_time-start_time:.4f}')
```
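With the four metric lists filled, candidate values of k can also be read off programmatically rather than eyeballed from the plots below. A small sketch under the usual conventions (silhouette and CH: higher is better; DB: lower is better; the elbow itself is still judged visually); this is illustrative, not part of the original script:

```python
# Pick candidate k values from the collected metrics
best_k_silhouette = k_range[int(np.argmax(silhouette_scores))]  # higher is better
best_k_ch = k_range[int(np.argmax(ch_scores))]                  # higher is better
best_k_db = k_range[int(np.argmin(db_scores))]                  # lower is better
print(f'silhouette -> k={best_k_silhouette}, CH -> k={best_k_ch}, DB -> k={best_k_db}')
```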

```python
# Metric plots
# plt.figure(figsize=(12,6))
# Elbow plot
# plt.subplot(2,2,1)
# plt.plot(k_range, inertia_value, marker='o')
# plt.title('肘部法则确定最优聚类数 k(惯性,越小越好)')
# plt.xlabel('聚类数 (k)')
# plt.ylabel('惯性')
# plt.grid(True)
# Silhouette plot
# plt.subplot(2,2,2)
# plt.plot(k_range, silhouette_scores, marker='o', color='orange')
# plt.title('轮廓系数确定最优聚类数 k(越大越好)')
# plt.xlabel('聚类数 (k)')
# plt.ylabel('轮廓系数')
# plt.grid(True)
# CH index plot
# plt.subplot(2,2,3)
# plt.plot(k_range, ch_scores, marker='o', color='red')
# plt.title('Calinski-Harabasz 指数确定最优聚类数 k(越大越好)')
# plt.xlabel('聚类数 (k)')
# plt.ylabel('CH 指数')
# plt.grid(True)
# DB index plot
# plt.subplot(2,2,4)
# plt.plot(k_range, db_scores, marker='o', color='yellow')
# plt.title('Davies-Bouldin 指数确定最优聚类数 k(越小越好)')  # title was missing
# plt.xlabel('聚类数 (k)')
# plt.ylabel('DB 指数')
# plt.grid(True)
# plt.tight_layout()
# plt.show()
```

```python
# Cluster with a chosen k
# selected_K = 4  # was 20, which contradicts k_range(2,5) and the four-cluster analysis below
# kmeans = KMeans(n_clusters=selected_K, random_state=42)
# kmeans_label = kmeans.fit_predict(x_scaled)
# NOTE: with the block above commented out, kmeans_label still holds the labels from the
# last loop iteration (k=4), which is what the cluster-0..3 analysis below relies on
x['KMeans_Cluster'] = kmeans_label
```

```python
# PCA projection to 2D
# pca = PCA(n_components=2)
# x_pca = pca.fit_transform(x_scaled)
# Cluster visualization
# plt.figure(figsize=(6,5))
# sns.scatterplot(
#     x=x_pca[:,0],
#     y=x_pca[:,1],
#     hue=kmeans_label,
#     palette='viridis'
# )
# plt.title(f'KMeans Clustering with k={selected_K} (PCA Visualization)')
# plt.xlabel('PCA Component 1')
# plt.ylabel('PCA Component 2')
# plt.show()

# Inspect the KMeans cluster label counts
# print(f'KMeans Cluster labels (k={selected_K}) added to X:')
# print(x[['KMeans_Cluster']].value_counts())
```

```python
# Fit a random forest to predict the cluster labels, then explain it with SHAP
x1 = x.drop('KMeans_Cluster', axis=1)  # features without the cluster label
y1 = x['KMeans_Cluster']
rf1_model = RandomForestClassifier(random_state=42, class_weight='balanced')
rf1_model.fit(x1, y1)

explainer = shap.TreeExplainer(rf1_model)
shap_values = explainer.shap_values(x1)
print(shap_values.shape)
```

```python
# --- 1. SHAP feature-importance bar plot (Summary Plot - Bar) ---
# print('--- 1. SHAP 特征重要性条形图 ---')
# shap.summary_plot(shap_values[:,:,0], x1, plot_type='bar', show=False)
# plt.title('SHAP Feature Importance (Bar Plot)')
# plt.tight_layout()
# plt.show()
```
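The four features hard-coded below were read off the bar plot; the same ranking can be computed directly from `shap_values` by averaging absolute contributions over samples and classes. A small sketch, assuming the 3-D `(samples, features, classes)` layout confirmed by the shape print above:

```python
# Rank features by mean |SHAP| across samples and classes (illustrative)
mean_abs_shap = np.abs(shap_values).mean(axis=(0, 2))  # one importance value per feature
top_features = pd.Series(mean_abs_shap, index=x1.columns).sort_values(ascending=False)
print(top_features.head(4))
```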

```python
selected_features = ['Purpose_debt consolidation','Home Ownership_Home Mortgage','Purpose_home improvements','Purpose_other']

# Check whether the selected features are discrete or continuous
# for feature in selected_features:
#     unique_count = x[feature].nunique()
#     print(f'{feature}的唯一值数量:{unique_count}')
#     if unique_count < 10:
#         print(f'{feature}可能是离散型变量')
#     else:
#         print(f'{feature}可能是连续型变量')

# Histograms over the full data
# fig,axes = plt.subplots(2,2,figsize=(10,8))
# axes = axes.flatten()
# for i,feature in enumerate(selected_features):
#     axes[i].hist(x[feature], bins=10)
#     axes[i].set_title(f'Histogram of {feature}')
#     axes[i].set_xlabel(feature)
#     axes[i].set_ylabel('Frequency')
# plt.tight_layout()
# plt.show()
```

```python
# Per-cluster distributions of the selected features
# print(x[['KMeans_Cluster']].value_counts())
# x_cluster0 = x[x['KMeans_Cluster']==0]
# x_cluster1 = x[x['KMeans_Cluster']==1]
# x_cluster2 = x[x['KMeans_Cluster']==2]
# x_cluster3 = x[x['KMeans_Cluster']==3]

# Cluster 0
# fig,axes = plt.subplots(2,2,figsize=(6,4))
# axes = axes.flatten()
# for i,feature in enumerate(selected_features):
#     axes[i].hist(x_cluster0[feature], bins=20)
#     axes[i].set_title(f'Histogram of {feature}')
#     axes[i].set_xlabel(feature)
#     axes[i].set_ylabel('frequency')
# plt.tight_layout()
# plt.show()

# Cluster 1
# fig,axes = plt.subplots(2,2,figsize=(6,4))
# axes = axes.flatten()
# for i,feature in enumerate(selected_features):
#     axes[i].hist(x_cluster1[feature], bins=20)
#     axes[i].set_title(f'Histogram of {feature}')
#     axes[i].set_xlabel(feature)
#     axes[i].set_ylabel('frequency')
# plt.tight_layout()
# plt.show()

# Cluster 2
# fig,axes = plt.subplots(2,2,figsize=(6,4))
# axes = axes.flatten()
# for i,feature in enumerate(selected_features):
#     axes[i].hist(x_cluster2[feature], bins=20)
#     axes[i].set_title(f'Histogram of {feature}')
#     axes[i].set_xlabel(feature)
#     axes[i].set_ylabel('frequency')
# plt.tight_layout()
# plt.show()

# Cluster 3
# fig,axes = plt.subplots(2,2,figsize=(6,4))
# axes = axes.flatten()
# for i,feature in enumerate(selected_features):
#     axes[i].hist(x_cluster3[feature], bins=20)
#     axes[i].set_title(f'Histogram of {feature}')
#     axes[i].set_xlabel(feature)
#     axes[i].set_ylabel('frequency')
# plt.tight_layout()
# plt.show()
```

print("--- 递归特征消除 (RFE) ---")

from sklearn.feature_selection import RFE

start_time=time.time()

base_model=RandomForestClassifier(random_state=42,class_weight='balanced')

rfe=RFE(base_model,n_features_to_select=5)

rfe.fit(x_train,y_train)

x_train_rfe=rfe.transform(x_train)

x_test_rfe=rfe.transform(x_test)

selected_features_rfe=x_train.columns[rfe.support_].tolist()

print(f"RFE筛选后保留的特征数量: {len(selected_features_rfe)}")

print(f"保留的特征: {selected_features_rfe}")

#训练随机森林模型

rf_model_rfe=RandomForestClassifier(random_state=42,class_weight='balanced')

rf_model_rfe.fit(x_train_rfe,y_train)

rf_pred_rfe=rf_model_rfe.predict(x_test_rfe)

end_time=time.time()

print(f"训练与预测耗时: {end_time - start_time:.4f} 秒")

print("\nRFE筛选后随机森林在测试集上的分类报告:")

print(classification_report(y_test, rf_pred_rfe))

print("RFE筛选后随机森林在测试集上的混淆矩阵:")

print(confusion_matrix(y_test, rf_pred_rfe))
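`n_features_to_select=5` is an arbitrary choice; scikit-learn's `RFECV` can instead pick the feature count by cross-validation. A hedged alternative sketch reusing the Youden scorer defined earlier (not part of the original run):

```python
from sklearn.feature_selection import RFECV

# Let cross-validation choose how many features to keep (illustrative alternative)
rfecv = RFECV(
    RandomForestClassifier(random_state=42, class_weight='balanced'),
    step=1,
    cv=StratifiedKFold(n_splits=5, shuffle=True, random_state=42),
    scoring=youden_scorer,
    n_jobs=-1,
)
rfecv.fit(x_train, y_train)
print(f'optimal feature count: {rfecv.n_features_}')
print(f'selected features: {x_train.columns[rfecv.support_].tolist()}')
```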

# Decision-tree model (on the processed startup-acquisition data)

---

## ✅ Goal

Train a **decision-tree classifier** on the `startup_encoded` dataset to predict whether a startup gets acquired (`Acquired = 1 or 0`).

---

## 🔧 Step 1: prepare features and labels

```python
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
import pandas as pd

# Feature matrix X (all input variables)
X = startup_encoded.drop('Acquired', axis=1)  # drop the target column

# Target vector y (acquired or not)
y = startup_encoded['Acquired']
```

---

## 🔧 Step 2: split into training and test sets

```python
# 80% train / 20% test split
X_train, X_test, y_train, y_test = train_test_split(
    X, y,
    test_size=0.2,
    random_state=42,
    stratify=y  # keep the class ratio consistent across the splits
)
```

---

## 🔧 Step 3: train the decision-tree model

```python
# Build the decision-tree classifier
clf = DecisionTreeClassifier(
    random_state=42,
    max_depth=6,             # limit tree depth to curb overfitting
    min_samples_split=10,    # minimum samples required to split an internal node
    min_samples_leaf=5,      # minimum samples per leaf
    class_weight='balanced'  # handle class imbalance
)

# Fit the model
clf.fit(X_train, y_train)
```

---

## 🔍 Step 4: evaluate the model

### (1) Predict on the test set

```python
y_pred = clf.predict(X_test)
```

### (2) Accuracy

```python
print("Accuracy:", accuracy_score(y_test, y_pred))
```

> Example output:
> ```
> Accuracy: 0.875
> ```

### (3) Confusion matrix

```python
print("Confusion Matrix:")
print(confusion_matrix(y_test, y_pred))
```

> Example:
> ```
> [[15  3]
>  [ 2 10]]
> ```
> - Correctly predicted not acquired: 15
> - Wrongly predicted not acquired: 3 (actually acquired)
> - Wrongly predicted acquired: 2 (actually not acquired)
> - Correctly predicted acquired: 10

### (4) Classification report (precision, recall, F1-score)

```python
print("Classification Report:")
print(classification_report(y_test, y_pred))
```

> Example output (excerpt):
> ```
>               precision    recall  f1-score   support
>            0       0.88      0.83      0.86        18
>            1       0.77      0.83      0.80        12
>     accuracy                           0.84        30
> ```

---

## 📊 Step 5: visualize the tree (optional)

```python
from sklearn.tree import plot_tree
import matplotlib.pyplot as plt

plt.figure(figsize=(20, 10))
plot_tree(clf,
          feature_names=X.columns,
          class_names=['Not Acquired', 'Acquired'],
          filled=True,
          fontsize=10)
plt.title("Decision Tree for Startup Acquisition Prediction", fontsize=16)
plt.show()
```

📌 This shows how the model splits on features such as `Tech_AI`, `Valuation`, and `Funding Stage`.

---

## 💡 Model insights and key features

You can inspect which features matter most:

```python
# Extract feature importances
importances = clf.feature_importances_
feature_importance_df = pd.DataFrame({
    'Feature': X.columns,
    'Importance': importances
}).sort_values(by='Importance', ascending=False)

# Show the top 10 features
print(feature_importance_df.head(10))
```

> Typical example output:
>
> | Feature | Importance |
> |------------------------|------------|
> | Tech_AI | 0.32 |
> | Valuation | 0.25 |
> | Funding Stage_Series B | 0.15 |
> | Annual Revenue | 0.10 |

✅ **Conclusions**:
- Companies using **AI technology** and carrying high **valuations** are the most likely to be acquired
- The **Series B** funding stage is the most active window for acquisitions

---

## ✅ Summary: what can this model do?

| Capability | Description |
|------|------|
| 🎯 Predict acquisition likelihood | Given a company's attributes, output whether it is likely to be acquired |
| 🔍 Surface key drivers | Identify which technology, industry, and financial signals matter most |
| 💼 Support investment decisions | Help VCs judge the exit path (IPO or acquisition) |

---

## Key takeaways

- **train_test_split** validates the model on data it has never seen.
- **DecisionTreeClassifier** handles classification with mixed feature types (numeric + dummy variables).
- **feature_importances_** reveals the key variables, improving interpretability.