This is the 23rd DataWhale team-learning session; the open-source materials are available at: https://github.com/datawhalechina/team-learning-data-mining/tree/master/EnsembleLearning
The previous session covered Bagging and Boosting; here we take a quick look at the Stacking method. Unlike those two, its principle does not depend as heavily on resampling, and it is also relatively easy to understand. For the underlying theory, see the tutorial documents or this post: https://blog.youkuaiyun.com/sinat_35821976/article/details/83622594
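In short, Stacking trains several first-layer (base) learners, collects their predictions as new features, and fits a second-layer (meta) learner on those features. As a point of reference, scikit-learn ships a ready-made implementation of this idea; a minimal sketch (the estimator choices here are illustrative, not part of this tutorial's code):

from sklearn.ensemble import StackingClassifier, RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC

stack = StackingClassifier(
    estimators=[("svc", SVC(probability=True)),
                ("rf", RandomForestClassifier())],  # first-layer (base) learners
    final_estimator=LogisticRegression(),  # meta-learner fit on the base predictions
    cv=5,  # out-of-fold predictions are used to train the meta-learner
)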
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use("ggplot")
%matplotlib inline
import seaborn as sns
# Create the dataset
from sklearn import datasets
from sklearn.datasets import make_blobs
from sklearn.model_selection import train_test_split
data, target = make_blobs(n_samples=10000, centers=2, random_state=1, cluster_std=1.0)
## Create the training and test sets
X_train1, X_test, y_train1, y_test = train_test_split(data, target, test_size=0.2, random_state=1)
## Create the training and validation sets
X_train, X_val, y_train, y_val = train_test_split(X_train1, y_train1, test_size=0.3, random_state=1)
print("The shape of training X:",X_train.shape)
print("The shape of training y:",y_train.shape)
print("The shape of test X:",X_test.shape)
print("The shape of test y:",y_test.shape)
print("The shape of validation X:",X_val.shape)
print("The shape of validation y:",y_val.shape)
The shape of training X: (5600, 2)
The shape of training y: (5600,)
The shape of test X: (2000, 2)
The shape of test y: (2000,)
The shape of validation X: (2400, 2)
The shape of validation y: (2400,)
# Plot the first 500 samples to get a quick look at the distribution
for i, j in zip(data[:500], target[:500]):
    plt.scatter(i[0], i[1], c='b', marker=(j + 3))  # marker style encodes the class label
# Set up the first-layer (base) classifiers
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
clfs = [SVC(probability=True),
        RandomForestClassifier(n_estimators=5, n_jobs=-1, criterion='gini'),
        KNeighborsClassifier()]
# Set up the second-layer (meta) learner
# Note: a plain linear regression is used here as the meta-learner,
# fit on the base classifiers' predicted probabilities
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
# Generate the first-layer predictions on the validation and test sets
val_features = np.zeros((X_val.shape[0], len(clfs)))    # meta-features for the validation set
test_features = np.zeros((X_test.shape[0], len(clfs)))  # meta-features for the test set
for i, clf in enumerate(clfs):
    clf.fit(X_train, y_train)
    val_feature = clf.predict_proba(X_val)[:, 1]   # predicted probability of class 1
    test_feature = clf.predict_proba(X_test)[:, 1]
    val_features[:, i] = val_feature
    test_features[:, i] = test_feature
val_features.shape, test_features.shape # predictions of the 3 base learners, recorded as meta-features
((2400, 3), (2000, 3))
val_features[:10] # first 10 rows of the validation meta-features
array([[2.08726662e-04, 0.00000000e+00, 0.00000000e+00],
[5.27987796e-04, 0.00000000e+00, 0.00000000e+00],
[9.99999856e-01, 1.00000000e+00, 1.00000000e+00],
[1.75675488e-04, 0.00000000e+00, 0.00000000e+00],
[2.18160888e-04, 0.00000000e+00, 0.00000000e+00],
[1.82912262e-04, 0.00000000e+00, 0.00000000e+00],
[9.99999779e-01, 1.00000000e+00, 1.00000000e+00],
[2.35917348e-04, 0.00000000e+00, 0.00000000e+00],
[1.82249155e-04, 0.00000000e+00, 0.00000000e+00],
[9.99999847e-01, 1.00000000e+00, 1.00000000e+00]])
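The scheme above holds out a single validation split to build the meta-features, which is often called blending. Classic stacking instead uses out-of-fold predictions so that every training sample receives a first-layer prediction from a model that never saw it. A minimal sketch using cross_val_predict (the variable names are illustrative):

from sklearn.model_selection import cross_val_predict

# Out-of-fold meta-features: each row is predicted by a model
# fitted on the other folds
oof_features = np.column_stack([
    cross_val_predict(clf, X_train1, y_train1, cv=5,
                      method='predict_proba')[:, 1]
    for clf in clfs
])
# The meta-learner could then be trained on the full training set:
# lr.fit(oof_features, y_train1)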
# Train the second-layer learner on the first layer's validation-set predictions
lr.fit(val_features,y_val)
# Evaluate on the test-set meta-features
# (note that cross_val_score refits lr on folds of test_features)
from sklearn.model_selection import cross_val_score
cross_val_score(lr,test_features,y_test,cv=5)
array([1., 1., 1., 1., 1.])
As we can see, every fold of the cross-validation scores perfectly, so this ensemble method is highly effective on this dataset. Bear in mind, though, that the data are synthetic, and the earlier visualization already showed that the two clusters are far apart.
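Since lr has already been fit on the validation meta-features, the stacked model can also be scored directly on the test set without refitting; a small sketch (thresholding the regression output at 0.5 is an assumption of this sketch, not part of the tutorial):

from sklearn.metrics import accuracy_score

# Convert the meta-learner's continuous output into class labels
y_pred = (lr.predict(test_features) > 0.5).astype(int)
print("Stacked test accuracy:", accuracy_score(y_test, y_pred))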
Exercise: test the approach on the iris dataset
import sklearn
iris = sklearn.datasets.load_iris()
X = iris.data[:, :2] # keep only the first two features to simplify the example
y = iris.target
# 80/20 split into training and test sets
# stratify by y so each class is sampled in equal proportion
X_train1, X_test, y_train1, y_test = train_test_split(X, y, train_size=0.8, random_state=1, stratify=y)
## Create the training and validation sets
X_train,X_val,y_train,y_val = train_test_split(X_train1, y_train1, test_size=0.3, random_state=1)
print("The shape of training X:",X_train.shape)
print("The shape of training y:",y_train.shape)
print("The shape of test X:",X_test.shape)
print("The shape of test y:",y_test.shape)
print("The shape of validation X:",X_val.shape)
print("The shape of validation y:",y_val.shape)
The shape of training X: (84, 2)
The shape of training y: (84,)
The shape of test X: (30, 2)
The shape of test y: (30,)
The shape of validation X: (36, 2)
The shape of validation y: (36,)
# Visualize the data -- two of the three classes are clearly not well separated
for i, j in zip(X, y):
    plt.scatter(i[0], i[1], c='b', marker=j)  # marker style encodes the class label
# Set up the first-layer (base) classifiers
clfs = [SVC(probability=True),
        RandomForestClassifier(n_estimators=5, n_jobs=-1, criterion='gini'),
        KNeighborsClassifier()]
# Set up the second-layer (meta) learner
lr = LinearRegression()
# Generate the first-layer predictions on the validation and test sets
val_features = np.zeros((X_val.shape[0], len(clfs)))    # meta-features for the validation set
test_features = np.zeros((X_test.shape[0], len(clfs)))  # meta-features for the test set
for i, clf in enumerate(clfs):
    clf.fit(X_train, y_train)
    # Note: iris has three classes, so keeping only the probability of class 1
    # discards the information about classes 0 and 2
    val_feature = clf.predict_proba(X_val)[:, 1]
    test_feature = clf.predict_proba(X_test)[:, 1]
    val_features[:, i] = val_feature
    test_features[:, i] = test_feature
val_features.shape, test_features.shape # predictions of the 3 base learners, recorded as meta-features
((36, 3), (30, 3))
# Train the second-layer learner on the first layer's validation-set predictions
lr.fit(val_features,y_val)
# Evaluate on the test-set meta-features (the scores are R² values, the default for LinearRegression)
cross_val_score(lr,test_features,y_test,cv=5)
array([ 0.03009714, -0.10311871, -0.34988705, 0.410725 , 0.48324261])
The scores are much worse here: the R² values are low and partly negative, which is consistent with the class overlap seen in the plot and with the loss of information from keeping only the class-1 probability in a three-class problem.
# Visualize the decision boundary
x_min = X_train[:, 0].min() - 1
x_max = X_train[:, 0].max() + 1
y_min = X_train[:, 1].min() - 1
y_max = X_train[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1), np.arange(y_min, y_max, 0.1))
# Decision boundary -- note that clf here is simply the last base classifier
# fitted in the loop (the KNN), not the stacked model
for i, j in zip(X_train, y_train):
    plt.scatter(i[0], i[1], c='b', marker=j)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z)
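Because the contour above comes from the last base classifier only, plotting the stacked model's boundary takes one extra step: build meta-features for every grid point and pass them through lr. A sketch under the same setup (rounding the regression output to the nearest class is an illustrative choice, not part of the tutorial):

# Meta-features for every point on the grid
grid = np.c_[xx.ravel(), yy.ravel()]
grid_features = np.column_stack([c.predict_proba(grid)[:, 1] for c in clfs])

# Round the meta-learner's continuous output to the nearest class label (0, 1, or 2)
Z_stack = np.clip(np.rint(lr.predict(grid_features)), 0, 2).reshape(xx.shape)

plt.contourf(xx, yy, Z_stack, alpha=0.3)
for i, j in zip(X_train, y_train):
    plt.scatter(i[0], i[1], c='b', marker=j)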