Continuing from the previous post: XGBoost for Insurance Claims Prediction: Data Preprocessing (Part 1)
Below is the full parameter-tuning workflow for building the XGBoost model:
# XGBoost tuning strategy
import xgboost as xgb
import pandas as pd
import numpy as np
import pickle
import sys
import matplotlib.pyplot as plt
from sklearn.metrics import mean_absolute_error,make_scorer
from sklearn.preprocessing import StandardScaler
# from sklearn.grid_search import GridSearchCV  # location in old sklearn versions
from scipy.sparse import csr_matrix,hstack
from sklearn.model_selection import KFold,train_test_split,GridSearchCV
from xgboost import XGBRegressor
import warnings
warnings.filterwarnings('ignore')
# data preprocessing
train =pd.read_csv('D:/机器学习算法/allstate-claims-severity/train.csv')
# log-transform the target
train['log_loss']=np.log(train['loss'])
# split features into continuous and categorical
features=[x for x in train.columns if x not in ['id','loss','log_loss']]
cat_features=[x for x in train.select_dtypes(include=['object']).columns
              if x not in ['id','loss','log_loss']]  # 116 categorical features, 72 of them binary
num_features=[x for x in train.select_dtypes(exclude=['object']).columns
              if x not in ['id','loss','log_loss']]
print("Categorical features:",len(cat_features))
print("Numerical features:",len(num_features))
ntrain=train.shape[0]
train_x=train[features]
train_y=train['log_loss']
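The log transform is used because loss is strongly right-skewed; the model is trained on log(loss) and predictions are mapped back with np.exp when the error is measured. A minimal sanity check of that skewness (illustrative only, not part of the original post):
# Illustrative check: 'loss' is heavily right-skewed, log(loss) is much closer to symmetric
print('skew(loss)    :', train['loss'].skew())
print('skew(log_loss):', train['log_loss'].skew())
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))
train['loss'].hist(bins=50, ax=ax1)
ax1.set_title('loss')
train['log_loss'].hist(bins=50, ax=ax2)
ax2.set_title('log(loss)')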
Encode the categorical (string) variables as integers:
# convert categorical features to integer codes
for c in cat_features:
    train_x[c] = train_x[c].astype('category').cat.codes
Keep a dictionary recording the mapping between category labels and their integer codes, for later use:
# dictionary: label -> integer code for each categorical column
category_dict = {
    col: {cat: n for n, cat in enumerate(train[col].astype('category').cat.categories)}
    for col in cat_features
}
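One reason to keep category_dict is that the same label-to-code mapping can be re-applied to unseen data (for example the competition's test.csv) instead of re-deriving codes file by file. A minimal sketch, reusing a few training rows as a stand-in for new data:
# Illustrative: re-apply the stored mapping with Series.map; labels not seen during training
# become NaN and are filled with -1, the same code cat.codes assigns to missing values
sample = train[cat_features].head(3)   # stand-in for new/unseen rows
encoded = sample.apply(lambda s: s.map(category_dict[s.name]).fillna(-1).astype(int))
print(encoded.iloc[:, :5])             # peek at the first few encoded columns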
Step 1: build a baseline model:
# Simple XGBoost model
# custom evaluation metric: MAE on the original loss scale (undo the log transform with np.exp)
def xg_eval_mae(yhat, dtrain):
    y = dtrain.get_label()
    return 'mae', mean_absolute_error(np.exp(y), np.exp(yhat))
# Model: xgboost provides its own data matrix class, DMatrix; the data is preprocessed once at the start of training so that every later iteration is faster
dtrain=xgb.DMatrix(train_x,train['log_loss'])
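Before moving to cross-validation, xg_eval_mae can also be plugged into a plain xgb.train run on a single hold-out split; the split and parameter values below are purely illustrative and not part of the original tuning:
# Illustrative hold-out run with the custom MAE metric (not part of the original workflow)
X_tr, X_val, y_tr, y_val = train_test_split(train_x, train_y, test_size=0.2, random_state=0)
dtr, dval = xgb.DMatrix(X_tr, y_tr), xgb.DMatrix(X_val, y_val)
bst = xgb.train({'objective': 'reg:linear', 'eta': 0.1, 'silent': 1}, dtr,
                num_boost_round=50,
                evals=[(dval, 'val')],          # feval is evaluated on this set every round
                feval=xg_eval_mae, maximize=False,
                early_stopping_rounds=10, verbose_eval=10)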
Notes on the XGBoost parameters:
'''
XGBoost parameters
'booster':'gbtree'            gradient-boosted regression trees
'objective':'multi:softmax'   the objective (loss) function; multi:softmax is for multi-class classification, regression objectives also exist
'num_class':10                number of classes, used together with multi:softmax
'gamma'                       minimum loss reduction required to make a further split
'max_depth':12                maximum tree depth; larger values overfit more easily
'lambda':2                    L2 regularization on the leaf weights, i.e. the (1/2)*lambda*w_i^2 term; the larger lambda is, the smaller the leaf weights w_i must be, so the model is less likely to overfit
'subsample':0.7               fraction of training samples drawn for each tree
'colsample_bytree':0.7        fraction of columns (features) sampled when building each tree
'min_child_weight':3          minimum sum of instance weights in a child node; if a leaf's weight sum falls below this, splitting stops
'silent':0                    set to 1 to suppress run-time messages; 0 (verbose) is usually preferable
'eta':0.007                   shrinkage (learning rate), the contribution of each tree
'seed':1000                   random seed
'nthread':7                   number of CPU threads
'''
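For context, gamma and lambda above are the penalties in XGBoost's regularized objective, where $T$ is the number of leaves of a tree $f$ and $w_j$ are its leaf weights:

$$\text{Obj} = \sum_i l\bigl(y_i, \hat{y}_i\bigr) + \sum_k \Omega(f_k), \qquad \Omega(f) = \gamma T + \frac{1}{2}\lambda \sum_{j=1}^{T} w_j^2$$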
Build a baseline model with 50 trees:
# baseline model parameters
xgb_params={
'seed':0,
'eta':0.1,
'colsample_bytree':0.5,
'silent':1,
'subsample':0.5,
'objective':'reg:linear',   # renamed to 'reg:squarederror' in newer xgboost versions
'max_depth':5,
'min_child_weight':3
}
# cross-validation with xgb.cv
%%time
# num_boost_round: number of trees; nfold: number of cross-validation folds
bst_cv1 = xgb.cv(xgb_params, dtrain, num_boost_round=50, nfold=3, seed=0,
                 feval=xg_eval_mae, maximize=False, early_stopping_rounds=10)
# feval receives the predictions (yhat) and the evaluation DMatrix; xgb.cv computes it on both the training folds and the held-out fold each round
print('CV score:', bst_cv1.iloc[-1, :]['test-mae-mean'])
plt.figure()
bst_cv1[['train-mae-mean','test-mae-mean']].plot()
CV score: 1220.1099446666667
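Since early_stopping_rounds=10 is set, xgb.cv may stop before 50 rounds and the returned DataFrame is truncated at the best iteration, so its last row is the score to report. A quick check (variable names follow the code above):
# bst_cv1 has one row per boosting round actually run; with early stopping
# this can be fewer than num_boost_round
print('rounds run    :', len(bst_cv1))
print('best test MAE :', bst_cv1['test-mae-mean'].min())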
With 100 trees:
# build a model with 100 trees
%%time
bst_cv2=xgb.cv(xgb_params,dtrain,num_boost_round=100,
nfold=3,seed=0,feval=xg_eval_mae,maximize=False,early_stopping_rounds=10)
print('CV score:',bst_cv2.iloc[-1,:]['test-mae-mean'])
# CV score: 1172.059570333333
fig,(ax1,ax2)=plt.subplots(1,2)
fig.set_size_inches(16,4)
ax1.set_title('100 rounds of training')
ax1.set_xlabel('Rounds')
ax1.set_ylabel('Loss')
ax1.grid(True)
ax1.plot(bst_cv2[['train-mae-mean','test-mae-mean']])
ax1.legend(['Training Loss','Test Loss'])
ax2.set_title('Last 60 rounds of training')
ax2.set_xlabel('Rounds')