Risk Control Model Development Workflow
· Data extraction
· Data exploration
· Modeling data preparation
· Variable selection
· Model development and validation
· Model deployment
· Model monitoring
Load the packages:
import os
import sys
import string
import pymysql
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
import sklearn.model_selection as cross_validation  # sklearn.cross_validation was removed; model_selection provides the same functions
import sklearn.tree as tree
import sklearn.ensemble as ensemble
import sklearn.linear_model as linear_model
import sklearn.svm as svm
import sklearn.feature_selection as feature_selection
import sklearn.metrics as metrics
- Data extraction
model_data = pd.read_csv("credit_develop.csv")
model_data.head()  # inspect the data format
After reading the data, get a rough picture of it, paying particular attention to each field's type. Fields such as Branch_of_Bank, for example, are categorical variables and will need dummy-variable encoding later.
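A quick way to check every field's type before any encoding (Branch_of_Bank is the categorical field named above):
model_data.dtypes  # type of each column; object columns are candidates for dummy encoding
model_data["Branch_of_Bank"].value_counts()  # levels of one categorical field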

- Data exploration
Data exploration is how the modeler gets to know the features; the overall data can be examined through summary tables or plots.
model_data.describe().T
Check the distribution of the data.
data = model_data["Credit_Score"].dropna()  # drop missing values
sns.distplot(data)

Examine the distribution of the credit scores.
plt.boxplot(data)
Use a boxplot to inspect the dispersion of the data.
model_data = model_data.drop_duplicates()  # drop duplicate rows
Fill in missing values
model_data = model_data.fillna(model_data.mean())  # fill with the column means
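Note that model_data.mean() only produces values for numeric columns, so categorical fields keep their missing values. A minimal sketch for filling those with their most frequent level, assuming the categorical fields named above:
# mean() skips non-numeric columns; fill categorical fields with their mode instead.
for col in ["Branch_of_Bank", "Area_Classification"]:  # categorical fields in this dataset
    model_data[col] = model_data[col].fillna(model_data[col].mode()[0])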
Variable similarity analysis and variable clustering
simpler = np.random.randint(0, len(model_data), size=50)
sns.clustermap(model_data.iloc[simpler, 3:].T, col_cluster=False, row_cluster=True)
- Generate the model training/test datasets
Convert the categorical variables into dummy variables
Area_Classification_dummy = pd.get_dummies(model_data["Area_Classification"], prefix="Area_Class")
model_data = model_data.join(Area_Classification_dummy)
Branch_dummy = pd.get_dummies(model_data["Branch_of_Bank"], prefix="Branch")
model_data = model_data.join(Branch_dummy)
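The raw categorical columns are still in the frame after the joins; dropping them keeps only numeric inputs for the model (a sketch, assuming the two fields above are the only categorical ones):
# Remove the original categorical columns now that their dummies are joined.
model_data = model_data.drop(["Area_Classification", "Branch_of_Bank"], axis=1)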
· Separate the target variable from the predictor variables
target = model_data["target"]
target.value_counts()  # class balance of the target
data = model_data.loc[:, 'Age':]
· Split into training and test sets at a 6:4 ratio
train_data, test_data, train_target, test_target = cross_validation.train_test_split(data, target, test_size=0.4, random_state=0)
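Since this train_test_split call does not stratify, it is worth confirming the class mix survived the 6:4 split:
# Class proportions should be similar in both partitions.
print(pd.Series(train_target).value_counts(normalize=True))
print(pd.Series(test_target).value_counts(normalize=True))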
- Variable selection
Because we are using plain logistic regression, variable selection is especially important: a poor selection can cause overfitting or underfitting. (More advanced algorithms can, of course, handle the selection for you.) We start with the most basic method, the linear correlation coefficient.
corr_matrix = model_data.corr(method='pearson')
corr_matrix = corr_matrix.abs()
sns.set(rc={"figure.figsize": (10, 10)})
sns.heatmap(corr_matrix, square=True, cmap="Blues")

corr = model_data.corr(method='pearson').loc["target"].abs()
corr = corr.sort_values(ascending=False)
corr.plot(kind="bar", title="corr", figsize=[12, 6])
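To turn the correlation ranking into an actual filter, keep only variables whose absolute correlation with the target clears a cutoff (the 0.1 below is an illustrative value, not from the analysis):
# Keep predictors whose |Pearson corr| with the target exceeds the cutoff.
threshold = 0.1  # illustrative cutoff
selected = corr[corr > threshold].index.drop("target")  # drop the target's self-correlation
print(list(selected))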
· Use a random forest to select model variables
rfc = ensemble.RandomForestClassifier(criterion='entropy', n_estimators=3, max_features=0.5, min_samples_split=5)
rfc_model = rfc.fit(train_data, train_target)
rfc_model.feature_importances_
rfc_fi = pd.DataFrame()
rfc_fi["features"] = list(data.columns)
rfc_fi["importance"] = list(rfc_model.feature_importances_)
rfc_fi = rfc_fi.set_index("features", drop=True)
rfc_fi.sort_values(by="importance", ascending=False).plot(kind="bar", title="feature importance", figsize=[12, 6])
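The sklearn.feature_selection module imported at the top can automate this step; a sketch using SelectFromModel on the already-fitted forest (the "median" threshold is an illustrative choice):
# Keep features whose importance is above the median importance.
sfm = feature_selection.SelectFromModel(rfc_model, threshold="median", prefit=True)
train_data_sel = sfm.transform(train_data)
test_data_sel = sfm.transform(test_data)
print(train_data_sel.shape)  # fewer columns than train_data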

- Model training
· Logistic regression with the original variables
logistic_model = linear_model.LogisticRegression()
logistic_model.fit(train_data, train_target)
test_est = logistic_model.predict(test_data)
train_est = logistic_model.predict(train_data)
test_est_p = logistic_model.predict_proba(test_data)[:, 1]
train_est_p = logistic_model.predict_proba(train_data)[:, 1]
print(metrics.classification_report(test_target, test_est))
             precision    recall  f1-score   support

          0       0.68      0.61      0.64      2825
          1       0.64      0.71      0.67      2775

avg / total       0.66      0.66      0.66      5600

print(metrics.classification_report(train_target, train_est))
             precision    recall  f1-score   support

          0       0.67      0.63      0.65      4175
          1       0.66      0.70      0.68      4225

avg / total       0.67      0.67      0.66      8400

metrics.zero_one_loss(test_target, test_est)
# 0.34053571428571427
metrics.zero_one_loss(train_target, train_est)
# 0.33476190476190482
- Score distributions of target vs. non-target samples
red, blue = sns.color_palette("Set1", 2)
sns.kdeplot(test_est_p[test_target == 1], shade=True, color=red)
sns.kdeplot(test_est_p[test_target == 0], shade=True, color=blue)
- ROC curve
fpr_test, tpr_test, th_test = metrics.roc_curve(test_target, test_est_p)
fpr_train, tpr_train, th_train = metrics.roc_curve(train_target, train_est_p)
plt.figure(figsize=[6,6])
plt.plot(fpr_test, tpr_test, color=blue)
plt.plot(fpr_train, tpr_train, color=red)
plt.title('ROC curve')
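The area under each curve (AUC) summarizes the plot in a single number:
print(metrics.roc_auc_score(test_target, test_est_p))    # test AUC
print(metrics.roc_auc_score(train_target, train_est_p))  # train AUC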

test_x_axis = np.arange(len(fpr_test))/float(len(fpr_test))
train_x_axis = np.arange(len(fpr_train))/float(len(fpr_train))
plt.figure(figsize=[6,6])
plt.plot(fpr_test, test_x_axis, color=blue)
plt.plot(tpr_test, test_x_axis, color=blue)
plt.plot(fpr_train, train_x_axis, color=red)
plt.plot(tpr_train, train_x_axis, color=red)
plt.title('KS curve')
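The KS statistic itself is the maximum vertical gap between the two cumulative curves, i.e. max(TPR - FPR), and can be read straight off the roc_curve outputs above:
# KS statistic: maximum separation between the cumulative distributions.
print((tpr_test - fpr_test).max())    # test KS
print((tpr_train - fpr_train).max())  # train KS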

- Model monitoring
· Stability of the score distribution
A score is built so that higher-scoring applicants have a lower probability of default; checking score-distribution stability means verifying that default rates across the score bands keep that relationship over time.
· Stability of variable discriminative power
That is, whether each variable's importance holds up on new data.
· Stability of variable distributions
A standard metric for both kinds of distribution stability is the Population Stability Index, sketched below.
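Below is a minimal PSI sketch (not from the original analysis): it bins a development sample by score deciles and measures how far a monitoring sample drifts from those bins.
def psi(expected, actual, bins=10):
    # Population Stability Index between a development sample (expected)
    # and a monitoring sample (actual).
    # PSI = sum((actual% - expected%) * ln(actual% / expected%)) over bins.
    # Rule of thumb: < 0.1 stable, 0.1-0.25 needs watching, > 0.25 unstable.
    edges = np.percentile(expected, np.linspace(0, 100, bins + 1))
    edges[0], edges[-1] = -np.inf, np.inf  # catch out-of-range values
    e_pct = np.histogram(expected, edges)[0] / float(len(expected))
    a_pct = np.histogram(actual, edges)[0] / float(len(actual))
    e_pct = np.clip(e_pct, 1e-6, None)  # avoid log(0)
    a_pct = np.clip(a_pct, 1e-6, None)
    return np.sum((a_pct - e_pct) * np.log(a_pct / e_pct))

psi(train_est_p, test_est_p)  # e.g. compare development scores against a new batch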
- Other machine learning algorithms
lr = linear_model.LogisticRegression()
lr_scores = cross_validation.cross_val_score(lr, train_data, train_target, cv=5)
print("logistic regression accuracy:")
print(lr_scores)
clf = tree.DecisionTreeClassifier(criterion='entropy', max_depth=8, min_samples_split=5)
clf_scores = cross_validation.cross_val_score(clf, train_data, train_target, cv=5)
print("decision tree accuracy:")
print(clf_scores)
rfc = ensemble.RandomForestClassifier(criterion='entropy', n_estimators=3, max_features=0.5, min_samples_split=5)
rfc_scores = cross_validation.cross_val_score(rfc, train_data, train_target, cv=5)
print("random forest accuracy:")
print(rfc_scores)
etc = ensemble.ExtraTreesClassifier(criterion='entropy', n_estimators=3, max_features=0.6, min_samples_split=5)
etc_scores = cross_validation.cross_val_score(etc, train_data, train_target, cv=5)
print("extra trees accuracy:")
print(etc_scores)
gbc = ensemble.GradientBoostingClassifier()
gbc_scores = cross_validation.cross_val_score(gbc, train_data, train_target, cv=5)
print("gradient boosting accuracy:")
print(gbc_scores)
svc = svm.SVC()
svc_scores = cross_validation.cross_val_score(svc, train_data, train_target, cv=5)
print("svm classifier accuracy:")
print(svc_scores)
abc = ensemble.AdaBoostClassifier(n_estimators=100)
abc_scores = cross_validation.cross_val_score(abc, train_data, train_target, cv=5)
print("abc classifier accuracy:")
print(abc_scores)
logistic regression accuracy:
[ 0.66785714 0.65416667 0.65357143 0.65535714 0.65833333]
decision tree accuracy:
[ 0.74107143 0.75654762 0.73988095 0.73035714 0.73690476]
random forest accuracy:
[ 0.7125 0.72142857 0.71190476 0.72083333 0.68809524]
extra trees accuracy:
[ 0.66964286 0.70714286 0.6827381 0.67738095 0.67797619]
gradient boosting accuracy:
[ 0.78214286 0.78035714 0.77202381 0.76071429 0.75119048]
svm classifier accuracy:
[ 0.50297619 0.50595238 0.50654762 0.50297619 0.50297619]
abc classifier accuracy:
[ 0.76785714 0.76488095 0.76904762 0.74940476 0.75 ]
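Gradient boosting comes out on top (~0.77) while the SVM hovers near chance (~0.50). SVC is sensitive to feature scale, so standardizing the inputs usually lifts it; a sketch, assuming no scaling was applied earlier:
from sklearn import preprocessing

# Standardize features before the scale-sensitive SVM.
scaler = preprocessing.StandardScaler().fit(train_data)
svc = svm.SVC()
svc_scores = cross_validation.cross_val_score(svc, scaler.transform(train_data), train_target, cv=5)
print("svm classifier accuracy (scaled):")
print(svc_scores)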