决策树
决策树是基于信息论提出的概念,划分原则是将原本无序的数据变得更加有序。
信息熵的概念
信息熵是对信息不确定性的度量:数据越无序、越不确定,熵越大。
信息熵的计算公式:H(D) = -Σᵢ pᵢ·log₂(pᵢ),等价于 Σᵢ pᵢ·log₂(1/pᵢ)。
信息增益
信息增益表示的是原来的数据在没有按照任何属性划分时的熵与按照某一属性A进行划分后的信息熵的差值。
信息熵的计算
import numpy as np


def _entropy(probabilities):
    """Shannon entropy H = sum(p * log2(1/p)) of a discrete distribution."""
    return sum(p * np.log2(1 / p) for p in probabilities)


# Account authenticity: 3 fake (p=0.3) and 7 genuine (p=0.7).
# Entropy of the target before any split.
info_D = _entropy([0.3, 0.7])
print(info_D)

# Build the decision tree by splitting on one of three attributes:
# log density, friend density, real profile picture.
# Split on log density L:
#   s: 0.3 of the data -> 2 no / 1 yes
#   m: 0.4 of the data -> 1 no / 3 yes
#   l: 0.3 of the data -> 3 yes
info_L_D = (
    0.3 * _entropy([2 / 3, 1 / 3])
    + 0.4 * _entropy([0.25, 0.75])
    + 0.3 * _entropy([1])
)
print(info_L_D)
# Information gain = entropy before the split minus entropy after it.
print(info_D - info_L_D)
0.8812908992306926
0.5999999999999999
0.2812908992306927
import numpy as np
import matplotlib.pyplot as plt
from sklearn import tree
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier

# Fit an entropy-based decision tree on the iris data set.
iris = load_iris()
X = iris['data']
y = iris['target']
feature_names = iris.feature_names
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=1024
)

# Continuous features are split by thresholds; a feature with a larger
# spread tends to separate the classes more easily.
print(X_train.std(axis=0))

clf = DecisionTreeClassifier(criterion='entropy')
clf.fit(X_train, y_train)
y_ = clf.predict(X_test)
accuracy = accuracy_score(y_test, y_)

# To visualise the fitted tree (requires sklearn >= 0.21):
# plt.figure(figsize=(18, 12))
# _ = tree.plot_tree(clf, filled=True, feature_names=feature_names)

# Drop class 0 and look at how petal width (column 3), once sorted,
# orders the two remaining classes.
keep = y_train != 0
X_train2 = X_train[keep]
y_train2 = y_train[keep]
order = np.argsort(X_train2[:, 3])
print(X_train2[:, 3][order])
print(y_train2[order])
[0.82300095 0.42470578 1.74587112 0.75016619]
[1. 1. 1. 1. 1. 1. 1.1 1.1 1.2 1.2 1.2 1.2 1.2 1.3 1.3 1.3 1.3 1.3
1.3 1.3 1.3 1.3 1.3 1.4 1.4 1.4 1.4 1.4 1.4 1.5 1.5 1.5 1.5 1.5 1.5 1.5
1.5 1.5 1.5 1.6 1.6 1.6 1.6 1.7 1.7 1.8 1.8 1.8 1.8 1.8 1.8 1.8 1.8 1.8
1.9 1.9 1.9 1.9 1.9 2. 2. 2. 2. 2. 2.1 2.1 2.1 2.1 2.2 2.2 2.3 2.3
2.3 2.3 2.3 2.3 2.3 2.3 2.4 2.5 2.5]
[1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 1 1 1
1 2 2 1 1 1 2 1 2 2 2 2 2 2 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
2 2 2 2 2 2 2]
Gini 系数
随机森林
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import datasets
from sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split

# A random forest is an ensemble of decision trees: each member tree is
# built with the ordinary decision-tree procedure, and their individual
# predictions are combined (ensemble / bagging algorithm).
wine = datasets.load_wine()
X = wine['data']
y = wine['target']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

forest = RandomForestClassifier(n_estimators=100)
forest.fit(X_train, y_train)
predictions = forest.predict(X_test)
print(accuracy_score(y_test, predictions))
1.0