Python Machine Learning with sklearn, Part 3: Decision Trees and Random Forests

 

 

 

from sklearn.feature_extraction import DictVectorizer
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.ensemble import RandomForestClassifier
import pandas as pd
def decision():
    """
    Predict Titanic passenger survival with a decision tree / random forest.
    """
    # 1. Load the data with pandas
    # (this URL may no longer be reachable; substitute a local copy of the
    #  Titanic dataset if the request fails)
    titan = pd.read_csv("http://biostat.mc.vanderbilt.edu/wiki/pub/Main/DataSets/titanic.txt")

    # 2. Select the influential features and handle missing values
    # Extract the feature columns and the target column
    x = titan[['pclass', 'age', 'sex']]
    y = titan['survived']

    # Fill missing ages with the mean age
    # (assigning the column back avoids pandas' SettingWithCopyWarning that the
    #  original inplace-on-a-slice call triggers)
    x = x.copy()
    x['age'] = x['age'].fillna(x['age'].mean())


    # 3. Feature engineering: convert the DataFrame rows to dicts, then extract features
    # Split the data set
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25)

    # Feature engineering: one-hot encoding
    # (see the small encoding sketch after this script)
    vec = DictVectorizer(sparse=False)
    x_train = vec.fit_transform(x_train.to_dict(orient="records"))

    # Feature names after encoding
    # (scikit-learn >= 1.0 spells this vec.get_feature_names_out())
    print(vec.get_feature_names())

    x_test = vec.transform(x_test.to_dict(orient="records"))
    # print(x_train)

    # # 4. Decision tree estimator workflow
    # dec = DecisionTreeClassifier()

    # dec.fit(x_train, y_train)

    # print("Accuracy:", dec.score(x_test, y_test))

    # # Export the tree structure (see the rendering sketch after this script)
    # export_graphviz(dec, out_file="./tree.dot",
    #                 feature_names=['age', 'pclass=1st', 'pclass=2nd', 'pclass=3rd', 'sex=female', 'sex=male'])


    # Random forest prediction (with hyper-parameter tuning)
    rf = RandomForestClassifier()

    # Grid search with cross-validation
    # (see the results-inspection sketch after this script)
    param = {"n_estimators": [100, 120, 200, 300, 500, 1200], "max_depth": [3, 4, 5, 8, 15, 30]}
    gc = GridSearchCV(rf, param_grid=param, cv=2)

    gc.fit(x_train, y_train)

    print("Accuracy:", gc.score(x_test, y_test))
    print("Best parameters found:", gc.best_params_)

    return None


if __name__ == "__main__":
    decision()
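
To make the one-hot step above concrete, here is a minimal, self-contained sketch of what DictVectorizer does with a mix of numeric and categorical dict entries. The two toy records are invented for illustration and are not drawn from the Titanic data.

from sklearn.feature_extraction import DictVectorizer

# Hypothetical toy records, only to show the encoding behaviour
records = [
    {"pclass": "1st", "age": 29.0, "sex": "female"},
    {"pclass": "3rd", "age": 25.0, "sex": "male"},
]
vec = DictVectorizer(sparse=False)
print(vec.fit_transform(records))
# Numeric "age" passes through unchanged; "pclass" and "sex" are expanded
# into one indicator column per category value.
print(vec.get_feature_names_out())  # use get_feature_names() on older scikit-learn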
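
If the commented-out decision-tree block is re-enabled, the exported ./tree.dot file can be rendered to an image. A minimal sketch, assuming the Graphviz binaries and the graphviz Python package are installed (neither is used in the original script):

import graphviz

with open("./tree.dot") as f:
    dot_source = f.read()

# Writes tree.png next to the script; the CLI equivalent is:
#   dot -Tpng tree.dot -o tree.png
graphviz.Source(dot_source).render("tree", format="png", cleanup=True)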
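
GridSearchCV stores more than the best parameters: cv_results_ holds one row per candidate combination. A minimal, self-contained sketch on synthetic data (make_classification and the smaller parameter grid here are stand-ins, not part of the original script):

import pandas as pd
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV

X, y = make_classification(n_samples=300, n_features=6, random_state=0)
gc = GridSearchCV(RandomForestClassifier(random_state=0),
                  param_grid={"n_estimators": [50, 100], "max_depth": [3, 5]},
                  cv=2)
gc.fit(X, y)

print("Best parameters:", gc.best_params_)
print("Best mean CV score:", gc.best_score_)
# One row per parameter combination, with its mean test score and rank
print(pd.DataFrame(gc.cv_results_)[["params", "mean_test_score", "rank_test_score"]])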

 

 

 
