"""Train and compare three tree-based classifiers (single decision tree,
random forest, gradient boosting) on the Titanic survival dataset and
print accuracy plus a per-class classification report for each."""
import pandas as pd
# sklearn.cross_validation was removed in scikit-learn 0.20; train_test_split
# now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction import DictVectorizer
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import classification_report

# Load the Titanic dataset over HTTP.
# NOTE(review): the biostat.mc.vanderbilt.edu host has been retired — confirm a
# working mirror (e.g. hbiostat.org/data) before running.
titanic = pd.read_csv('http://biostat.mc.vanderbilt.edu/wiki/pub/Main/DataSets/titanic.txt')

# Feature selection: passenger class, age and sex.
# .copy() makes X an independent frame so the fillna below is not a write into
# a view of `titanic` (avoids SettingWithCopyWarning / silently lost updates).
X = titanic[['pclass', 'age', 'sex']].copy()
y = titanic['survived']

# Fill missing ages; mean (like median) perturbs the model the least.
# Plain assignment instead of chained `inplace=True`, which is unreliable on
# a column extracted from a DataFrame.
X['age'] = X['age'].fillna(X['age'].mean())

# Hold out 25% of the samples for testing, with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=33)

# One-hot encode the categorical features ('pclass', 'sex'); numeric columns
# ('age') pass through unchanged.
vec = DictVectorizer(sparse=False)
# The valid pandas orient is 'records' (list of row dicts), not 'record'.
X_train = vec.fit_transform(X_train.to_dict(orient='records'))
print(vec.feature_names_)
X_test = vec.transform(X_test.to_dict(orient='records'))  # transform only: reuse the fitted vocabulary

# Single decision tree: train and predict.
dtc = DecisionTreeClassifier()
dtc.fit(X_train, y_train)
dtc_y_predict = dtc.predict(X_test)

# Random forest ensemble: train and predict.
rfc = RandomForestClassifier()
rfc.fit(X_train, y_train)
rfc_y_predict = rfc.predict(X_test)

# Gradient-boosted decision trees: train and predict.
gbc = GradientBoostingClassifier()
gbc.fit(X_train, y_train)
gbc_y_predict = gbc.predict(X_test)

# classification_report's signature is (y_true, y_pred); the original code
# passed them reversed, which transposes per-class precision and recall.
print('The accuracy of decision tree is: ', dtc.score(X_test, y_test))
print(classification_report(y_test, dtc_y_predict))
print('\n' * 2)
print('The accuracy of random forest classifier:', rfc.score(X_test, y_test))
print(classification_report(y_test, rfc_y_predict))
print('\n' * 2)
print('The accuracy of gradient tree boosting', gbc.score(X_test, y_test))
print(classification_report(y_test, gbc_y_predict))
# Second run of the same experiment (duplicated in the original source).
# Re-trains the three tree-based classifiers on a fresh copy of the data.
# sklearn.cross_validation was removed in scikit-learn 0.20; use model_selection.
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction import DictVectorizer
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import classification_report

# Load the Titanic dataset over HTTP.
# NOTE(review): the biostat.mc.vanderbilt.edu host has been retired — confirm a
# working mirror (e.g. hbiostat.org/data) before running.
titanic = pd.read_csv('http://biostat.mc.vanderbilt.edu/wiki/pub/Main/DataSets/titanic.txt')

# Feature selection: passenger class, age and sex.
# .copy() decouples X from `titanic` so the fillna below is a real write,
# not a write into a view (avoids SettingWithCopyWarning).
X = titanic[['pclass', 'age', 'sex']].copy()
y = titanic['survived']

# Fill missing ages with the mean; mean/median disturb the model the least.
# Plain assignment instead of chained `inplace=True`, which can silently fail.
X['age'] = X['age'].fillna(X['age'].mean())

# 75/25 train/test split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=33)

# One-hot encode categorical features; numeric ones pass through unchanged.
vec = DictVectorizer(sparse=False)
# The valid pandas orient is 'records' (list of row dicts), not 'record'.
X_train = vec.fit_transform(X_train.to_dict(orient='records'))
print(vec.feature_names_)
X_test = vec.transform(X_test.to_dict(orient='records'))  # reuse the fitted vocabulary

# Single decision tree.
dtc = DecisionTreeClassifier()
dtc.fit(X_train, y_train)
dtc_y_predict = dtc.predict(X_test)

# Random forest ensemble.
rfc = RandomForestClassifier()
rfc.fit(X_train, y_train)
rfc_y_predict = rfc.predict(X_test)

# Gradient-boosted decision trees.
gbc = GradientBoostingClassifier()
gbc.fit(X_train, y_train)
gbc_y_predict = gbc.predict(X_test)

# classification_report's signature is (y_true, y_pred); the original code
# passed them reversed, which transposes per-class precision and recall.
print('The accuracy of decision tree is: ', dtc.score(X_test, y_test))
print(classification_report(y_test, dtc_y_predict))
print('\n' * 2)
print('The accuracy of random forest classifier:', rfc.score(X_test, y_test))
print(classification_report(y_test, rfc_y_predict))
print('\n' * 2)
print('The accuracy of gradient tree boosting', gbc.score(X_test, y_test))
print(classification_report(y_test, gbc_y_predict))
本文通过使用pandas处理泰坦尼克号数据集,并采用多种机器学习模型(包括决策树、随机森林和梯度提升决策树)进行生存预测任务。在特征选择、数据预处理、模型训练与评估等方面进行了详细介绍。
1342

被折叠的若干条评论
为什么被折叠？



