Practice
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn import svm
import lightgbm as lgb
from sklearn.metrics import f1_score
import joblib  # sklearn.externals.joblib is deprecated in newer scikit-learn; import joblib directly
from sklearn.model_selection import GridSearchCV
# Read the data (only the first 5000 training rows are loaded here)
df_train = pd.read_csv('./new_data/train_set.csv', index_col='id', nrows=5000)
test_data = pd.read_csv('./new_data/test_set.csv', index_col='id')
train_label = df_train['class']
# TF-IDF features on the word-segmented text
tfidf = TfidfVectorizer()
train_data = tfidf.fit_transform(df_train['word_seg'])
# Split into training and validation sets
X_train, X_test, Y_train, Y_test = train_test_split(train_data, train_label, test_size=0.3, random_state=2019)
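# Sanity check: the TF-IDF output is a sparse matrix whose column count equals the
# vocabulary size learned from the training text (exact numbers depend on the data)
print(X_train.shape, X_test.shape)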
# Logistic regression (dual=True requires the liblinear solver)
lr = LogisticRegression(C=100, dual=True, solver='liblinear')
lr.fit(X_train, Y_train)
lr_predictions = lr.predict(X_test)
lr_f1 = f1_score(Y_test, lr_predictions, average='weighted')
# Linear SVM
clf = svm.LinearSVC(C=5, dual=False)
clf.fit(X_train, Y_train)
clf_predictions = clf.predict(X_test)
clf_f1 = f1_score(Y_test, clf_predictions, average='weighted')
# LightGBM classifier
gbm = lgb.LGBMClassifier(num_leaves=31, learning_rate=0.05, n_estimators=20)
gbm.fit(X_train, Y_train)
gbm_predictions = gbm.predict(X_test)
gbm_f1 = f1_score(Y_test, gbm_predictions, average='weighted')
print("The lr F1 Score:{:.5f}".format(lr_f1))
print("The SVM F1 Score: {:.5f}".format(clf_f1))
print("The gbm F1 Score: {:.5f}".format(gbm_f1))代码片
Parameter Tuning
# Grid search over LightGBM hyperparameters
GBM_grid = lgb.LGBMClassifier()
params = {'learning_rate': [0.01, 0.05, 0.1], 'n_estimators': [20, 30]}
gbm = GridSearchCV(GBM_grid, params)
gbm.fit(X_train, Y_train)
print('Best parameters found by grid search are:', gbm.best_params_)
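A natural follow-up, sketched below under the same train/validation split: GridSearchCV refits the best parameter combination on the full training data by default (refit=True), so best_estimator_ can be evaluated directly on the held-out split.

# Evaluate the refit best estimator on the held-out validation split
best_gbm = gbm.best_estimator_
tuned_predictions = best_gbm.predict(X_test)
tuned_f1 = f1_score(Y_test, tuned_predictions, average='weighted')
print("The tuned LightGBM F1 score: {:.5f}".format(tuned_f1))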