- News categories: finance, economics, military, entertainment, sports, ...
1. Import packages
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB,BernoulliNB,MultinomialNB
# ENGLISH_STOP_WORDS: stop words are function words and punctuation that carry little information for classifying a document, so they can usually be dropped. Chinese examples: 了, 的, 得, 地. Removing stop words is a convention, not an absolute rule.
from sklearn.feature_extraction.text import TfidfVectorizer,ENGLISH_STOP_WORDS
from sklearn.model_selection import train_test_split
ENGLISH_STOP_WORDS
frozenset({'a',
'about',
...
'yourself',
'yourselves'})
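If we want the vectorizer used below to skip these words, the list can be passed in directly. A minimal sketch (tf_idf_sw is just an illustrative name; stop_words='english' is scikit-learn's shortcut for this same built-in list):
# drop the built-in English stop words during vectorization
tf_idf_sw = TfidfVectorizer(stop_words='english')                    # shortcut for the built-in list
# tf_idf_sw = TfidfVectorizer(stop_words=list(ENGLISH_STOP_WORDS))   # equivalent explicit form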
2. Load the data
news = datasets.fetch_20newsgroups(data_home='./data/',subset='all') # data_home: directory to cache the data in; subset: which split to load - 'train', 'test', or 'all'
len(news['target'])
18846
news = datasets.fetch_20newsgroups(data_home='./data/',subset='all', # load only part of the data
                                   remove=('headers', 'footers', 'quotes'), # strip metadata that can leak the label
                                   categories=['rec.motorcycles','rec.sport.hockey','talk.politics.guns']) # categories: keep only these newsgroups
print(len(news['target']))
2905
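To sanity-check what was loaded, the returned Bunch also carries the label names. A quick look (a sketch, reusing the news variable from the cell above):
print(news['target_names'])      # should list the three requested newsgroups
print(news['target'][0])         # integer label of the first post, an index into target_names
print(news['data'][0][:200])     # first 200 characters of that raw post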
3. Vectorize the text data
- TfidfVectorizer: turns raw text into TF-IDF feature vectors
tf_idf = TfidfVectorizer()
X = tf_idf.fit_transform(news['data'])
X
<2905x29045 sparse matrix of type '<class 'numpy.float64'>'
with 251634 stored elements in Compressed Sparse Row format>
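Each of the 29045 columns corresponds to one token in the learned vocabulary; with scikit-learn's defaults the weight is roughly tf(t, d) * (log((1 + n) / (1 + df(t))) + 1), followed by L2 normalization of each row. A small sketch for peeking at the heaviest terms of one document (get_feature_names_out needs a recent scikit-learn; older versions use get_feature_names):
import numpy as np

words = tf_idf.get_feature_names_out()   # vocabulary, one entry per column
doc0 = X[0].toarray().ravel()            # dense TF-IDF weights of the first document
top = np.argsort(doc0)[::-1][:10]        # indices of the 10 largest weights
print(list(zip(words[top], doc0[top])))  # (term, weight) pairs for this document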
4. Split the dataset
X_train,X_test,y_train,y_test = train_test_split(X,news['target']) # X_train and X_test are still sparse matrices
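train_test_split holds out 25% of the samples for testing by default; fixing random_state makes the split, and therefore the scores below, reproducible. A sketch with those defaults made explicit:
X_train, X_test, y_train, y_test = train_test_split(
    X, news['target'],
    test_size=0.25,      # explicit, same as the default
    random_state=42)     # fix the shuffle so the results can be reproduced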
5. Build the models
# 5.1 Gaussian naive Bayes - requires dense input, so convert with toarray()
%%time
gNB = GaussianNB()
gNB.fit(X_train.toarray(),y_train)
gNB.score(X_test.toarray(),y_test)
Wall time: 10.9 s
0.8500687757909215
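GaussianNB does not accept sparse input, which is why toarray() is needed; densifying the matrix is also what makes this model so much slower than the other two. A back-of-the-envelope check of the dense size (a sketch, using the shapes from this run):
rows, cols = X_train.shape
print(rows * cols * 8 / 1024**2, 'MB')   # float64 = 8 bytes per element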
# 5.2 Bernoulli naive Bayes
%%time
bNB = BernoulliNB() # Bernoulli distribution: each feature is modeled as present/absent
bNB.fit(X_train,y_train)
bNB.score(X_test,y_test)
Wall time: 68 ms
0.7317744154057771
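BernoulliNB binarizes its input (binarize=0.0 by default), so every nonzero TF-IDF weight collapses to 1 and only word presence/absence is kept, which helps explain the lower score here. A sketch making that threshold explicit:
bNB = BernoulliNB(binarize=0.0)   # any feature > 0.0 is treated as 1 (word present)
bNB.fit(X_train, y_train)
bNB.score(X_test, y_test)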
# 5.3 Multinomial naive Bayes
%%time
mNB = MultinomialNB()
mNB.fit(X_train,y_train)
mNB.score(X_test,y_test)
Wall time: 19.5 ms
0.9133425034387895
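MultinomialNB is the usual choice for TF-IDF / word-count style features, and it is both the fastest and the most accurate of the three here. To classify a new piece of text, transform it with the already-fitted vectorizer and map the predicted index back to a category name (a sketch; the example sentence is made up):
new_doc = ['The goalie made an incredible save in the third period.']  # hypothetical input text
vec = tf_idf.transform(new_doc)      # reuse the fitted vectorizer, do NOT refit it
pred = mNB.predict(vec)[0]           # integer class index
print(news['target_names'][pred])    # human-readable category name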