import pandas as pd
data_origin = pd.read_csv('data.csv', encoding='gbk')  # the source CSV is GBK-encoded
# Load the stopword file
with open('stopwords.txt', 'r', encoding='utf-8') as fp:
    stop_words = fp.readlines()  # returns a list of lines, newline included
# Strip the trailing whitespace/newline from each word with map (map returns a
# lazy iterator), then materialize it into a list with list()
stop_list = list(map(lambda word: word.strip(), stop_words))
print(len(stop_list))  # number of stopwords loaded
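# Side demo, not in the original snippet: map() returns a lazy iterator, which
# is why list() is needed above to materialize it.
demo = map(str.strip, [' 的 \n', ' 了 \n'])
print(demo)        # something like <map object at 0x...>
print(list(demo))  # ['的', '了']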
# Deduplicate the list; a set also gives O(1) membership tests in the loop below
stopword_list = set(stop_list)
data_content = data_origin['内容 ']  # comment-text column (note the trailing space in the CSV header)
data_tag = data_origin['评价']       # label column
import jieba
comment_list = []  # will hold the cleaned, segmented comments
for comment in data_content:
    # cut_all defaults to False, so jieba.cut runs in accurate mode here.
    # With cut_all=True (full mode), every word that can be formed is scanned
    # out, which is fast, e.g. 今天/今天天气/天天/天气/真好; see the short demo
    # after the loop.
    seglist = jieba.cut(comment, cut_all=False)
    # Drop the stopwords
    final = ""
    for seg in seglist:
        if seg not in stopword_list:  # keep the token only if it is not a stopword
            final += seg + " "        # space-separated so token boundaries survive
    comment_list.append(final)
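# A quick side demo, not in the original snippet, contrasting the two cut modes
# on the example sentence from the comment above (full-mode output as listed there):
print("/".join(jieba.cut("今天天气真好", cut_all=False)))  # accurate mode: one best segmentation
print("/".join(jieba.cut("今天天气真好", cut_all=True)))   # full mode: 今天/今天天气/天天/天气/真好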
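# A minimal follow-up sketch, not part of the original snippet: feeding the
# cleaned comments into a naive Bayes classifier, as the post's title suggests.
# The token_pattern override and the test_size value are assumptions here.
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB

# The default token pattern drops single-character words, which jieba produces
# often for Chinese, so keep every word of length >= 1.
vectorizer = CountVectorizer(token_pattern=r"(?u)\b\w+\b")
X = vectorizer.fit_transform(comment_list)  # bag-of-words counts per comment

x_train, x_test, y_train, y_test = train_test_split(X, data_tag, test_size=0.25)
model = MultinomialNB()
model.fit(x_train, y_train)
print(model.score(x_test, y_test))  # accuracy on the held-out comments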