Approach: 1. read all the article titles; 2. split each title into words with the jieba ("结巴") segmentation toolkit; 3. compute TF-IDF (term frequency-inverse document frequency) weights with the scikit-learn toolkit; 4. keep the words whose weight reaches the keyword threshold.
For jieba word segmentation, see: the jieba GitHub repository
For scikit-learn, see: Text feature extraction, section 4.2.3.4, "Tf-idf term weighting"
import os
import sys

import jieba
from sklearn.feature_extraction.text import TfidfVectorizer

sys.path.append("../")
# Load the user dictionary so that domain-specific terms are kept as single tokens
jieba.load_userdict('userdictTest.txt')
# Generic title words (plus the "txt" file-name extension) that should not count as keywords
STOP_WORDS = set((
    "基于", "面向", "研究", "系统", "设计", "综述", "应用", "进展", "技术", "框架", "txt"
))
def getFileList(path):
    # Collect the file names under path (each file name is an article title), skipping hidden files
    filelist = []
    for f in os.listdir(path):
        if not f.startswith('.'):
            filelist.append(f)
    return filelist, path
def fenci(filename, path, segPath):
    # Create the folder that holds the segmentation results
    if not os.path.exists(segPath):
        os.mkdir(segPath)
    # The file name itself is the article title; segment it with jieba
    seg_list = jieba.cut(filename)
    # Join the tokens with spaces and save them to a file of the same name under segPath
    result = " ".join(seg_list)
    with open(os.path.join(segPath, filename), "w", encoding="utf-8") as f:
        f.write(result)
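A minimal sketch of steps 3 and 4 from the plan above: read the segmented titles back in, compute TF-IDF weights with TfidfVectorizer, and keep the words whose weight reaches a threshold. The helper name getKeywords, the folders ./title and ./seg, and the 0.5 threshold are illustrative assumptions rather than part of the original code; get_feature_names_out assumes scikit-learn 1.0 or newer.
def getKeywords(segPath, threshold=0.5):
    # Read each segmented title back in as one space-separated document
    files, _ = getFileList(segPath)
    corpus = []
    for f in files:
        with open(os.path.join(segPath, f), encoding="utf-8") as fp:
            corpus.append(fp.read())
    # TF-IDF over the whole corpus; words in STOP_WORDS are excluded from the vocabulary
    vectorizer = TfidfVectorizer(stop_words=list(STOP_WORDS))
    tfidf = vectorizer.fit_transform(corpus)
    words = vectorizer.get_feature_names_out()
    # Keep every word whose weight in at least one title reaches the threshold
    keywords = set()
    for row in tfidf.toarray():
        for word, weight in zip(words, row):
            if weight >= threshold:
                keywords.add(word)
    return keywords

if __name__ == "__main__":
    # Hypothetical layout: ./title holds the title files, ./seg receives the segmented copies
    titles, path = getFileList("./title")
    for t in titles:
        fenci(t, path, "./seg")
    print(getKeywords("./seg", threshold=0.5))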