Implementing Chinese word frequency counting in Python
import jieba

# 1. Read the text from file
with open('book.txt', 'r', encoding='UTF-8') as fp:
    txt = fp.read()

# 2. Segment the text into a list of words with jieba
words = jieba.lcut(txt)
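
The listing above stops after segmentation. A minimal sketch of the remaining counting and ranking steps might look like the following; it continues from the words list above, and the single-character filter and the top-10 cutoff are illustrative assumptions rather than part of the original code.

from collections import Counter

# 3. Count word frequencies; skipping single-character tokens drops most
#    punctuation and particles (an illustrative filter, adjust as needed)
counts = Counter(w for w in words if len(w) > 1)

# 4. Print the 10 most frequent words (top-10 is an arbitrary cutoff)
for word, freq in counts.most_common(10):
    print(f'{word}\t{freq}')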