from wordcloud import WordCloud,STOPWORDS
import PIL.Image as image
import numpy as np
import jieba
# 分词
def trans_CN(text):
    """Segment Chinese *text* with jieba and join the tokens with spaces.

    WordCloud tokenizes on whitespace, so Chinese text must be
    pre-segmented before it can be rendered.
    """
    return " ".join(jieba.cut(text))
# Read the source text, segment it, and render the word cloud.
# NOTE: Windows paths use raw strings — the original "C:\Windows\Fonts\..."
# relied on "\W", "\F", "\s" not being escape sequences, which raises
# SyntaxWarning on modern CPython and breaks silently for paths
# containing "\t", "\n", etc.
with open(r"C:\Users\Ygh\Desktop\note.txt", encoding="utf-8") as fp:
    text = fp.read()

# Pre-segment the Chinese text so WordCloud can split on spaces.
text = trans_CN(text)

# Optional mask image shaping the cloud — uncomment together with
# the mask= argument below.
# mask = np.array(image.open(r"F:\wordcloud\image\love.jpg"))

# Custom stop words on top of the library defaults.
stop_words = ["一斤", "元"] + list(STOPWORDS)

wordcloud = WordCloud(
    # mask=mask,
    background_color=(236, 245, 250),
    # A CJK-capable font is required, otherwise Chinese renders as boxes.
    font_path=r"C:\Windows\Fonts\simhei.ttf",
    stopwords=stop_words,
).generate(text)

# Render to a PIL image and display it.
image_produce = wordcloud.to_image()
image_produce.show()
# 使用 WordCloud 生成中文词云,并更换背景色、设置停用词
# 最新推荐文章于 2025-02-28 23:30:14 发布