Crawler code:
# encoding=utf-8
import requests
from bs4 import BeautifulSoup
import time
import random

# Pages 1-49 of the "sweet words" category
urls = ['https://www.duanwenxue.com/huayu/tianyanmiyu/list_{}.html'.format(str(i)) for i in range(1, 50)]

def get_one_page(url):
    resp = requests.get(url)
    if resp.status_code == 200:
        soup = BeautifulSoup(resp.text, 'lxml')
        phases = soup.select('body > div.row.inner-row > div.row-left > div.list-short-article > ul > li > p > a')
        # Append ('ab'), not overwrite ('wb'): this function runs once per page,
        # so 'wb' would leave only the last page's text in the file
        with open(r'C:\Users\Administrator\Desktop\sweet_words.txt', 'ab') as f:
            for phase in phases:
                data = phase.get_text()
                print(data)
                f.write((data + '\n').encode('utf-8'))
    else:
        print('not found')

if __name__ == '__main__':
    for url in urls:
        get_one_page(url)
        time.sleep(random.randint(2, 10))  # random delay so we don't hammer the site
    print('ok')
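One practical caveat: some sites reject the default python-requests User-Agent, which shows up as 403s or empty pages. A minimal tweak (the UA string below is only an example) is to send browser-like headers and a timeout:

import requests

# Browser-like headers; the timeout keeps a dead connection from hanging forever
HEADERS = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'}

def fetch(url):
    return requests.get(url, headers=HEADERS, timeout=10)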
Word segmentation and word cloud
import jieba
import numpy as np
import codecs
import pandas
import matplotlib.pyplot as plt
from wordcloud import WordCloud, ImageColorGenerator

# Word segmentation
# (scipy.misc.imread has been removed from scipy; the heart-shaped mask at
# the end is loaded with PIL instead)
file = codecs.open(r"C:\Users\Administrator\Desktop\sweet_words.txt", 'r', encoding='UTF-8')
content = file.read()
file.close()
segment = []
# jieba runs its own segmentation algorithm; iterate over the tokens it
# yields, keeping everything longer than one character that isn't a newline
segs = jieba.cut(content)
for seg in segs:
    if len(seg) > 1 and seg != '\r\n':
        segment.append(seg)
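# Quick sanity check of the segmenter -- the sentence is a made-up sample,
# and the exact tokens depend on jieba's bundled dictionary version
print('/'.join(jieba.cut('今晚的月色真美')))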
# Tally the segmentation results
words_df = pandas.DataFrame({'segment': segment})  # build a DataFrame of tokens
words_df.head()  # notebook-style preview
# The dict form of .agg() has been removed from pandas; groupby().size()
# produces the same per-word counts ('计数' = "count")
words_stat = words_df.groupby('segment').size().reset_index(name='计数')
words_stat  # notebook-style preview
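# Sanity check: the ten most frequent words, sorted descending by count
print(words_stat.sort_values('计数', ascending=False).head(10))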
# Jupyter magic -- drop this line if running as a plain .py script
%matplotlib
# Build the WordCloud object: pass a font path (required for Chinese glyphs)
# and a background color
wordcloud = WordCloud(font_path=r"C:\simhei.ttf", background_color="white", max_words=200)
# WordCloud's fit_words expects a {word: frequency} dict, but our counts sit
# in a DataFrame. Set the token column as the index, then call to_dict(),
# which yields {'计数': {word: count, ...}}
words = words_stat.set_index('segment').to_dict()
# fit_words feeds the frequencies into the cloud
wordcloud.fit_words(words['计数'])
# Plot
plt.imshow(wordcloud)
plt.axis('off')  # hide the axis ticks around the image
plt.show()
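If you want to keep the image rather than only display it, WordCloud can write a PNG straight to disk; the filename below is just an example:

wordcloud.to_file('sweet_words_cloud.png')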
Fonts of all kinds can be found online. The heart image was cropped in Photoshop; it must actually be heart-shaped, not a plain square image.
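Note that the code above never actually applies the heart image, even though imread and ImageColorGenerator were imported for exactly that. Below is a minimal sketch of the mask step, assuming the cropped image is saved as heart.png on the desktop (a hypothetical path) and loading it with PIL since scipy.misc.imread is gone; it reuses the words dict built above:

import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from wordcloud import WordCloud, ImageColorGenerator

# Pure-white regions of the mask stay empty; the words fill the heart shape
mask = np.array(Image.open(r'C:\Users\Administrator\Desktop\heart.png'))
wc = WordCloud(font_path=r'C:\simhei.ttf', background_color='white',
               max_words=200, mask=mask)
wc.fit_words(words['计数'])  # 'words' comes from the script above
# Optionally recolor the words from the colors of the mask image itself
plt.imshow(wc.recolor(color_func=ImageColorGenerator(mask)))
plt.axis('off')
plt.show()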