# Import modules
import re
import os
import time
import random
import requests
import pandas as pd
import jieba
from wordcloud import WordCloud
from wordcloud import STOPWORDS # built-in (English) stop words
import matplotlib.pyplot as plt
%matplotlib inline
# Scrape the comments
# The script takes a long time to run and rerunning it risks getting the IP banned, so do not run it repeatedly!!!
# Set up proxies and request headers
proxies = [{'http': 'http://58.212.42.116:36708'}, {'http': 'http://117.57.91.53:9999'}, {'http': 'http://123.169.35.184:9999'}]
header ={'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.42 Safari/537.36'
,'cookie':'ll="108296"; bid=gusxxUTWp9I; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1581049781%2C%22https%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3Dmzh_Pk4i-povaM7pB3vRB7Et4D-K6Lg5L8Tn5YIv-hx2ueyIsbHUqdPpmRj87dk_%26wd%3D%26eqid%3D83bf0904000135db000000045e3ce7b1%22%5D; _pk_ses.100001.4cf6=*; ap_v=0,6.0; __utma=30149280.1211952577.1580917272.1580950353.1581049782.3; __utmc=30149280; __utmz=30149280.1581049782.3.3.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; __utmz=223695111.1581049782.3.3.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; __utmc=223695111; __utmb=223695111.0.10.1581049782; __utma=223695111.566881256.1580917283.1580950353.1581049782.3; __utmb=30149280.1.10.1581049782; __yadk_uid=crWVMXvktYBNgceapMv5fFt1bmAOsI7g; __gads=ID=d77b8fa744b89a30:T=1581049833:S=ALNI_MYwCr7Wx1HzWSH2WE1N6CmGIArotg; _vwo_uuid_v2=D6159A510239E43D8B4D907A1857327CA|f6f15951d7bc4f3169dbfc8755cf9d9d; dbcl2="195651471:6XKseD+bWlc"; ck=VMGV; _pk_id.100001.4cf6=e99d9ec95c88db5a.1580917283.3.1581051285.1580950723.; push_noty_num=0; push_doumail_num=0'
,'Connection':'keep-alive'}
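# Optional: spot-check that one of the proxies is reachable before the long scrape.
# A minimal sketch; https://httpbin.org/ip is an assumed public echo endpoint, not part of the original script.
try:
    probe = requests.get('https://httpbin.org/ip', proxies=random.choice(proxies), timeout=5)
    print('proxy check:', probe.json())
except Exception as e:
    print('proxy check failed:', e)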
start = '0'
while True:
    try:
        url = r'https://movie.douban.com/subject/25875034/comments?start=' + start + '&limit=20&sort=new_score&status=P'
        response = requests.get(url, headers=header, proxies=random.choice(proxies))
        html = response.text  # page source
        # Regular expression for the short comments
        patt = '<span class="short">(.*?)</span>'
        regrex = re.compile(patt, re.S)
        # re.S lets '.' match any character including '\n'; plain '.' matches anything except '\n'
        # (while \s matches whitespace only). Without re.S the pattern can only match within a single
        # line, so a comment that contains a line break would not be captured in full.
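        # For example (a hypothetical two-line snippet, not real Douban HTML):
        #   re.findall(patt, '<span class="short">a\nb</span>')        -> []  ('.' cannot cross '\n')
        #   re.findall(patt, '<span class="short">a\nb</span>', re.S)  -> ['a\nb']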
        short = re.findall(regrex, html)
        if response.status_code == 200:
            print('Scraping comments at start=%s' % start)
            # mode 'a' appends to whatever is already in the file
            with open(r'./豆瓣舌尖中国.txt', 'a', encoding='utf-8') as f:
                for one_short in short:
                    f.write(one_short + '\n')
        # Regular expression for the "next page" offset
        patt2 = re.compile(r'.*?start=(\d+).*?class="next">后页 ></a>')
        # findall returns a list even when there is a single match, so take the value by index.
        # On the last page there is no "next" link, the indexing raises IndexError,
        # and the except clause below ends the loop.
        start = re.findall(patt2, html)[0]
        time.sleep(random.randint(6, 10))
    except Exception as e:
        print(e)
        break
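# Optional sanity check before the analysis step: count how many comments were collected.
# A minimal sketch; the filename is the same one the scrape loop writes to.
with open(r'./豆瓣舌尖中国.txt', 'r', encoding='utf-8') as f:
    print('collected %d comments' % len(f.readlines()))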
# Custom stop words: the STOPWORDS set bundled with wordcloud only covers English words,
# so common Chinese filler terms have to be listed by hand
stop_word = [',', '的', '说实话', '看了第一集', '舌尖上的中国']
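# The STOPWORDS import above can be merged with the custom list.
# A sketch assuming the English set was meant to be included as well;
# WordCloud accepts any collection of words for its stopwords argument.
stop_word = set(STOPWORDS) | set(stop_word)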
# Word-cloud analysis, using a background image as the mask
with open('./豆瓣舌尖中国.txt', 'r', encoding='utf-8') as f:
txt = f.read()
# Segment the text with jieba
new_txt = jieba.lcut(txt)
# WordCloud splits its input on whitespace, so join the tokens with spaces;
# joining with '' would just rebuild the original unsegmented string
new_txt = ' '.join(new_txt)
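# For reference, jieba.lcut returns a list of tokens, e.g. (hypothetical output,
# the exact split depends on jieba's dictionary):
#   jieba.lcut('舌尖上的中国') -> ['舌尖', '上', '的', '中国']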
# Load the mask image
background_image = plt.imread('mask.jpg')
w = WordCloud(width=1000,
              height=700,
              font_path=r'C:\Windows\Fonts\STLITI.TTF',
              background_color='white',
              stopwords=stop_word,
              mask=background_image,
              max_words=200)
wordc = w.generate(new_txt)
plt.imshow(wordc)
plt.axis('off') # hide the axes
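# Optionally persist the figure; 'wordcloud.png' is a hypothetical output path
w.to_file('wordcloud.png')
plt.show()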