A rising sophomore, bored over the summer break, flipping through books at random. Cui Qingcai's 《Python3网络爬虫开发实战》 is honestly a good read. Building on the Sina Weibo crawler already in the book, I added a feature that lets you enter a Sina Weibo uid to query.
The complete code is as follows:
from urllib.parse import urlencode
import requests

# Crawl Sina Weibo through its Ajax API.
# Fetching too many weibos is easily detected as crawling; adding a
# proxy fixes that (see the sketch after the code).
def get_page_json(value, page):  # fetch one page of a user's weibos as JSON
    base_url = 'https://m.weibo.cn/api/container/getIndex?'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
        'X-Requested-With': 'XMLHttpRequest',
        'Referer': 'https://m.weibo.cn/u/5229469494'
    }
    params = {
        'type': 'uid',
        'value': value,
        'containerid': '107603{}'.format(value),  # '107603' + uid selects the user's weibo list
        'page': page
    }
    url = base_url + urlencode(params)
    response = requests.get(url, headers=headers)
    return response.json()
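
# For example (illustrative, using the uid from the Referer header above),
# get_page_json('5229469494', 1) requests a URL of the form:
#   https://m.weibo.cn/api/container/getIndex?type=uid&value=5229469494&containerid=1076035229469494&page=1
# and the JSON that comes back is shaped roughly like:
#   {'ok': 1, 'data': {'cards': [{'card_type': 9, 'mblog': {...}}, ...]}}
# parse_json below walks this structure.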
def parse_json(data):  # parse one page's JSON and print each weibo
    items = data.get('data').get('cards')
    for item in items:
        mblog = item.get('mblog')
        if not mblog:  # some cards carry no weibo; skip them
            continue
        weibo = {}
        weibo['time'] = mblog.get('created_at')
        weibo['text'] = mblog.get('text')
        weibo['reposts'] = mblog.get('reposts_count')
        weibo['comments'] = mblog.get('comments_count')
        weibo['likes'] = mblog.get('attitudes_count')
        print(weibo)
value = input('input the weibo userid you want to search:')
pages = int(input('input pages of weibo you want:'))
for page in range(1, pages + 1):  # the API counts pages from 1
    print('Page {}'.format(page))
    parse_json(get_page_json(value, page))
# Adapted from Cui Qingcai's 《Python3网络爬虫开发实战》
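
As the comment at the top says, pulling many pages in a row tends to get flagged, and a proxy is the easy fix. Here is a minimal sketch of what that change looks like with requests; the proxy address 127.0.0.1:9743 is a placeholder, substitute whatever proxy you actually have:

# Hypothetical proxy setup: replace 127.0.0.1:9743 with a real proxy.
proxies = {
    'http': 'http://127.0.0.1:9743',
    'https': 'http://127.0.0.1:9743',
}
# Then pass it into the request inside get_page_json:
response = requests.get(url, headers=headers, proxies=proxies)

To run the script, enter a user's uid (it is the number in their m.weibo.cn profile URL, e.g. https://m.weibo.cn/u/5229469494) and the number of pages you want to fetch.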