# Ajax page-crawling exercise, from "Python3 Web Crawler Development in Practice" (《Python3网络爬虫开发实战》)
from urllib.parse import urlencode
import requests
from bs4 import BeautifulSoup
import pymysql
# Base endpoint of Weibo's mobile-site Ajax API; query params are appended by get_page().
base_url = 'https://m.weibo.cn/api/container/getIndex?'
# Request headers that mimic a browser-issued Ajax call so the API responds normally.
headers = {
'Host': 'm.weibo.cn',
'Referer': 'https://m.weibo.cn/u/2830678474',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
'X-Requested-With': 'XMLHttpRequest',
# If X-Requested-With is null, the request is treated as a synchronous (regular) request.
# If X-Requested-With is XMLHttpRequest, the request is treated as an Ajax request.
}
def get_page(page):
params = {
'type': 'uid',
'value': '2145291155',#who
'containerid': '1076032145291155',
'page': page
}
url = base_url + urlencode(params)
try: