1. The scraper source code
import requests
from requests.exceptions import RequestException
import re
import json
def get_one_page(url):
    try:
        headers = {  # Note: you have to construct request headers yourself; scraping keeps getting harder these days
            'Host': 'maoyan.com',
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko',
            'Accept': '*/*',
            'Cookie': '__mta=188504343.1585564348951.1585572276562.1585572280897.7; _csrf=ebfe1c23a4d96029614'
                      '05c363e3cf3f98267d86470d7ca6c313115588fa118ac; Hm_lpvt_703e94591e87be68cc8da0da7cbd0be2'
                      '=1585572281; lt=7OiE5kMu6MrHiBaxwKx07KTQhGsAAAAAQwoAAPAJrqO-H-fhERYDNfNmzGlzgHnMMmU9xXl'
                      'xr9DawRc5qBgseZFG-2qgrttITxQn0Q; lt.sig=v4coglI_OOFTaEJY7RFaGMf3Rew; uuid_n_v=v1; mojo-'
                      'uuid=cfd6075c8bc5b2dd547ad5bb2283597a; uuid=C15C8B40727111EAA672FF30D787611C3D4297010620'
                      '41BFB1C117867DEE024C; _lxsdk_cuid=1712afff1dfc8-04cfe438e1872-71415a3a-159fe7-1712afff1e0'
                      'c8; Hm_lvt_703e94591e87be68cc8da0da7cbd0be2=1585564349,1585570378; mojo-session-id={"id":"'
                      'fe5a0b51302cafb98065da43ae4fff93","time":1585570296050}; mojo-trace-id=9; _lxsdk=C15C8B4072'
                      '7111EAA672FF30D787611C3D429701062041BFB1C117867DEE024C; _lxsdk_s=1712b591914-65b-fcf-8b4%7C%7C13',
            # Note: this is my local Cookie; replace it with your own, since Maoyan now requires
            # verification (remove the Cookie and try it to see for yourself)
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN'
        }
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        return None
def parse_one_page(html):
    # Raw strings avoid invalid-escape warnings for sequences like \d in Python 3
    pattern = re.compile(r'<dd>.*?board-index.*?>(\d+)</i>.*?data-src="(.*?)".*?name"><a.*?>(.*?)</a>.*?star">'
                         r'(.*?)</p>.*?releasetime">(.*?)</p>'
                         r'.*?integer">(.*?)</i>.*?fraction">(.*?)</i></p>.*?</dd>', re.S)
    items = re.findall(pattern, html)
    for item in items:
        yield {
            'index': item[0],
            'image': item[1],
            'title': item[2],
            'actor': item[3].strip()[3:],  # drop the leading '主演:' label (3 characters)
            'time': item[4].strip()[5:],   # drop the leading '上映时间:' label (5 characters)
            'score': item[5] + item[6]     # integer part + fraction part, e.g. '9.' + '5'
        }
def write_to_file(content):
    # append one JSON object per line; the with-block closes the file automatically
    with open('result.txt', 'a', encoding='utf-8') as f:
        f.write(json.dumps(content, ensure_ascii=False) + '\n')
'''
# Adapted from: https://blog.youkuaiyun.com/m0_37438418/article/details/80698151
def save_image_file(url, path):
    img_file = requests.get(url)
    if img_file.status_code == 200:
        with open(path, 'ab+') as f:
            f.write(img_file.content)
'''
def main(offset):
    url = 'https://www.maoyan.com/board/4?offset=' + str(offset)
    html = get_one_page(url)
    if html is None:  # non-200 response or request exception; skip this page
        return
    print(html)  # debug: dump the raw page
    for item in parse_one_page(html):
        print(item)
        write_to_file(item)
        # save_image_file(item['image'], 'covers_' + '%03d' % int(item['index']) + item['title'] + '.jpg')
if __name__ == '__main__':
    for i in range(10):
        main(i * 10)
    # Or use a process pool to speed things up (see the sketch below):
    # from multiprocessing import Pool
    # pool = Pool()
    # pool.map(main, [i * 10 for i in range(10)])
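If you want the process-pool variant from the comment above as runnable code, here is a minimal sketch (my own expansion of that comment, not from the original post). Note that the __main__ guard is required on Windows, and that with several processes appending to result.txt concurrently, the order of lines in the file is no longer guaranteed:

from multiprocessing import Pool

if __name__ == '__main__':
    pool = Pool()                                 # one worker per CPU core by default
    pool.map(main, [i * 10 for i in range(10)])   # offsets 0, 10, ..., 90
    pool.close()
    pool.join()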
2. The scraping results
The scraper prints each movie to the console and appends it to result.txt as one JSON object per line.
3. A few takeaways
1. At first, with the code exactly as Cui Qingcai originally wrote it, no request headers were constructed, and the response was the HTML of a verification page, so the headers have to be built by hand (the reason is easy to guess: scraping keeps getting harder).
2. Note that the request headers need to include a Cookie. I tried crawling without one (the very first request happened to succeed), and the response was once again the verification HTML (infuriating...). A quick check for this case is sketched right after this list.
3. This hands-on exercise is good further practice with regular expressions; everything else (the requests library and so on) is easy to follow. A small demo of the pattern follows the sketch below.
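For point 2, you can check programmatically whether you got the real board page or the verification page. The snippet below is a minimal sketch of my own; the marker string 'board-index' is an assumption based on the regex in parse_one_page (it occurs once per ranked movie on the genuine page and should be absent from the verification page):

html = get_one_page('https://www.maoyan.com/board/4?offset=0')
if html is None or 'board-index' not in html:
    # failed request, or we were served the verification page instead of the board
    print('Blocked: refresh the Cookie in the request headers and retry.')
else:
    print('Got the real ranking page.')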
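And for point 3, to get a feel for what each capture group in the regular expression grabs, here is a self-contained demo on a single simplified <dd> block; the markup is abridged from Maoyan's board page and the field values are illustrative:

import re

sample = ('<dd><i class="board-index board-index-1">1</i>'
          '<img data-src="https://p0.meituan.net/movie/example.jpg" alt="">'
          '<p class="name"><a href="/films/1203">霸王别姬</a></p>'
          '<p class="star">主演:张国荣,张丰毅,巩俐</p>'
          '<p class="releasetime">上映时间:1993-01-01</p>'
          '<p class="score"><i class="integer">9.</i><i class="fraction">5</i></p></dd>')

pattern = re.compile(r'<dd>.*?board-index.*?>(\d+)</i>.*?data-src="(.*?)".*?name"><a.*?>(.*?)</a>.*?star">'
                     r'(.*?)</p>.*?releasetime">(.*?)</p>'
                     r'.*?integer">(.*?)</i>.*?fraction">(.*?)</i></p>.*?</dd>', re.S)

print(re.findall(pattern, sample))
# [('1', 'https://p0.meituan.net/movie/example.jpg', '霸王别姬',
#   '主演:张国荣,张丰毅,巩俐', '上映时间:1993-01-01', '9.', '5')]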
Reference: Cui Qingcai's Python 3 web-scraping tutorial; free videos are available on Bilibili.
For a more complete and comprehensive version of the code, see Cui's https://github.com/Germey/TouTiao/blob/master/spider.py