"""
1、爬取猫眼电影 top100榜
1、程序运行直接爬取第一页
2、是否继续爬取 y/n
y 爬取第二页
n 爬取结束,谢谢使用
3、把每一页的内容保存到本地 格式 第一页.html
普通版&类版
"""
"""
猫眼电影 top100
第一页:https://maoyan.com/board/4?offset=0
第二页:https://maoyan.com/board/4?offset=10
第三页:https://maoyan.com/board/4?offset=20
"""
import urllib.request
import urllib.parse
import re
url = "https://maoyan.com/board/4?offset="
headers = {"User-Agent":"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36"}
def zhixing(page_html=None):
    """Parse one page of the Maoyan TOP100 board and print each movie.

    Args:
        page_html: HTML text of one board page. Defaults to the
            module-level ``html`` global (set by the fetch code below),
            so existing no-argument calls keep working.

    Returns:
        list[str]: one "title stars release-time" line per movie found.
    """
    if page_html is None:
        # Backward-compatible fallback to the global set by the script body.
        page_html = html
    # f = open("第%d页.html"%page, 'w', encoding="utf8")
    # print("正在写入第%d页" % page)
    # f.write(html)
    # f.close()
    # print("第%d页写入完成" % page)
    # Raw strings: '\S' is an invalid escape sequence in a plain string
    # (DeprecationWarning today, SyntaxError in future Python versions).
    titles = re.findall(r'title="(\S+)" data-act', page_html)
    stars = re.findall(r'主演:(.+)\n', page_html)
    times = re.findall(r'上映时间:(.+)</p>', page_html)
    rows = []
    # zip() stops at the shortest list, so a page with fewer than 10
    # entries no longer raises IndexError (original hard-coded range(10)).
    for title, star, release in zip(titles, stars, times):
        line = title + ' ' + star + ' ' + release
        print(line)
        rows.append(line)
    return rows
# --- fetch and parse the first page (offset=0) ---
d = 0  # 'offset' query value: 0, 10, 20, ... (10 movies per page)
urls = url + str(d)
print(urls)
# Request with a browser User-Agent so Maoyan does not reject the crawler.
request = urllib.request.Request(urls,headers = headers)
response = urllib.request.urlopen(request)
page = 1  # current page number (only read by the commented-out save-to-file code in zhixing)
html = response.read().decode("utf-8")  # zhixing() reads this module-level global
zhixing()
# Keep fetching the next page while the user answers 'y'; any other
# answer ends the loop and falls through to the while-else branch.
while input("是否继续(y/n):") == 'y':
    page = page + 1
    d = d + 10  # advance the offset by one page (10 movies)
    urls = url + str(d)
    print(urls)
    request = urllib.request.Request(urls, headers=headers)
    response = urllib.request.urlopen(request)
    html = response.read().decode("utf-8")  # refresh the global zhixing() parses
    zhixing()
else:
    # while-else runs exactly once, after the condition turns false
    # (i.e. the user typed anything other than 'y').
    print("爬取结束,谢谢使用")
# NOTE(review): the two lines below are stray blog-page text accidentally
# pasted into the file; left here as comments so the script parses.
# Python爬虫技术——爬取猫眼电影TOP100榜单
# 最新推荐文章于 2024-11-25 22:59:28 发布