Web Scraping in Practice: Crawling the Maoyan TOP100 Movies with Python

The complete scraper below fetches the ten ranking pages from maoyan.com/board/4, parses each movie entry with a regular expression, and appends the results as JSON lines to result.txt:

import requests
from requests.exceptions import RequestException
import re
import json

# Optional: fetch the pages in parallel with a process pool
# (per the original note, this finishes the crawl in about a second)
# from multiprocessing import Pool


def get_one_page(url):
    # The default User-Agent identifies the request as coming from Python,
    # which sites often filter out first, so send a browser-like UA instead.
    kv = {"user-agent": "Mozilla/5.0"}
    try:
        response = requests.get(url, headers=kv)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        return None


def parse_one_page(html):
    pattern = re.compile(
        r'<dd>.*?board-index.*?>(\d+)</i>.*?data-src="(.*?)".*?name"><a'
        r'.*?>(.*?)</a>.*?star">(.*?)</p>.*?releasetime">(.*?)</p>'
        r'.*?integer">(.*?)</i>.*?fraction">(.*?)</i>.*?</dd>',
        re.S)
    items = re.findall(pattern, html)
    for item in items:
        yield {
            'index': item[0],
            'image': item[1],
            'title': item[2],
            'actor': item[3].strip()[3:],   # drop the leading "主演:" label
            'time': item[4].strip()[5:],    # drop the leading "上映时间:" label
            'score': item[5] + item[6]      # integer part + fractional part
        }


def write_to_file(content):
    # Append each movie as one JSON line, keeping Chinese characters readable
    with open('result.txt', 'a', encoding='utf-8') as f:
        f.write(json.dumps(content, ensure_ascii=False) + '\n')


def main(offset):
    url = "http://maoyan.com/board/4?offset=" + str(offset)
    html = get_one_page(url)
    if html is None:  # request failed or was blocked
        return
    for item in parse_one_page(html):
        print(item)
        write_to_file(item)


if __name__ == '__main__':
    for i in range(10):
        main(i * 10)
    # Parallel version with a process pool:
    # pool = Pool()
    # pool.map(main, [i * 10 for i in range(10)])
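The commented-out lines at the end hint at fetching the ten pages with a process pool instead of the sequential loop. A minimal sketch of that variant, assuming main and the other functions are defined as above (note that concurrent workers may append lines to result.txt out of ranking order):

from multiprocessing import Pool

# get_one_page, parse_one_page, write_to_file and main stay the same;
# only the entry point changes.
if __name__ == '__main__':
    pool = Pool()                                # one worker per CPU core by default
    pool.map(main, [i * 10 for i in range(10)])  # offsets 0, 10, ..., 90
    pool.close()
    pool.join()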