#encoding=utf-8
import requests
from requests.exceptions import RequestException
import re
import json
from multiprocessing import Pool
import os


def get_one_page(url):
    try:
        headers = {
            'Host': 'maoyan.com',
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko',
            'Accept': '*/*',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN'
        }
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            return response.text  # use response.text here (already decoded)
        return None
    except RequestException:
        return None


def parse_one_page(html):
    pattern = re.compile(
        r'<dd>.*?board-index.*?>(\d*)</i>.*?data-src="(.*?)".*?name"><a.*?>(.*?)</a>.*?star">'
        r'(.*?)</p>.*?releasetime">(.*?)</p>'
        r'.*?integer">(.*?)</i>.*?fraction">(.*?)</i></p>.*?</dd>', re.S)
    items = re.findall(pattern, html)
    for item in items:
        yield {
            'index': item[0],
            'image': item[1],
            'title': item[2],
            'actor': item[3],
            'time': item[4],
            'score': item[5] + item[6]
        }


def write_to_file(content):
    # append mode ('a') adds a record on every call instead of overwriting the file;
    # ensure_ascii=False keeps the Chinese text readable in the output
    with open('maoyan1.txt', 'a', encoding='utf-8') as f:
        f.write(json.dumps(content, ensure_ascii=False) + '\n')


def save_image_file(url, path):
    '''Save the movie cover image.'''
    image_file = requests.get(url)
    if image_file.status_code == 200:
        with open(path, 'ab+') as f:
            f.write(image_file.content)


def main(offset):
    url = 'http://maoyan.com/board/4?offset=' + str(offset)
    html = get_one_page(url)
    if html is None:
        return
    if not os.path.exists('covers'):
        os.mkdir('covers')
    for item in parse_one_page(html):
        print(item)
        write_to_file(item)
        save_image_file(item['image'], 'covers/' + '%03d' % int(item['index']) + item['title'] + '.jpg')


if __name__ == '__main__':
    # for i in range(10):
    #     main(i * 10)
    pool = Pool()
    pool.map(main, [i * 10 for i in range(10)])  # process pool: scrape the 10 pages in parallel
    # Question: I don't fully understand the file I/O here — how do I keep writing to the file without overwriting it?
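

# A minimal sketch of the file-mode behaviour the question above asks about,
# using a hypothetical file name 'demo.txt': mode 'w' truncates the file each
# time it is opened, while mode 'a' seeks to the end and appends. That is why
# write_to_file() above, which opens 'maoyan1.txt' with 'a', keeps adding
# records instead of overwriting the earlier ones.
def _append_mode_demo():
    with open('demo.txt', 'w', encoding='utf-8') as f:
        f.write('first line\n')    # file now contains only this line
    with open('demo.txt', 'a', encoding='utf-8') as f:
        f.write('second line\n')   # appended; 'first line' is still there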