Use requests to crawl the Maoyan Top 100 board, extract the required fields with regular expressions, and save the data to a file.
# Successfully scraped all the data and saved it to a file (reference code: https://github.com/Germey/TouTiao/blob/master/spider.py)
import requests
import re
import json
from multiprocessing import Pool  # Python multiprocessing worker pool
url='http://maoyan.com/board/4?offset='
headers={'User-Agent':'Mozilla/5.0 (Windows NT 6.1; rv:57.0) Gecko/20100101 Firefox/57.0'}
def get(url):
    # Download one page of the board and yield one dict per movie
    response=requests.get(url,headers=headers)
    response.encoding='utf-8'
    #print(response.text)
    # Each movie sits inside a <dd> block; the groups capture rank, poster URL,
    # title, cast, release date, and the two halves of the score
    pattern = re.compile(r'<dd>.*?(\d+)</i>.*?data-src="(.*?)".*?name"><a.*?>(.*?)</a>.*?star">(.*?)</p>.*?releasetime">(.*?)</p>.*?score">.*?integer">(.*?)</i>.*?fraction">(.*?)</i>.*?</dd>',re.S)
    info=re.findall(pattern,response.text)
    for item in info:
        yield {
            'index':item[0].strip(),
            'img':item[1].strip(),
            'name':item[2].strip(),
            'star':item[3].strip()[3:],          # drop the "主演:" prefix
            'releasetime':item[4].strip()[5:],   # drop the "上映时间:" prefix
            'score':item[5].strip()+item[6].strip()  # integer part + fraction part, e.g. "9." + "1"
        }
def main(url):
    # Append one JSON line per movie; with several processes appending to the
    # same file, lines from different pages may arrive in any order
    with open(r'D:\cs.txt','a',encoding='utf-8') as f:
        for i in get(url):
            f.write(json.dumps(i,ensure_ascii=False))
            f.write('\n')
if __name__ == '__main__':
    # Fetch the 10 pages (offset=0,10,...,90) with a pool of worker processes
    pool = Pool()
    pool.map(main,[url+str(x) for x in range(0,100,10)])
    pool.close()
    pool.join()
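
To check the result, the saved file can be read back line by line; the snippet below is a minimal sketch, assuming D:\cs.txt has already been written by the script above.

import json

# Read the scraped JSON lines back and restore rank order
# (pages are written by separate processes, so lines may be out of order).
with open(r'D:\cs.txt',encoding='utf-8') as f:
    movies=[json.loads(line) for line in f if line.strip()]
movies.sort(key=lambda m:int(m['index']))
print(len(movies))  # should be 100 once every page has been scraped
print(movies[0]['name'],movies[0]['score'])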