First, set up the distributed manager: register the queues held by the control node with the manager and expose them over the network.
url_manager_proc() is the URL-manager process; it handles the bookkeeping for every URL:
if the URL manager holds unprocessed links, it pushes them into the url_q queue;
if the conn_q queue contains newly discovered content, it feeds it back into the URL manager.
result_solve_proc() deals with the data coming back from the spider scheduler: the record crawled for each URL is read from the result_q queue one by one;
any newly discovered URLs are put into the conn_q queue,
while the extracted data goes into the storage queue store_q.
store_proc() works through the store_q queue and stores each item in a database or a local file.
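Everything above rests on the queue-sharing pattern of multiprocessing.managers.BaseManager: the control node owns the real Queue objects and serves them on the network, while worker nodes register only the names and get back proxies. The following is a minimal sketch of that pattern on a single machine; the port 8001 and authkey b'ceshi' match the real code below, while the class names ServerManager/ClientManager and the commented-out calls are illustrative only (the real control node registers directly on BaseManager, which works the same way).

# Minimal sketch of the BaseManager queue-sharing pattern (server and client
# normally live in different processes or machines; the blocking calls are
# left commented out so the snippet runs as-is).
from multiprocessing.managers import BaseManager
from queue import Queue

task_q = Queue()

class ServerManager(BaseManager):
    pass

class ClientManager(BaseManager):
    pass

# Control-node side: bind a network-visible name to a callable returning the real queue.
ServerManager.register('get_task_queue', callable=lambda: task_q)
server = ServerManager(address=('', 8001), authkey=b'ceshi')
# server.get_server().serve_forever()   # blocks; the control node calls this last

# Worker side: register the name only, connect, and use the returned proxy like a Queue.
ClientManager.register('get_task_queue')
client = ClientManager(address=('127.0.0.1', 8001), authkey=b'ceshi')
# client.connect()
# client.get_task_queue().put('http://baike.baidu.com/item/网络爬虫')

Control Node Code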
from multiprocessing.managers import BaseManager
import time
from multiprocessing import Process, Queue
from DataOutputNew import DataOutputNew
from URLManagerNew import UrlManagerNew
class NodeManage(object):
    def start_Manager(self, url_q, result_q):
        # Register the two local queues under network-visible names and expose
        # them on port 8001, protected by the authkey.
        BaseManager.register('get_task_queue', callable=lambda: url_q)
        BaseManager.register('get_result_queue', callable=lambda: result_q)
        manager = BaseManager(address=('', 8001), authkey=b'ceshi')
        return manager
    def url_manager_proc(self, url_q, conn_q, root_url):
        url_manager = UrlManagerNew()
        url_manager.add_new_url(root_url)
        while True:
            # Push every pending URL to the shared task queue.
            while url_manager.has_new_url():
                new_url = url_manager.get_new_url()
                url_q.put(new_url)
                print('old_url=', url_manager.old_url_size())
                if url_manager.old_url_size() > 100:
                    # Enough pages crawled: notify the workers and persist progress.
                    url_q.put('end')
                    print('Control node sent the stop notification!')
                    url_manager.save_progress('new_urls.txt', url_manager.new_urls)
                    url_manager.save_progress('old_urls.txt', url_manager.old_urls)
                    return
            # Pull newly discovered URLs back from the result-analysis process.
            try:
                if not conn_q.empty():
                    urls = conn_q.get()
                    url_manager.add_new_urls(urls)
            except Exception:
                time.sleep(0.1)
    def result_solve_proc(self, result_q, conn_q, store_q):
        while True:
            try:
                if not result_q.empty():
                    content = result_q.get(True)
                    if content['new_urls'] == 'end':
                        # The worker has forwarded the stop notification.
                        print('Result-analysis process received the stop notification!')
                        store_q.put('end')
                        return
                    # Route new URLs back to the URL manager and data to storage.
                    conn_q.put(content['new_urls'])
                    store_q.put(content['data'])
                else:
                    time.sleep(0.1)
            except Exception:
                time.sleep(0.1)
    def store_proc(self, store_q):
        output = DataOutputNew()
        while True:
            if not store_q.empty():
                data = store_q.get()
                if data == 'end':
                    print('Storage process received the stop notification!')
                    output.output_end(output.filepath)
                    return
                output.store_data(data)
            else:
                time.sleep(0.1)
if __name__ == '__main__':
    # The four queues connecting the three control-node processes.
    url_q = Queue()
    result_q = Queue()
    store_q = Queue()
    conn_q = Queue()
    node = NodeManage()
    manager = node.start_Manager(url_q, result_q)
    url_manager_proc = Process(target=node.url_manager_proc,
                               args=(url_q, conn_q, 'http://baike.baidu.com/item/网络爬虫',))
    result_solve_proc = Process(target=node.result_solve_proc,
                                args=(result_q, conn_q, store_q,))
    store_proc = Process(target=node.store_proc, args=(store_q,))
    url_manager_proc.start()
    result_solve_proc.start()
    store_proc.start()
    # serve_forever() blocks, so it is called last in the main process.
    manager.get_server().serve_forever()
Spider Scheduler Code
from multiprocessing.managers import BaseManager
import time
from HtmlDownloader import HtmlDownloader
from HtmlParser import HtmlParser
class SpiderWork(object):
    def __init__(self):
        # Register the queue names only; the real queues live on the control node.
        BaseManager.register('get_task_queue')
        BaseManager.register('get_result_queue')
        server_addr = '127.0.0.1'
        print('Connect to server %s...' % server_addr)
        self.m = BaseManager(address=(server_addr, 8001), authkey=b'ceshi')
        self.m.connect()
        self.task = self.m.get_task_queue()
        self.result = self.m.get_result_queue()
        self.downloader = HtmlDownloader()
        self.parser = HtmlParser()
        print('init finish')
    def crawl(self):
        while True:
            try:
                if not self.task.empty():
                    url = self.task.get()
                    if url == 'end':
                        print('The control node told the spider node to stop working...')
                        # Forward the stop notification to the result queue.
                        self.result.put({'new_urls': 'end', 'data': 'end'})
                        return
                    print('Spider node is parsing: %s' % url)
                    content = self.downloader.download(url)
                    new_urls, data = self.parser.parser(url, content)
                    self.result.put({'new_urls': new_urls, 'data': data})
                else:
                    time.sleep(0.1)
            except EOFError:
                print('Lost the connection to the control node')
                return
if __name__ == '__main__':
    spider = SpiderWork()
    spider.crawl()
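If more than one worker node is wanted, each one simply runs its own SpiderWork instance against the same control node. The launcher below is a hypothetical sketch for starting three workers on one machine (the module name SpiderWorkNode is assumed, since the original file name is not given). Note that the control node puts a single 'end' marker on the task queue, so only the worker that happens to read it will exit cleanly; the others keep polling.

# Hypothetical launcher for several worker processes on one machine; assumes
# the SpiderWork class above was saved as SpiderWorkNode.py and that the
# control node is already serving on 127.0.0.1:8001.
from multiprocessing import Process
from SpiderWorkNode import SpiderWork   # assumed module name

def run_worker():
    SpiderWork().crawl()

if __name__ == '__main__':
    workers = [Process(target=run_worker) for _ in range(3)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()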
URL Manager Code
import pickle
import hashlib
class UrlManagerNew(object):
    def __init__(self):
        # new_urls holds raw URLs still to crawl; old_urls holds shortened md5
        # digests of URLs that have already been handed out.
        self.new_urls = self.load_progress('new_urls.txt')
        self.old_urls = self.load_progress('old_urls.txt')
    def load_progress(self, path):
        """
        Load saved progress from a local file.
        :param path: file path
        :return: a set (empty if no progress file exists)
        """
        print('[+] Loading progress from file: %s' % path)
        try:
            with open(path, 'rb') as f:
                return pickle.load(f)
        except Exception:
            print('[!] No progress file, creating %s' % path)
            return set()
    def save_progress(self, path, data):
        """
        Save progress.
        :param path: file path
        :param data: the set to persist
        :return:
        """
        with open(path, 'wb') as f:
            pickle.dump(data, f)
    def has_new_url(self):
        return self.new_url_size() != 0
    def get_new_url(self):
        # Hand out one URL and record a shortened md5 digest of it as "seen".
        new_url = self.new_urls.pop()
        m = hashlib.md5()
        m.update(new_url.encode('utf8'))
        self.old_urls.add(m.hexdigest()[8:-8])
        return new_url
    def add_new_url(self, url):
        if url is None:
            return
        m = hashlib.md5()
        m.update(url.encode('utf8'))
        murl_md5 = m.hexdigest()[8:-8]
        # new_urls stores raw URLs while old_urls stores digests, so compare
        # each against the matching representation.
        if url not in self.new_urls and murl_md5 not in self.old_urls:
            self.new_urls.add(url)
    def add_new_urls(self, urls):
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            self.add_new_url(url)
    def new_url_size(self):
        return len(self.new_urls)
    def old_url_size(self):
        return len(self.old_urls)
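A quick illustrative check of the dedup behaviour, assuming a fresh run with no saved progress files on disk:

# Illustrative only: exercises UrlManagerNew directly, assuming new_urls.txt
# and old_urls.txt do not exist yet (otherwise the counts below will differ).
if __name__ == '__main__':
    um = UrlManagerNew()
    um.add_new_url('http://baike.baidu.com/item/网络爬虫')
    um.add_new_url('http://baike.baidu.com/item/网络爬虫')   # duplicate, ignored
    print(um.new_url_size())                                 # 1
    url = um.get_new_url()                                   # md5 digest moves into old_urls
    um.add_new_url(url)                                      # already seen, ignored
    print(um.new_url_size(), um.old_url_size())              # 0 1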
Data Storage Code
import codecs
import time
class DataOutputNew(object):
    def __init__(self):
        self.filepath = 'baike_%s.html' % (time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime()))
        self.output_head(self.filepath)
        self.datas = []
    def store_data(self, data):
        if data is None:
            return
        self.datas.append(data)
        # Flush to disk in batches so the in-memory buffer stays small.
        if len(self.datas) > 10:
            self.output_html(self.filepath)
    def output_head(self, path):
        # Write the opening tags of the HTML report once, when the file is created.
        fout = codecs.open(path, 'w', encoding='utf-8')
        fout.write('<html>')
        fout.write('<body>')
        fout.write('<table>')
        fout.close()
    def output_html(self, path):
        fout = codecs.open(path, 'a', encoding='utf-8')
        for data in self.datas:
            fout.write('<tr>')
            fout.write('<td>%s</td>' % data['url'])
            fout.write('<td>%s</td>' % data['title'])
            fout.write('<td>%s</td>' % data['summary'])
            fout.write('</tr>')
        # Clear the buffer after writing; removing items while iterating over
        # the list would skip every other record.
        self.datas = []
        fout.close()
    def output_end(self, path):
        # Close the table and document once the storage process receives 'end'.
        fout = codecs.open(path, 'a', encoding='utf-8')
        fout.write('</table>')
        fout.write('</body>')
        fout.write('</html>')
        fout.close()
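A small illustrative run of the storage class on a hand-written record (the dictionary keys mirror what HtmlParser produces below):

# Illustrative only: store_data() buffers records and flushes in batches of
# more than 10, so output_html() is called here to force a flush before
# closing the file.
if __name__ == '__main__':
    out = DataOutputNew()
    out.store_data({'url': 'http://baike.baidu.com/item/网络爬虫',
                    'title': '网络爬虫',
                    'summary': 'placeholder summary text'})
    out.output_html(out.filepath)   # write the buffered row
    out.output_end(out.filepath)    # close <table>, <body>, <html>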
HTML Downloader Code
import requests
class HtmlDownloader(object):
def download(self,url):
if url is None:
return None
user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
headers = {'User-Agent': user_agent}
r = requests.get(url, headers=headers)
if r.status_code == 200:
r.encoding = 'utf-8'
return r.text
return None
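A one-line check of the downloader (needs network access; baike.baidu.com may redirect or block automated requests, so a None result is possible):

# Illustrative only: fetch one page and report its length, or note the failure.
if __name__ == '__main__':
    html = HtmlDownloader().download('http://baike.baidu.com/item/网络爬虫')
    print(len(html) if html else 'download failed')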
HTML Parser Code
import re
import urllib.parse
from bs4 import BeautifulSoup
class HtmlParser(object):
    def parser(self, page_url, html_cont):
        '''
        Parse the page content and extract new URLs and data.
        :param page_url: URL of the downloaded page
        :param html_cont: the downloaded page content
        :return: the new URLs and the extracted data
        '''
        if page_url is None or html_cont is None:
            return
        soup = BeautifulSoup(html_cont, 'html.parser')
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data
    def _get_new_urls(self, page_url, soup):
        '''
        Extract the set of new URLs.
        :param page_url:
        :param soup:
        :return:
        '''
        new_urls = set()
        # Baidu Baike entry links all match /item/...
        links = soup.find_all('a', href=re.compile(r'/item/.*'))
        for link in links:
            new_url = link['href']
            # Turn the relative link into an absolute URL.
            new_full_url = urllib.parse.urljoin(page_url, new_url)
            new_urls.add(new_full_url)
        return new_urls
    def _get_new_data(self, page_url, soup):
        data = {}
        data['url'] = page_url
        print(page_url)
        # Title and summary selectors follow the Baidu Baike page layout;
        # guard against pages that lack the expected title block.
        title_node = soup.find('dd', 'lemmaWgt-lemmaTitle-title')
        title = title_node.find('h1') if title_node is not None else None
        if title is not None:
            data['title'] = title.get_text()
        else:
            data['title'] = 'no title'
        summary = soup.find('div', 'lemma-summary')
        if summary is not None:
            data['summary'] = summary.get_text()
        else:
            data['summary'] = 'no summary'
        return data
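Putting the last two pieces together, a hedged end-to-end check of downloader plus parser on a single page (needs network access and depends on the current Baidu Baike markup matching the selectors above):

# Illustrative only: download one entry page and parse it in isolation.
from HtmlDownloader import HtmlDownloader

if __name__ == '__main__':
    url = 'http://baike.baidu.com/item/网络爬虫'
    html = HtmlDownloader().download(url)
    if html:
        new_urls, data = HtmlParser().parser(url, html)
        print(len(new_urls), data['title'])
    else:
        print('download failed')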