Crawler Series 18: A Download Framework

# -*- coding:utf8 -*-

import re
import urlparse
import urllib2
import time
from datetime import datetime
import robotparser
import Queue
import csv
import lxml


def link_crawler(seed_url, link_regex=None, delay=5, max_depth=5,
                 max_urls=-1, headers=None, user_agent='wswp', proxy=None,
                 num_retries=1, scrape_callback=None):
    """
    Crawl from the given seed URL following links matched by link_regex
    :param seed_url: 种子url
    :param link_regex: 匹配内容的正则表达式
    :param delay: 延迟
    :param max_depth: 最大深度
    :param max_urls: 最大网页数量
    :param headers: 请求头
    :param user_agent: 用户代理
    :param proxy: 代理人
    :param num_retries: 发生服务器错误重试次数
    :param scrape_callback: 回调函数
    """


crawl_queue = [seed_url]
# the URL's that have been seen and at what depth
# 爬取过的网页和其深度,就是爬几个网页
# seed : 0
# 通过seed爬取出来的网页:1
# 通过1爬取出来的网页设置为2
# 。。。
seen = {seed_url: 0}
# track how many URL's have been downloaded
# 记录有多少个网页被爬取,限制爬去数量
num_urls = 0
# 检测是否能够爬取
rp = get_robots(seed_url)
# 下载限速,记录两次爬取之间的时间间隔
throttle = Throttle(delay)
headers = headers or {}
# 如果设置了用户代理
if user_agent:
    headers['User-agent'] = user_agent
# 待爬取队列还有值
while crawl_queue:
    url = crawl_queue.pop()
# 记录深度
depth = seen[url]
# check url passes robots.txt restrictions
# 检测用户代理是否可以使用,没有被拉入黑名单
if rp.can_fetch(user_agent, url):
# 检测延迟时间
throttle.wait(url)
# 正式下载网页
html = download(url, headers, proxy=proxy, num_retries=num_retries)
links = []
# 设置回调函数了就执行回调函数
if scrape_callback:
    links.extend(scrape_callback(url, html) or [])
# 如果没有达到最大深度,就可以继续爬取
if depth != max_depth:
# can still crawl further
if link_regex:
# 链接匹配正则表达式,
# filter for links matching our regular expression
links.extend(link for link in get_links(html) if re.search(link_regex, link))
# print links
for link in links:
# 去除碎片
link = normalize(seed_url, link)
# check whether already crawled this link
if link not in seen:
# print link
seen[link] = depth + 1
# check link is within same domain
# 检查两个网页的域名是否相同,防止爬到其他网页去了
if same_domain(seed_url, link):
# success! add this new link to queue
crawl_queue.append(link)
# check whether have reached downloaded maximum
num_urls += 1
# 检查是否达到了最大下载数量
if num_urls == max_urls:
    break
else:
    print 'Blocked by robots.txt:', url


class Throttle:
    """Throttle downloading by sleeping between requests to same domain
    """


def __init__(self, delay):


# amount of delay between downloads for each domain
self.delay = delay
# timestamp of when a domain was last accessed
self.domains = {}


def wait(self, url):
    """Delay if have accessed this domain recently
    """


# 主要域名www.google.com.hk:8080
domain = urlparse.urlsplit(url).netloc
# ParseResult(scheme='https', netloc='www.google.com.hk:8080',
# path='/home/search', params='12432', query='newwi.1.9.serpuc', fragment='1234')
last_accessed = self.domains.get(domain)
# 如果设置了延迟大于0且不是第一次爬取这个域名
if self.delay > 0 and last_accessed is not None:
# 睡眠时间等于延迟的时间-(现在的时间-上次登入的时间)
sleep_secs = self.delay - (datetime.now() - last_accessed).seconds
# 如果延迟时间大于0,就等待几秒
if sleep_secs > 0:
    time.sleep(sleep_secs)
self.domains[domain] = datetime.now()


def download(url, headers, proxy, num_retries, data=None):
    print 'Downloading:', url
    request = urllib2.Request(url, data, headers)
    opener = urllib2.build_opener()
    if proxy:
        # map the URL's scheme (http or https) to the proxy
        proxy_params = {urlparse.urlparse(url).scheme: proxy}
        opener.add_handler(urllib2.ProxyHandler(proxy_params))
    # catch download errors
    try:
        response = opener.open(request)
        html = response.read()
        code = response.code
    except urllib2.URLError as e:
        print 'Download error:', e.reason
        html = ''
        if hasattr(e, 'code'):
            code = e.code
            if num_retries > 0 and 500 <= code < 600:
                # retry 5XX HTTP errors while retries remain
                html = download(url, headers, proxy, num_retries - 1, data)
        else:
            code = None
    return html


def normalize(seed_url, link):
    """Normalize this URL by removing hash and adding domain
    """


# 去除碎片,如果没有碎片第二个参数为空, _占位符,舍弃
# remove hash to avoid duplicates
link, _ = urlparse.urldefrag(link)
# 获取一个干净的网页链接
return urlparse.urljoin(seed_url, link)


def same_domain(url1, url2):
    """Return True if both URL's belong to same domain
    """


return urlparse.urlparse(url1).netloc == urlparse.urlparse(url2).netloc


def get_robots(url):
    """Initialize robots parser for this domain
    """


rp = robotparser.RobotFileParser()
rp.set_url(urlparse.urljoin(url, '/robots.txt'))
rp.read()
return rp


def get_links(html):
    """Return a list of links from html
    """


# a regular expression to extract all links from the webpage
webpage_regex = re.compile('<a[^>]+href=["\'](.*?)["\']', re.IGNORECASE)
# list of all links from the webpage
return webpage_regex.findall(html)
# class ScrapeCallback:
# def __init__(self):
# self.writer = csv.writer(open('countries.csv','w'))
# # 表头
# self.fields = ('area', 'ranked', 'university')
# self.writer.writerow(self.fields)
#
#
# def __call__(self, url, html):
# if re.search('/view/',url):
# tree = lxml.html.fromstring(html)
# row = []
# for field in self.fields:
# row.append(tree.cssselect('table > tr#places_{}_row > td.w2p_fw'.format(field))[0].text_content())
# self.writer.writerow(row)
if __name__ == '__main__':
    # link_crawler('http://example.webscraping.com', '/(index|view)', delay=0, num_retries=1, user_agent='BadCrawler')
    link_crawler('http://example.webscraping.com', '/(index|view)', delay=0, num_retries=1, max_depth=1,
                 user_agent='GoodCrawler')
# -*- coding: utf-8 -*-
# Add caching support to the link crawler:
# check the cache before a URL is downloaded.
# Move the throttling inside the download function,
# so it is only triggered by a real download, not by a cache hit.
# Refactor the download function into a downloader class.
from link_crawer import Throttle
import random
import urllib2
import urlparse


class Downloader:
    def __init__(self, delay=5,
                 user_agent='wsgp', proxies=None,
                 num_retries=1, cache=None):
        self.throttle = Throttle(delay)
        self.user_agent = user_agent
        self.proxies = proxies
        self.num_retries = num_retries
        self.cache = cache


    def __call__(self, url):
        result = None
        # first check whether a cache was configured
        if self.cache:
            try:
                # check whether this URL has already been cached
                result = self.cache[url]
            except KeyError:
                # URL is not available in the cache
                pass
            else:
                # the URL was cached; check whether the cached download
                # hit a server error and retries are still allowed
                if self.num_retries > 0 and 500 <= result['code'] < 600:
                    # server error, so ignore the cached result and download again
                    result = None
        # result is None when the URL was not in the cache, or when the cached
        # download failed with a server error and can be retried
        if result is None:
            # the page was not loaded from the cache, so it still has to be downloaded
            # apply the download delay
            self.throttle.wait(url)
            # pick a random proxy from the pool, if any were configured
            proxy = random.choice(self.proxies) if self.proxies else None
            # request headers
            headers = {'User-agent': self.user_agent}
            result = self.download(url, headers, proxy, self.num_retries)
            if self.cache:
                # save the result into the cache that was passed in
                self.cache[url] = result
        return result['html']


    def download(self, url, headers, proxy, num_retries, data=None):
        print 'Downloading:', url
        request = urllib2.Request(url, data, headers)
        # urllib2.Request(url, data=None, headers={}, origin_req_host=None, unverifiable=False)
        opener = urllib2.build_opener()
        # build_opener creates an opener from a list of handlers; it includes
        # default handlers for HTTP, FTP and, when available, HTTPS.
        # If a handler passed in subclasses a default handler,
        # the default handler is not used.
        if proxy:
            # map the URL's scheme (http or https) to the proxy
            proxy_params = {urlparse.urlparse(url).scheme: proxy}
            opener.add_handler(urllib2.ProxyHandler(proxy_params))
        # catch download errors
        try:
            response = opener.open(request)
            html = response.read()
            code = response.code
        except urllib2.URLError as e:
            print 'Download error:', e.reason
            html = ''
            if hasattr(e, 'code'):
                code = e.code
                if num_retries > 0 and 500 <= code < 600:
                    # retry 5XX HTTP errors while retries remain
                    html = self.download(url, headers, proxy, num_retries - 1, data)
            else:
                code = None
        return {'html': html, 'code': code}
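
The cache passed to Downloader only needs mapping-style access: __call__ reads it with self.cache[url], treats a KeyError as a cache miss, and writes the result back with self.cache[url] = result. Below is a minimal sketch with a toy in-memory cache (MemoryCache is a hypothetical helper, not part of the series; note that a plain dict would not work here, because `if self.cache:` is False while the dict is still empty, so nothing would ever be stored):

class MemoryCache(object):
    """Toy in-memory cache; a real implementation would persist results to disk."""
    def __init__(self):
        self.data = {}

    def __getitem__(self, url):
        # raise KeyError on a miss, which is what Downloader.__call__ expects
        return self.data[url]

    def __setitem__(self, url, result):
        self.data[url] = result


if __name__ == '__main__':
    D = Downloader(delay=5, user_agent='wsgp', num_retries=1, cache=MemoryCache())
    html = D('http://example.webscraping.com')  # real download, throttled
    html = D('http://example.webscraping.com')  # cache hit: no delay, no request
    print 'page size:', len(html)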