import re
import urlparse
import urllib2
import time
from datetime import datetime
import robotparser
import Queue
import csv
import lxml
def link_crawler(seed_url, link_regex=None, delay=5, max_depth=5,
max_urls=-1, headers=None, user_agent='wswp', proxy=None,
num_retries=1, scrape_callback=None):
"""
Crawl from the given seed URL following links matched by link_regex
:param seed_url: 种子url
:param link_regex: 匹配内容的正则表达式
:param delay: 延迟
:param max_depth: 最大深度
:param max_urls: 最大网页数量
:param headers: 请求头
:param user_agent: 用户代理
:param proxy: 代理人
:param num_retries: 发生服务器错误重试次数
:param scrape_callback: 回调函数
"""
crawl_queue = [seed_url]
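    # track which URLs have already been seen, and at what depth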
seen = {seed_url: 0}
num_urls = 0
rp = get_robots(seed_url)
throttle = Throttle(delay)
headers = headers or {}
if user_agent:
headers['User-agent'] = user_agent
while crawl_queue:
url = crawl_queue.pop()
depth = seen[url]
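        # check that this URL is allowed by the site's robots.txt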
if rp.can_fetch(user_agent, url):
throttle.wait(url)
html = download(url, headers, proxy=proxy, num_retries=num_retries)
links = []
if scrape_callback:
links.extend(scrape_callback(url, html) or [])
if depth != max_depth:
if link_regex:
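                    # filter for links matching our regular expression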
links.extend(link for link in get_links(html) if re.search(link_regex, link))
for link in links:
link = normalize(seed_url, link)
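                    # check whether this link has already been seen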
if link not in seen:
seen[link] = depth + 1
if same_domain(seed_url, link):
crawl_queue.append(link)
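            # check whether the download limit has been reached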
num_urls += 1
if num_urls == max_urls:
break
else:
print 'Blocked by robots.txt:', url
class Throttle:
"""Throttle downloading by sleeping between requests to same domain
"""
def __init__(self, delay):
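        # minimum delay between downloads for each domain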
self.delay = delay
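        # timestamp of when each domain was last accessed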
self.domains = {}
def wait(self, url):
"""Delay if have accessed this domain recently
"""
domain = urlparse.urlsplit(url).netloc
last_accessed = self.domains.get(domain)
if self.delay > 0 and last_accessed is not None:
            sleep_secs = self.delay - (datetime.now() - last_accessed).total_seconds()
if sleep_secs > 0:
time.sleep(sleep_secs)
self.domains[domain] = datetime.now()
def download(url, headers, proxy, num_retries, data=None):
print 'Downloading:', url
request = urllib2.Request(url, data, headers)
opener = urllib2.build_opener()
if proxy:
proxy_params = {urlparse.urlparse(url).scheme: proxy}
opener.add_handler(urllib2.ProxyHandler(proxy_params))
try:
response = opener.open(request)
html = response.read()
code = response.code
except urllib2.URLError as e:
print 'Download error:', e.reason
html = ''
if hasattr(e, 'code'):
code = e.code
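            # retry 5xx HTTP errors, which indicate a server-side problem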
if num_retries > 0 and 500 <= code < 600:
html = download(url, headers, proxy, num_retries - 1, data)
else:
code = None
return html
def normalize(seed_url, link):
"""Normalize this URL by removing hash and adding domain
"""
link, _ = urlparse.urldefrag(link)
return urlparse.urljoin(seed_url, link)
def same_domain(url1, url2):
"""Return True if both URL's belong to same domain
"""
return urlparse.urlparse(url1).netloc == urlparse.urlparse(url2).netloc
def get_robots(url):
"""Initialize robots parser for this domain
"""
rp = robotparser.RobotFileParser()
rp.set_url(urlparse.urljoin(url, '/robots.txt'))
rp.read()
return rp
def get_links(html):
"""Return a list of links from html
"""
webpage_regex = re.compile('<a[^>]+href=["\'](.*?)["\']', re.IGNORECASE)
return webpage_regex.findall(html)
if __name__ == '__main__':
link_crawler('http://example.webscraping.com', '/(index|view)', delay=0, num_retries=1, max_depth=1,
user_agent='GoodCrawler')
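# The Downloader class below comes from a separate module that reuses the
# Throttle class defined above; it bundles throttling, optional proxy
# rotation, retries and an optional cache behind a single callable object.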
from link_crawler import Throttle
import random
import urllib2
import urlparse
class Downloader:
def __init__(self, delay=5,
                 user_agent='wswp', proxies=None,
num_retries=1, cache=None):
self.throttle = Throttle(delay)
self.user_agent = user_agent
self.proxies = proxies
self.num_retries = num_retries
self.cache = cache
def __call__(self, url):
result = None
if self.cache:
try:
                result = self.cache[url]
except KeyError:
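                # url is not available in the cache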
pass
else:
if self.num_retries > 0 and \
500 <= result['code'] < 600:
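                    # cached result was a server error, so ignore it and re-download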
result = None
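        # result was not loaded from the cache, so still need to download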
if result is None:
self.throttle.wait(url)
proxy = random.choice(self.proxies) if self.proxies else None
headers = {'User-agent': self.user_agent}
result = self.download(url, headers, proxy, self.num_retries)
if self.cache:
self.cache[url] = result
return result['html']
def download(self, url, headers, proxy, num_retries, data=None):
print 'Downloading:', url
request = urllib2.Request(url, data, headers)
opener = urllib2.build_opener()
if proxy:
proxy_params = {urlparse.urlparse(url).scheme: proxy}
opener.add_handler(urllib2.ProxyHandler(proxy_params))
try:
response = opener.open(request)
html = response.read()
code = response.code
except urllib2.URLError as e:
print 'Download error:', e.reason
html = ''
if hasattr(e, 'code'):
code = e.code
if num_retries > 0 and 500 <= code < 600:
html = self.download(url, headers, proxy, num_retries - 1, data)
else:
code = None
return {'html': html, 'code': code}
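# Minimal usage sketch (an illustration, not part of the original source):
# because Downloader implements __call__, an instance can be called like a
# plain download function; the cache argument, when supplied, is assumed to
# be any dict-like object supporting __getitem__ and __setitem__.
if __name__ == '__main__':
    D = Downloader(delay=5, user_agent='wswp', proxies=None, num_retries=1)
    html = D('http://example.webscraping.com')
    print 'Downloaded %d bytes' % len(html)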