# Fetch a novel chapter's title and body with BeautifulSoup
# (simple, complete example; no cookie handling required).
# -*- coding=utf-8 -*-
import urllib2
from bs4 import BeautifulSoup
# 得到网页源代码
def download(url,user_agent = 'wswp',proxy = None,num_retries = 2):
print 'downloading:',url
headers = {'User-agent':user_agent} # 设置代理服务
request = urllib2.Request(url,headers = headers) # 发出请求
opener = urllib2.build_opener() # 创建一个opener
# 异常处理
try:
html = opener.open(request).read() # 拿到网页源代码
except urllib2.URLError as e:
print 'download error:',e.reason
html = None
if num_retries > 0:
if hasattr(e,'code') and 500 <= e.code < 600:
html = download(url,user_agent,proxy,num_retries-1) # 若是对方服务器问题,则重新爬取,最多再爬两次
return html
# 得到小说信息
def crawler_info(seed_url):
html = download(seed_url)
soup = BeautifulSoup(html) # 将拿到的网页源代码转换为BeautifulSoup的格式
title = soup.title # 文章名称
content = soup.find(id='content') # 文章内容
print '题目:',title.text # 以文本格式输出
print '文章:',content.text
# Manual test: fetch one sample chapter (requires network access).
# Guarded so importing this module does not trigger the download.
if __name__ == '__main__':
    url = 'http://www.biquge5200.com/52_52542/20380548.html'
    crawler_info(url)