Learning Python urllib2 and BeautifulSoup

This article covers web scraping with Python: downloading pages with urllib2 (three methods), installing BeautifulSoup4 via pip, the basic BeautifulSoup parsing syntax with a practice script, and a complete Baidu Baike crawler example. All code targets Python 2, where urllib2 is available.


1. Three ways to download a web page with Python urllib2
#-*-coding:utf-8 -*-
import urllib2
import cookielib

url = "http://www.baidu.com"
print 'Method 1: plain urlopen'
response1 = urllib2.urlopen(url)
print response1.getcode()
print len(response1.read())

print "第二种方法"
request = urllib2.Request(url)
request.add_header("use-agent","Mozilla/5.0")
response2 = urllib2.urlopen(request)
print response2.getcode()
print len(response2.read())

print "第三种方法"
cj = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
urllib2.install_opener(opener)
response3 = urllib2.urlopen(url)
print response3.getcode()
print cj
print response3.read()
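
These three methods are Python 2 only; in Python 3, urllib2 and cookielib were merged into urllib.request and http.cookiejar. For reference, a minimal sketch of the same three approaches under Python 3:

# Python 3 equivalents of the three methods above
import urllib.request
import http.cookiejar

url = "http://www.baidu.com"
# method 1: plain urlopen
print(urllib.request.urlopen(url).getcode())
# method 2: Request object with a custom header
request = urllib.request.Request(url, headers={"User-Agent": "Mozilla/5.0"})
print(urllib.request.urlopen(request).getcode())
# method 3: opener with cookie handling
cj = http.cookiejar.CookieJar()
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
print(opener.open(url).getcode())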
2. Installing beautifulsoup4 with pip
On Windows, open a command prompt (DOS window) and change into the Scripts folder of the Python installation directory. Use the dir command to confirm pip is present, then run:
pip install beautifulsoup4
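
To confirm the package is importable afterwards, a quick check from the Python prompt (a minimal sketch):

# verify the install: bs4 exposes a version string
import bs4
print bs4.__version__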

3. BeautifulSoup parser syntax basics

# Create a BeautifulSoup object from an HTML string
soup = BeautifulSoup(
                    html_doc,                  # the HTML document string
                    'html.parser',             # the HTML parser to use
                    from_encoding='utf8'       # encoding of the document
                    )

# Method: find_all(name, attrs, string). Find all <a> nodes:
soup.find_all('a')
# find all <a> nodes whose href is exactly /view/123.htm
soup.find_all('a', href='/view/123.htm')
# find all <div> nodes with class "abc" and text "Python"
soup.find_all('div', class_='abc', string='Python')

# Given a matched node: <a href='1.html'>Python</a>
# tag name of the node
node.name
# href attribute of the <a> node
node['href']
# link text of the <a> node
node.get_text()
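
Putting these pieces together, a minimal runnable sketch (the one-link HTML string is made up for illustration):

#-*- coding:utf-8 -*-
from bs4 import BeautifulSoup

html_doc = "<html><body><a href='1.html'>Python</a></body></html>"
soup = BeautifulSoup(html_doc, 'html.parser')
node = soup.find('a')       # the single <a> node
print node.name             # -> a
print node['href']          # -> 1.html
print node.get_text()       # -> Python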

4. BeautifulSoup syntax practice

#-*- coding:utf-8 -*-
from bs4 import BeautifulSoup
import re
html_doc = """Baidu News page source; omitted here for brevity"""

soup = BeautifulSoup(html_doc, 'html.parser',from_encoding='utf-8')

print 'All links:'
links = soup.find_all('a')
for link in links:
    # link['href'] raises KeyError when an <a> has no href; get() returns None instead
    print link.name, link.get('href'), link.get_text()

print 'Link with an exact href match:'
link_node = soup.find('a',href='http://qijunjie.baijia.baidu.com/article/799732')
print link_node.name, link_node.get('href'), link_node.get_text()

print 'Regex match on href:'
link_node = soup.find('a',href=re.compile(r"qijunjie"))
print link_node.name, link_node.get('href'), link_node.get_text()

print 'Links that open in a new window (target="_blank"):'
links= soup.find_all('a',target='_blank')
for link in links:
    print link.name, link.get_text()
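
bs4 also understands CSS selectors via select(), which can express several of the queries above more compactly (a minimal sketch):

# CSS-selector equivalents of some find_all calls above
print soup.select('a')                    # all <a> nodes
print soup.select('a[target="_blank"]')   # links that open a new window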

5. A complete web crawler example
   5.1 Scheduler: spider_main.py
#-*- coding:utf-8 -*-
from baike_spider import html_downloader,html_outputer,html_parser,url_manager

class SpiderMain(object):
    def __init__(self):
        self.urls = url_manager.UrlManager()
        self.downloader = html_downloader.HtmlDownloader()
        self.parser = html_parser.HtmlParser()
        self.outputer = html_outputer.HtmlOutputer()

    def craw(self,root_url):
        self.urls.add_new_url(root_url)
        count = 1
        while self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()
                print 'craw %d : %s'%(count,new_url)
                html_cont = self.downloader.download(new_url)
                new_urls,new_data = self.parser.parse(new_url,html_cont)
                self.urls.add_new_urls(new_urls)
                self.outputer.collect_data(new_data)
                if count == 1000:
                    break
                count = count + 1
            except Exception as e:
                # a failed page should not kill the whole crawl
                print 'craw failed:', e
        self.outputer.output_html()

if __name__=="__main__":
    root_url = "http://baike.baidu.com/item/乌金木?sefr=cr"
    obj_spider = SpiderMain()
    obj_spider.craw(root_url)
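
One caveat: the root URL's path contains non-ASCII characters, and urllib2 sends the URL as-is. If the request fails, percent-encoding the path is the usual fix (a sketch using urllib.quote; whether the unencoded form fails depends on the server):

# percent-encode the non-ASCII part of the path if urlopen rejects it
import urllib
root_url = "http://baike.baidu.com/item/" + urllib.quote("乌金木") + "?sefr=cr"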
   5.2 URL manager: url_manager.py
#-*- coding:utf-8 -*-
class UrlManager(object):
    def __init__(self):
        self.new_urls = set()
        self.old_urls = set()

    def add_new_url(self, url):
        if url is None:
            return
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def has_new_url(self):
        return len(self.new_urls) != 0

    def get_new_url(self):
        new_url = self.new_urls.pop()
        self.old_urls.add(new_url)
        return new_url

    def add_new_urls(self, urls):
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            self.add_new_url(url)
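
A quick standalone check of the dedup behavior (hypothetical usage, not part of the original project):

# hypothetical sanity check of UrlManager's dedup logic
manager = UrlManager()
manager.add_new_urls(["http://a.com", "http://a.com", "http://b.com"])
print manager.has_new_url()     # True: two distinct URLs were queued
first = manager.get_new_url()   # pops one URL and marks it as seen
manager.add_new_url(first)      # re-adding a seen URL is ignored
print len(manager.new_urls)     # 1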
   5.3 Downloader: html_downloader.py
#-*- coding:utf-8 -*-
import urllib2

class HtmlDownloader(object):
    def download(self, url):
        if url is None:
            return None

        response = urllib2.urlopen(url)
        if response.getcode() != 200:
            return None
        return response.read()
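
The bare urlopen above sends urllib2's default User-Agent, which some sites reject. A variant with a browser-like header and a timeout (a sketch; the UA string and 10-second timeout are arbitrary choices, and the timeout argument requires Python 2.6+):

# variant of download() with a custom User-Agent and a timeout
import urllib2

def download_with_headers(url, timeout=10):
    if url is None:
        return None
    request = urllib2.Request(url)
    request.add_header("User-Agent", "Mozilla/5.0")   # browser-like UA
    try:
        response = urllib2.urlopen(request, timeout=timeout)
    except urllib2.URLError:
        return None
    if response.getcode() != 200:
        return None
    return response.read()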
   5.4 Parser: html_parser.py
#-*- coding:utf-8 -*-
import re
import urlparse

from bs4 import BeautifulSoup

class HtmlParser(object):
    def parse(self, page_url, html_cont):
        if page_url is None or html_cont is None:
            return

        soup = BeautifulSoup(html_cont,'html.parser',from_encoding='utf-8')
        new_urls = self._get_new_urls(page_url,soup)
        new_data = self._get_new_data(page_url,soup)
        return new_urls,new_data

    def _get_new_urls(self, page_url, soup):
        new_urls = set()
        links = soup.find_all('a',href=re.compile(r"/view/\d+\.htm"))
        for link in links:
            new_url = link['href']
            new_full_url = urlparse.urljoin(page_url,new_url)
            new_urls.add(new_full_url)
        return new_urls

    def _get_new_data(self, page_url, soup):
        res_data = {}

        res_data['url'] = page_url
        # target markup: <dd class="lemmaWgt-lemmaTitle-title"><h1>乌金木</h1>
        title_node = soup.find('dd', class_="lemmaWgt-lemmaTitle-title").find("h1")
        res_data['title'] = title_node.get_text()
        # target markup: <div class="lemma-summary" label-module="lemmaSummary">
        summary_node = soup.find('div', class_="lemma-summary")
        res_data['summary'] = summary_node.get_text()

        return res_data
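
Note a mismatch worth checking: the root URL in spider_main.py uses Baidu Baike's newer /item/ path, while the regex above matches the older /view/123.htm style. If a crawl finds no follow-up links, broadening the pattern may help (an assumption about the current page structure, not verified here):

# hypothetical broader pattern for the newer /item/ URLs
links = soup.find_all('a', href=re.compile(r"/item/"))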
   5.5 Outputer: html_outputer.py
#-*- coding:utf-8 -*-
class HtmlOutputer(object):
    def __init__(self):
        self.datas = []

    def collect_data(self, data):
        if data is None:
            return
        self.datas.append(data)

    def output_html(self):
        fout = open('out.html','w')
        fout.write("<html>")
        fout.write("<head>")
        fout.write("<meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\" />")
        fout.write("</head>")
        fout.write("<body>")
        fout.write("<table>")
        for data in self.datas:
            fout.write("<tr>")
            fout.write("<td>%s</td>" % data['url'])
            fout.write("<td>%s</td>" % data['title'].encode('utf-8'))
            fout.write("<td>%s</td>" % data['summary'].encode('utf-8'))
            fout.write("</tr>")

        fout.write("</table>")
        fout.write("</body>")
        fout.write("</html>")
