Building a Simple Crawler in Python --- The Basic Idea of a Crawler

Taking http://rmfygg.court.gov.cn/psca/lgnot/bulletin/page/0_0.html
as the example site, we crawl to a depth of only one level; the point of this example is simply to illustrate the basic idea behind a crawler.
Here is the code:

# -*- coding: utf-8 -*-

from bs4 import BeautifulSoup
import requests
import re
import Queue
import pdb
import time 
import threading
import json
import codecs
"""
    isCourtPub = Field()       #是否是法院公告
    pubType = Field()          #公告类型
    pubPerson = Field()        #公告人
    client = Field()           #当事人
    pubDate = Field()          #发布时间
    pdfLink = Field()          #PDF下载网址
    detailLink= Field()        #公告链接地址
    collectTime = Field()      #采集时间
"""

url_queue = Queue.Queue()
url_set = set()
match_rule_suffix = r'\d+_\d+\.html'       # follow-up pages look like <digits>_<digits>.html
start_urls = [
                 "http://rmfygg.court.gov.cn/psca/lgnot/bulletin/page/0_0.html",
                 "http://rmfygg.court.gov.cn/psca/lgnot/bulletin/page/0_1.html"
             ]
base_url = "http://rmfygg.court.gov.cn"

mutex = threading.Lock()
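# note: Queue.Queue is itself thread-safe; the lock above is only used in run()
# to make the queue-empty check and the subsequent get() a single atomic step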

class CrawlSpider(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
    
    def run(self):
        while url_queue.qsize() > 0:
            with mutex:                                          # make the empty-check and get() one atomic step
                if url_queue.empty():                            # another worker may have drained the queue
                    break
                current_url = url_queue.get()                    # take the next url off the queue
            follow_up_url_list = self.parse_html(current_url)
            for url in follow_up_url_list:                       # push newly found urls into the queue and the set
                if url not in url_set:
                    url_set.add(url)
                    url_queue.put(url)
            
    def follow_up_url(self, url, css_soup):                      # collect the follow-up urls to crawl
        follow_up_url_list = []
        extract_urls = css_soup.find_all('a')
        rule = re.compile('.+' + match_rule_suffix)              # only keep links ending in <digits>_<digits>.html
        for tag in extract_urls:
            match_url = rule.match(tag.get('href', ''))          # tags without an href simply fail to match
            if match_url:
                specific_url = base_url + match_url.group()      # hrefs are relative, so prepend the site root
                follow_up_url_list.append(specific_url)
        return follow_up_url_list

    def extract_data(self, url, css_soup):                       # pull the fields we need out of the page
        type_tag = css_soup.find_all('ul')
        announcement_type = None
        if url.split('/')[-1][0] == '0':                         # page name starts with 0: first <li> holds the type label
            announcement_type = type_tag[0].find_all('li')[0].string
        if url.split('/')[-1][0] == '1':                         # page name starts with 1: second <li> holds the type label
            announcement_type = type_tag[0].find_all('li')[1].string
        contents = css_soup.find_all('tr')
        for row in contents[1:]:                                 # skip the table header row
            tds = row.find_all('td')
            item = {}
            item["isCourtPub"] = announcement_type
            item["pubType"] = tds[0].string
            item["pubPerson"] = tds[1].string
            item["client"] = tds[2].string
            item["pubDate"] = tds[3].string
            item["pdfLink"] = base_url + tds[4].a['href']
            item["detailLink"] = base_url + tds[2].a['href']
            item["collectTime"] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
            self.save(item)
    
    def parse_html(self, url):
        css_soup = BeautifulSoup(requests.get(url).text, 'html.parser')   # parse the page once, reuse the soup
        follow_up_url_list = self.follow_up_url(url, css_soup)            # links to crawl next
        self.extract_data(url, css_soup)                                  # data rows on this page
        return follow_up_url_list
    
    def save(self, item):
        line = json.dumps(dict(item), ensure_ascii=False) + "\n"
        with codecs.open('courtannounce.json', 'a+', encoding='utf-8') as output:   # one JSON object per line
            output.write(line)

def main():
    # seed the queue (request scheduling) and the set (de-duplication) with the start urls
    for url in start_urls:
        url_set.add(url)
        url_queue.put(url)
    for i in range(10):                  # run ten crawler threads
        thread = CrawlSpider()
        thread.start()
        time.sleep(1)                    # start the threads one second apart

if __name__ == "__main__":
    main()

That is the basic idea behind crawling these announcements: first, the start urls are placed into a queue (used to schedule requests) and into a set (used for de-duplication).

During the crawl, each requested page is not only parsed for its data (extract_data); we also match the follow-up links we need and push those links into the same queue and set.
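In essence the crawl above is a breadth-first traversal driven by that queue/set pair. Stripped of threading and of the page-specific parsing, the pattern looks roughly like the sketch below; fetch_links is a hypothetical stand-in for what parse_html does (download the page, extract its data, return the matching follow-up links):

import collections

def crawl(start_urls, fetch_links):
    # the queue schedules which url to request next; the set records every url ever enqueued
    queue = collections.deque(start_urls)
    seen = set(start_urls)
    while queue:
        url = queue.popleft()
        for link in fetch_links(url):    # fetch and parse the page, yielding follow-up links
            if link not in seen:         # only enqueue urls we have not seen before
                seen.add(link)
                queue.append(link)

Because seen only ever grows and every url is enqueued at most once, the loop terminates as soon as every discovered listing page has been visited exactly once.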
