Implementing a Multi-threaded Crawler

1. Single-threaded crawl of Qiubai

  • We know how many pages there are in total, so the URL list can be built up front
import requests
from lxml import etree

class QiubaiSpider:
    def __init__(self):
        self.url_temp = "https://www.qiushibaike.com/8hr/page/{}/"
        self.headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36"}

    def get_url_list(self):
        url_list = [self.url_temp.format(i+1) for i in range(12)]
        return url_list

    def parse_url(self, url):
        response = requests.get(url, headers=self.headers)
        html_str = response.content
        return html_str

    def get_content_list(self, html_str):
        html = etree.HTML(html_str)
        li_list = html.xpath("//li[contains(@class, 'item typs_')]")
        # print(li_list)
        content_list = list()
        for li in li_list:
            item = dict()
            title_list = li.xpath(".//a[@class='recmd-content']/text()")
            href_list = li.xpath(".//a[@class='recmd-content']/@href")
            item["title"] = title_list[0].strip() if title_list else None
            item["href"] = "https://www.qiushibaike.com" + href_list[0] if href_list else None
            content_list.append(item)
        return content_list

    def save_content_list(self, content_list):
        for content in content_list:
            print(content)

    def run(self):
        # Build the URL list (we already know how many pages there are)
        url_list = self.get_url_list()
        for url in url_list:
            # Send request, get response
            html_str = self.parse_url(url)
            # Extract content
            content_list = self.get_content_list(html_str)
            # Save
            self.save_content_list(content_list)


if __name__ == "__main__":
    qiubai = QiubaiSpider()
    qiubai.run()

2. The multi-threaded approach

  • Wrap each step of the crawler in its own function and run each function in a separate thread
  • The functions communicate through queues, which decouples them from one another (a minimal sketch follows)
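
A minimal sketch of the pattern in isolation (the names here are illustrative, not part of the crawler): one producer thread fills a queue, a daemon consumer thread drains it, and the main thread waits on q.join().

import threading
import queue

q = queue.Queue()

def producer():
    for i in range(5):
        q.put(i)  # each put() increments the queue's unfinished-task counter

def consumer():
    while True:  # runs forever; as a daemon thread it dies with the main thread
        item = q.get()
        print("consumed", item)
        q.task_done()  # pairs with get() to decrement the counter

threading.Thread(target=producer).start()
threading.Thread(target=consumer, daemon=True).start()
q.join()  # blocks until the unfinished-task counter is back to zero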

3. Multi-threaded implementation for Qiubai

  • Uses daemon threads
  • Surprisingly, q.join() works where q is a queue rather than a thread: it blocks until the queue's unfinished-task counter drops to zero
  • put() increments that counter; it only decreases when get() is followed by task_done() (the short demo below illustrates the counting)
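
To see the counting concretely, a toy demo (it peeks at Queue's undocumented unfinished_tasks attribute, so treat it purely as illustration):

import queue

q = queue.Queue()
q.put("a")
q.put("b")
print(q.unfinished_tasks)  # 2: each put() incremented the counter

q.get()
print(q.unfinished_tasks)  # still 2: get() alone does not decrement it
q.task_done()
print(q.unfinished_tasks)  # 1: task_done() is what decrements it

q.get()
q.task_done()
q.join()  # returns immediately, the counter is already zero
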
"""有个问题我不能解决 有时候打印结果会少一页"""
import requests
from lxml import etree
import threading
import queue
import time
import os


class QiubaiSpider:
    def __init__(self):
        self.url_temp = "https://www.qiushibaike.com/8hr/page/{}/"
        self.headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36"}
        self.url_queue = queue.Queue()
        self.html_str_queue = queue.Queue()
        self.content_list_queue = queue.Queue()

    def get_url_list(self):
        # url_list = [self.url_temp.format(i+1) for i in range(12)]
        # return url_list
        for i in range(1, 14):
            url = self.url_temp.format(i)
            self.url_queue.put(url)


    def parse_url(self):
        # response = requests.get(url, self.headers)
        # html_str = response.content
        # return html_str
        while True:
            # get() is meant to be paired with task_done()
            # put() increments the unfinished-task counter; it only decreases when get() is followed by task_done()
            url = self.url_queue.get()
            print(url)
            response = requests.get(url, headers=self.headers)
            html_str = response.content
            self.html_str_queue.put(html_str)
            # task_done() must come after put(): if the queue momentarily hit zero first, the
            # q.join() calls in the main thread could all return and the program could exit
            # before html_str was actually enqueued
            self.url_queue.task_done()

    def get_content_list(self):
        # html = etree.HTML(html_str)
        # li_list = html.xpath("//li[contains(@class, 'item typs_')]")
        # # print(li_list)
        # content_list = list()
        # for li in li_list:
        #     item = dict()
        #     item["title"] = li.xpath(".//a[@class='recmd-content']/text()")[0].strip() if len(li.xpath(".//a[@class='recmd-content']/text()")) else None
        #     item["href"] = "https://www.qiushibaike.com" + li.xpath(".//a[@class='recmd-content']/@href")[0] if len(li.xpath(".//a[@class='recmd-content']/@href")) else None
        #     content_list.append(item)
        # return content_list
        while True:
            html_str = self.html_str_queue.get()
            html = etree.HTML(html_str)
            li_list = html.xpath("//li[contains(@class, 'item typs_')]")
            # print(li_list)
            content_list = list()
            for li in li_list:
                item = dict()
                title_list = li.xpath(".//a[@class='recmd-content']/text()")
                href_list = li.xpath(".//a[@class='recmd-content']/@href")
                item["title"] = title_list[0].strip() if title_list else None
                item["href"] = "https://www.qiushibaike.com" + href_list[0] if href_list else None
                content_list.append(item)
            self.content_list_queue.put(content_list)
            self.html_str_queue.task_done()


    def save_content_list(self):
        # for content in content_list:
        #     print(content)
        while True:
            content_list = self.content_list_queue.get()
            for content in content_list:
                print(content)
            self.content_list_queue.task_done()


    def run(self):
        print(os.getppid())  # parent pid, left in for debugging
        thread_list = []
        # Build the URL list (we already know how many pages there are)
        # threading.Thread needs no arguments here: every worker reads its input from a queue
        t_url = threading.Thread(target=self.get_url_list)
        thread_list.append(t_url)
        # for url in url_list:
        # Send requests, get responses
        for i in range(10):  # spawn 10 fetcher threads
            t_parse = threading.Thread(target=self.parse_url)
            thread_list.append(t_parse)
        # Extract data
        t_content = threading.Thread(target=self.get_content_list)
        thread_list.append(t_content)
        # Save
        t_save = threading.Thread(target=self.save_content_list)
        thread_list.append(t_save)

        for t in thread_list:
            t.daemon = True  # daemon threads die with the main thread; the workers loop in while True and would never exit on their own
            t.start()

        for q in [self.url_queue, self.html_str_queue, self.content_list_queue]:
            q.join()    # block the main thread until every queue's unfinished-task counter reaches zero



if __name__ == "__main__":
    print(os.getppid())  # parent pid, left in for debugging
    qiubai = QiubaiSpider()
    start_time = time.time()
    qiubai.run()
    print("total time :", time.time()-start_time)
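
A guess at the missing-page problem (an assumption, not verified): a request that comes back as an error page still parses, just to an empty list, so one page silently drops out. A more defensive parse_url might look like this sketch (same class, same queues; the timeout value is arbitrary), which surfaces HTTP errors and puts the failed URL back for a retry:

    def parse_url(self):
        while True:
            url = self.url_queue.get()
            try:
                response = requests.get(url, headers=self.headers, timeout=10)
                response.raise_for_status()  # raise on 4xx/5xx instead of parsing an error page
                self.html_str_queue.put(response.content)
            except requests.RequestException as err:
                print("request failed, re-queueing:", url, err)
                self.url_queue.put(url)  # put the URL back for another attempt
            finally:
                self.url_queue.task_done()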

4. Multi-process implementation for Qiubai

  • Uses multiprocessing's JoinableQueue; compared with the plain multiprocessing Queue, it additionally provides join() and task_done()
  • Known issue: nothing gets printed, and the cause is unclear (see the note after the code)
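
Before the full implementation, a minimal sketch of JoinableQueue on its own (a toy example; the names are illustrative):

from multiprocessing import JoinableQueue, Process

def worker(q):
    while True:
        item = q.get()
        print("got", item, flush=True)
        q.task_done()  # same unfinished-task counter semantics as queue.Queue

if __name__ == "__main__":
    q = JoinableQueue()
    Process(target=worker, args=(q,), daemon=True).start()
    for i in range(3):
        q.put(i)
    q.join()  # blocks until every put() has a matching task_done()

The full multi-process crawler: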
import requests
from lxml import etree
import multiprocessing
from multiprocessing import JoinableQueue
import time


class QiubaiSpider:
    def __init__(self):
        self.url_temp = "https://www.qiushibaike.com/8hr/page/{}/"
        self.headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36"}
        self.url_queue = JoinableQueue()
        self.html_str_queue = JoinableQueue()
        self.content_list_queue = JoinableQueue()

    def get_url_list(self):
        for i in range(1, 14):
            url = self.url_temp.format(i)
            self.url_queue.put(url)

    def parse_url(self):
        while True:
            url = self.url_queue.get()
            response = requests.get(url, headers=self.headers)
            html_str = response.content
            self.html_str_queue.put(html_str)
            self.url_queue.task_done()

    def get_content_list(self):
        while True:
            html_str = self.html_str_queue.get()
            html = etree.HTML(html_str)
            li_list = html.xpath("//li[contains(@class, 'item typs_')]")
            # print(li_list)
            content_list = list()
            for li in li_list:
                item = dict()
                title_list = li.xpath(".//a[@class='recmd-content']/text()")
                href_list = li.xpath(".//a[@class='recmd-content']/@href")
                item["title"] = title_list[0].strip() if title_list else None
                item["href"] = "https://www.qiushibaike.com" + href_list[0] if href_list else None
                content_list.append(item)
            self.content_list_queue.put(content_list)
            self.html_str_queue.task_done()

    def save_content_list(self):
        while True:
            content_list = self.content_list_queue.get()
            for content in content_list:
                print(content)
            self.content_list_queue.task_done()

    def run(self):
        process_list = []
        # Build the URLs
        t_url = multiprocessing.Process(target=self.get_url_list)
        process_list.append(t_url)
        # Send requests, get responses
        t_parse = multiprocessing.Process(target=self.parse_url)
        process_list.append(t_parse)
        # Extract content
        t_content = multiprocessing.Process(target=self.get_content_list)
        process_list.append(t_content)
        # Save
        t_save = multiprocessing.Process(target=self.save_content_list)
        process_list.append(t_save)

        for t in process_list:
            t.daemon = True
            t.start()

        for q in [self.url_queue, self.html_str_queue, self.content_list_queue]:
            q.join()    # block the main process until every queue's unfinished-task counter reaches zero


if __name__ == "__main__":
    qiubai = QiubaiSpider()
    qiubai.run()
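
One plausible explanation for the missing output (again an assumption, not verified): daemon child processes are terminated abruptly as soon as the main process finishes, so a child's buffered stdout may never be flushed. Forcing a flush on every print is a cheap first thing to try:

    def save_content_list(self):
        while True:
            content_list = self.content_list_queue.get()
            for content in content_list:
                print(content, flush=True)  # flush so output survives abrupt daemon termination
            self.content_list_queue.task_done()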