Web Scraping Basics (Part 4): A Single-Threaded Tieba Spider Example

This post walks through a single-threaded spider for Baidu Tieba: it crawls every list page of a given forum and each thread's detail page, extracts the image and video URLs using Python's requests library to fetch pages and lxml to parse them, and saves the results.
  • After working through the previous posts, you can attempt a Tieba spider as an exercise. The requirement:
    crawl the images and videos from all list pages and detail pages of a specified forum. The full code follows, after a short warm-up sketch below.
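Before the full spider, here is a minimal sketch of the requests + lxml pattern the whole program is built on. The URL, the headers, and the xpath expression are illustrative assumptions rather than parts of the spider itself.

import requests
from lxml import etree

# Fetch a page and pull matching attribute values out with xpath.
# The URL and the xpath below are placeholder assumptions.
url = "https://tieba.baidu.com/f?kw=python"
headers = {"User-Agent": "Mozilla/5.0"}
response = requests.get(url, headers=headers)
html = etree.HTML(response.content.decode())  # parse into an Element tree
links = html.xpath("//a/@href")               # xpath always returns a list
print(links[:5])

etree.HTML takes the decoded page source, and every xpath call returns a plain Python list, which is why the spider below only indexes a result with [0] after checking its length.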
import requests
from lxml import etree
import json
import re
import os
from urllib import parse

class TiebaSpider:
    def __init__(self, tieba_name):
        self.tieba_name = tieba_name
        self.start_url = "https://tieba.baidu.com/f?kw={}".format(tieba_name)
        self.headers = {"User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1"}
        self.page_num = 0
        self.total_page = 1

    def parse_url(self, url):
        """发送请求,获取响应"""
        response = requests.get(url, headers=self.headers)
        return response.content.decode()

    def get_content_list(self, html_str):
        """获取列表页内容"""
        html = etree.HTML(html_str)  # element对象
        div_list = html.xpath("//li[@class='tl_shadow tl_shadow_new ']")
        content_list = []
        for div in div_list:
            item = {}
            # title: the span may hold one text node (plain title) or two
            # (a category prefix plus the title)
            a = div.xpath("./a/div[contains(@class, 'ti_title')]/span/text()")
            if len(a) == 1:
                item['title'] = a[0]
            elif len(a) == 2:
                item['title'] = "[" + a[0] + "]" + a[1]
            else:
                item['title'] = None
            # href: evaluate the xpath once and guard against threads without a link
            href = div.xpath("./a/@href")
            item['href'] = href[0] if len(href) > 0 else None
            content_list.append(item)
        # Extract the total page count embedded in the page source
        total_page = re.findall(r'"total_page":(\d+)', html_str)
        # Fall back to a single page if the marker is not found
        return content_list, int(total_page[0]) if total_page else 1

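    # A quick illustration (with an assumed sample string) of the total_page
    # regex used above:
    #   re.findall(r'"total_page":(\d+)', '{"total_page":12}')  ->  ['12']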
    def get_image_list(self, detail_url):
        """Collect the image and video URLs from a thread's detail page"""
        img_list = []
        # 3.2 Request the detail-page URL taken from the list page
        detail_html_str = self.parse_url("https://tieba.baidu.com" + detail_url)
        detail_html = etree.HTML(detail_html_str)
        # Images posted by the thread starter
        subject_img_url = detail_html.xpath("//div[@class='pb_img_item']/@data-url")
        for subject_img in subject_img_url:
            ret0 = re.findall("src=(.+)", subject_img)
            img_list.append(parse.unquote(ret0[0]))
        # Videos posted by the thread starter (extend keeps the list flat)
        subject_mp4_url = detail_html.xpath("//div[@lz='1']/a/@data-vhsrc")
        img_list.extend(subject_mp4_url)
        # Images in the replies
        img_original_list = detail_html.xpath("//div[@data-class='BDE_Image']/@data-url")
        for item in img_original_list:
            ret = re.findall("src=(.+)", item)
            img_list.append(parse.unquote(ret[0]))
        return img_list

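    # The data-url attributes carry a percent-encoded src= parameter; an assumed
    # example of what re.findall + parse.unquote recover from one:
    #   re.findall("src=(.+)", "...src=https%3A%2F%2Fexample.com%2Fa.jpg")
    #   -> ['https%3A%2F%2Fexample.com%2Fa.jpg']
    #   parse.unquote('https%3A%2F%2Fexample.com%2Fa.jpg')
    #   -> 'https://example.com/a.jpg'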
    def save_content_list(self, content_list):
        """保存数据"""
        file_path = self.tieba_name + ".txt"
        with open(file_path, "a") as f:
            for content in content_list:
                f.write(json.dumps(content, ensure_ascii=False, indent=2))
                f.write("\n")

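    # save_content_list writes each record as pretty-printed JSON plus a
    # newline, so the output file looks like (assumed sample values):
    #   {
    #     "title": "[category]some thread title",
    #     "href": "/p/1234567890"
    #   }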
    def save_img(self, total_image_list):
        """Download every collected image and video into ./img/"""
        os.makedirs("img", exist_ok=True)  # make sure the target directory exists
        for image_list in total_image_list:
            for img_url in image_list:
                # Derive a file name from the URL; skip anything unrecognized
                img_file_name = re.findall(r"(\w+\.(?:jpg|mp4))", img_url)
                if not img_file_name:
                    continue
                img = requests.get(img_url, headers=self.headers).content
                with open("./img/" + img_file_name[0], 'wb') as f:
                    f.write(img)

    def run(self):
        # 1. Start from the first list page
        print(self.start_url)
        next_url = self.start_url
        while self.page_num < self.total_page * 30:
            # 2. Send the request, get the response
            html_str = self.parse_url(next_url)
            # 3. Extract the data (and the real total page count)
            content_list, self.total_page = self.get_content_list(html_str)
            print(content_list)
            # 3.1 Follow each thread's href and collect its media URLs
            total_image_list = []
            for content in content_list:
                if not content["href"]:
                    continue
                image_list = self.get_image_list(content["href"])
                total_image_list.append(image_list)
            print(total_image_list)
            # 4. Save the data
            # self.save_content_list(content_list)
            # 4.1 Save the images and videos
            self.save_img(total_image_list)
            # 5. Build the next page's URL (pn advances by 30) and repeat steps 2-5
            self.page_num += 30
            next_url = "https://tieba.baidu.com/f?kw={}&pn={}".format(self.tieba_name, self.page_num)
            print(next_url)


if __name__ == '__main__':
    spider = TiebaSpider('中国好学姐')
    spider.run()
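As written, run() fires requests back to back with no timeout. Below is a hedged sketch of a more patient fetch helper; the one-second delay and ten-second timeout are arbitrary assumptions, not values from the original spider, and polite_get is a hypothetical name.

import time
import requests

HEADERS = {"User-Agent": "Mozilla/5.0"}

def polite_get(url, delay=1.0, timeout=10):
    """Fetch a URL with a timeout, then pause so consecutive calls are spaced out."""
    response = requests.get(url, headers=HEADERS, timeout=timeout)
    time.sleep(delay)  # politeness delay between requests; the value is an assumption
    return response.content.decode()

# usage: html_str = polite_get("https://tieba.baidu.com/f?kw=python")

Swapping this logic into TiebaSpider.parse_url (using self.headers instead of the module-level HEADERS) leaves the rest of the class unchanged.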