Scraping a novel with Python

Scraped from Tianlai (23txt.com). Ideally it would also merge the chapters into a single file, but I was lazy and didn't add that, so for now each chapter is saved as its own text file (a rough sketch of the merge step follows the script below). To crawl a different book, change the link yourself; in theory anything hosted on Tianlai can be crawled.

Running the script deletes and recreates an empty folder named after the book. It also supports starting from a given chapter: just change the start index.

import time
import os
import re
import sys
import requests
from bs4 import BeautifulSoup


class downLoader:
    def __init__(self):
        self.chapterList = []   # chapter URLs
        self.chapterName = []   # chapter titles, in reading order
        self.chapterNum = 0
        self.bookName = ''
        self.session = requests.Session()

    def deleteFile(self):
        # Remove last run's output folder for this book, then recreate it empty.
        path = os.path.join(os.getcwd(), self.bookName)
        if os.path.exists(path):
            files = os.listdir(path)
            for pFile in files:
                try:
                    os.unlink(os.path.join(path, pFile))
                except IOError:
                    print('System error: cannot delete file ' + pFile + ', it may be in use')

            try:
                os.rmdir(path)
            except IOError:
                print('System error: cannot delete folder ' + self.bookName + ', a file inside may be in use')
                time.sleep(1)
                self.deleteFile()
                return

            print('Finished deleting the ' + self.bookName + ' folder')

        try:
            os.mkdir(self.bookName)
        except IOError:
            time.sleep(1)
            self.deleteFile()
            return

        print('Created ' + self.bookName + ' successfully')

    def getChapter(self):
        print('Fetching the chapter list')
        html = self.getHtml(
            'https://www.23txt.com/files/article/html/22/22295/')
        bf = BeautifulSoup(html, "html.parser")
        divs = bf.find_all('div', class_='box_con')
        # The book title sits in <div id="maininfo"> -> <div id="info"> -> <h1>.
        info = divs[0].find('div', id='maininfo').find('div', id='info')
        self.bookName = info.find('h1').text
        print(self.bookName)
        self.deleteFile()

        aList = divs[1].find_all('a')
        for a in aList:
            self.chapterName.append(a.string)
            self.chapterList.append('https://www.23txt.com' + a.get('href'))

        self.chapterNum = len(aList)

    def getHtml(self, url):
        req = self.session.get(url=url)
        req.raise_for_status()
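        # Use the encoding requests detects from the body rather than the
        # declared charset, which is often wrong on sites like this one.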
        req.encoding = req.apparent_encoding
        return req.text

    def begin(self, begin=0):
        print('Fetching the content of every chapter')
        # Starting a thread and joining it immediately runs sequentially anyway,
        # so just fetch the chapters one after another.
        for index in range(begin, self.chapterNum):
            self.getContent(index)

    def getContent(self, index):
        html = self.getHtml(self.chapterList[index])
        bf = BeautifulSoup(html, "html.parser")
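        # Drill down wrapper -> content_read -> box_con to reach the
        # <div id="content"> that holds the chapter text.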
        div = bf.find('div', id='wrapper')
        div = div.find('div', class_='content_read')
        div = div.find('div', class_='box_con')
        div = div.find('div', id='content')
        content = div.text
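        # Collapse every run of whitespace into a single newline; in the Chinese
        # chapter text, whitespace mostly appears around paragraph breaks.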
        content = re.sub(r'\s+', '\n', content)
        content = self.chapterName[index] + '\n' + content
        self.saveFile(content, self.chapterName[index])

    def saveFile(self, content, chapterName):
        print('Writing chapter: ' + chapterName)
        chapterName = self.checkChapterName(chapterName)
        path = os.path.join(os.path.join(
            os.getcwd(), self.bookName), chapterName)
        # Use a context manager so the file is closed even if the write fails.
        with open(path + '.txt', 'w', encoding='utf-8') as f:
            f.write(content)

    def checkChapterName(self, chapterName):
        # '\t' is the tab escape character.
        # Windows file names may not contain any of the 9 characters \ / : * ? " < > |,
        # so strip those (plus tabs) from the title before using it as a file name.
        strs = ['\t', '\\', '/', ':', '*', '?', '"', '<', '>', '|']
        for s in strs:
            chapterName = chapterName.replace(s, '')
        return chapterName

def main():
    dlObj = downLoader()
    dlObj.getChapter()
    dlObj.begin(0)  # the argument is the chapter index to start from

    sys.exit()


if __name__ == '__main__':
    main()
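
The merge step mentioned at the top never made it into the script. Below is a minimal sketch of what it could look like, assuming the per-chapter files are exactly the ones saveFile wrote (one UTF-8 .txt per chapter inside the book-name folder) and that chapterName still lists the chapters in reading order. merge_book is a hypothetical helper, not part of the original script, and it relies on the os import already in the file:

def merge_book(dlObj):
    # Hypothetical helper: concatenate the per-chapter files written by
    # saveFile into a single <bookName>.txt, in the original chapter order.
    book_dir = os.path.join(os.getcwd(), dlObj.bookName)
    out_path = os.path.join(os.getcwd(), dlObj.bookName + '.txt')
    with open(out_path, 'w', encoding='utf-8') as out:
        for name in dlObj.chapterName:
            chapter_path = os.path.join(
                book_dir, dlObj.checkChapterName(name) + '.txt')
            with open(chapter_path, 'r', encoding='utf-8') as f:
                out.write(f.read())
            out.write('\n')  # blank line between chapters

Call it after the download finishes, e.g. merge_book(dlObj) as the last line of main().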

 
