Assignment 7: Crawling 新笔趣阁 novels with a single thread and with multiple threads, and saving them to files

This post walks through crawling novels from 新笔趣阁: fetching the novel list from the category page, opening each novel's detail page to get its chapter list, and downloading the chapter contents. Both a single-threaded and a multi-threaded approach are implemented, with the code for version 1.0 and version 1.2 explained in detail.

1. Biquge novel download
url: http://www.xbiquge.la/xuanhuanxiaoshuo/
Approach: start from the category page and get the list of novels; open each novel to get its chapter list; then download the content from each chapter page (a minimal sketch of this first step follows the outline below).

	Fields:
		novel title
		chapter title
		chapter content
	Saving:
		1. One txt file per novel.
		2. Every novel is complete.
	Advanced:
		1. Use multiple threads, while keeping each book's chapters in order.
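Before the two full versions, here is a minimal, self-contained sketch of the first step only (fetching the category page and extracting the novel links). It assumes the page markup matches the //span[@class="s2"]/a/@href XPath used in the versions below, and the headers are trimmed to a bare placeholder User-Agent:

import requests
from lxml import etree

# Minimal sketch of step one: fetch the category page and list the novel URLs.
base_url = 'http://www.xbiquge.la/xuanhuanxiaoshuo/'
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)'}

response = requests.get(base_url, headers=headers, timeout=10)
html = etree.HTML(response.content.decode('utf-8'))
novel_urls = html.xpath('//span[@class="s2"]/a/@href')
print(novel_urls[:5])  # the first few novel detail-page URLs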

Version 1.0

import requests
from lxml import etree
import os
def get_proxies():
    # Assumes a local proxy-pool service (e.g. a small web app on port 5000)
    # that returns a bare 'ip:port' string as plain text.
    try:
        response = requests.get('http://localhost:5000/get', timeout=5)
        proxy = response.text.strip()
        proxies = {
            'http': 'http://' + proxy
        }
        return proxies
    except Exception:
        return None
def get_xpath_by_requests(url, proxies):
    '''Fetch url through the given proxy and return an lxml HTML element.

    :param url: page to fetch
    :param proxies: proxy dict for requests, e.g. {'http': 'http://ip:port'}
    :return: etree.HTML element of the decoded page
    '''
    try:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',
            'Cookie': '_abcde_qweasd=0; _abcde_qweasd=0; bdshare_firstime=1577178973028; Hm_lvt_169609146ffe5972484b0957bd1b46d6=1577178973,1577186563,1577186739,1577235413; BAIDU_SSP_lcr=https://www.baidu.com/link?url=AvLJGcMiHKBXi90P2T0xOluezhPz2PeeTLAbP75dmma&wd=&eqid=e131d391001338d8000000025e02b3d2; Hm_lpvt_169609146ffe5972484b0957bd1b46d6=1577235422',
            'Referer': 'http://www.xbiquge.la/'
        }
        response = requests.get(url, headers=headers, proxies=proxies, timeout=10)
        return etree.HTML(response.content.decode('utf-8'))
    except Exception:
        # On any failure, draw a fresh proxy and retry.  Note this recursion
        # is unbounded; see the bounded-retry sketch after this listing.
        new_proxies = get_proxies()
        print('Switching to proxy {}!'.format(new_proxies))
        return get_xpath_by_requests(url, new_proxies)
def get_text(text):
    # Return the first element of an XPath result list, or '' if it is empty.
    if text:
        return text[0]
    return ''
def write_to_txt(text, book_name):
    # One txt file per novel; chapters are appended in the order they are parsed.
    filename = './book/' + book_name + '.txt'
    dirname = os.path.dirname(filename)
    os.makedirs(dirname, exist_ok=True)
    with open(filename, 'a+', encoding='utf-8') as fp:
        fp.write(text)
def parse_chapter(url):
    url = 'http://www.xbiquge.la' + url
    html = get_xpath_by_requests(url, proxies)  # uses the module-level proxies set in __main__
    chapter_name = get_text(html.xpath('//div[@class="bookname"]/h1/text()'))
    book_name = get_text(html.xpath('//div[@class="con_top"]/a[last()]/text()'))
    contents = html.xpath('//div[@id="content"]/text()')
    content = ''.join(contents)
    # '\n' (not the raw string r'\n', which writes a literal backslash-n)
    # so a real newline separates the chapter title from its content.
    text = chapter_name + '\n' + content + '\n'
    write_to_txt(text, book_name)

def parse_novel(url):
    # Get the xpath object of the novel's detail page, then download
    # every chapter in order.
    html = get_xpath_by_requests(url, proxies)
    chapters = html.xpath('//div[@id="list"]/dl/dd/a/@href')
    for chapter in chapters:
        parse_chapter(chapter)
def main():
    base_url = 'http://www.xbiquge.la/xuanhuanxiaoshuo/'
    html = get_xpath_by_requests(base_url, proxies)
    novel_urls = html.xpath('//span[@class="s2"]/a/@href')
    for url in novel_urls:
        parse_novel(url)




if __name__ == '__main__':
    proxies = get_proxies()
    print('Using proxy {}!'.format(proxies))
    main()
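One weakness of get_xpath_by_requests above is that it recurses without limit when every proxy fails. Here is a minimal bounded-retry variant, meant to drop into the same script (it reuses the get_proxies helper from the listing; the max_retries parameter and the None return value are my own additions):

def get_xpath_with_retries(url, proxies, max_retries=3):
    # Give up after max_retries failed attempts instead of recursing forever.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)',
        'Referer': 'http://www.xbiquge.la/'
    }
    for attempt in range(max_retries):
        try:
            response = requests.get(url, headers=headers, proxies=proxies, timeout=10)
            return etree.HTML(response.content.decode('utf-8'))
        except Exception:
            proxies = get_proxies()  # draw a fresh proxy from the local pool
            print('Retry {} with proxy {}'.format(attempt + 1, proxies))
    return None  # the caller must handle a failed fetch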

Version 1.2

Version 1.2 parallelizes across novels: the novel URLs go into a shared queue, and each worker thread repeatedly takes one novel and downloads all of its chapters sequentially, which is what keeps each book's chapters in order.

import requests
from lxml import etree
import os
from queue import Queue, Empty
import threading
class Biquge(threading.Thread):
    def __init__(self, url=None, name=None, q_novels=None):
        # Pass name to Thread's own constructor instead of overwriting
        # the Thread.name property afterwards.
        super().__init__(name=name)
        self.url = url
        self.q_novels = q_novels
        self.proxies = self.get_proxies()

    def get_proxies(self):
        # Same local proxy-pool assumption as in version 1.0: a service on
        # port 5000 returning a bare 'ip:port' string as plain text.
        try:
            response = requests.get('http://localhost:5000/get', timeout=5)
            proxy = response.text.strip()
            proxies = {
                'http': 'http://' + proxy
            }
            return proxies
        except Exception:
            return None

    def get_xpath_by_requests(self, url, proxies):
        '''Fetch url through the given proxy and return an lxml HTML element.

        :param url: page to fetch
        :param proxies: proxy dict for requests, e.g. {'http': 'http://ip:port'}
        :return: etree.HTML element of the decoded page
        '''
        try:
            headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',
                'Cookie': '_abcde_qweasd=0; _abcde_qweasd=0; bdshare_firstime=1577178973028; Hm_lvt_169609146ffe5972484b0957bd1b46d6=1577178973,1577186563,1577186739,1577235413; BAIDU_SSP_lcr=https://www.baidu.com/link?url=AvLJGcMiHKBXi90P2T0xOluezhPz2PeeTLAbP75dmma&wd=&eqid=e131d391001338d8000000025e02b3d2; Hm_lpvt_169609146ffe5972484b0957bd1b46d6=1577235422',
                'Referer': 'http://www.xbiquge.la/'
            }
            response = requests.get(url, headers=headers, proxies=proxies, timeout=10)
            return etree.HTML(response.content.decode('utf-8'))
        except Exception:
            # Unbounded retry, as in version 1.0; see the bounded-retry
            # sketch after that listing for a safer variant.
            new_proxies = self.get_proxies()
            print('Switching to proxy {}!'.format(new_proxies))
            return self.get_xpath_by_requests(url, new_proxies)

    def get_text(self, text):
        # Return the first element of an XPath result list, or '' if it is empty.
        if text:
            return text[0]
        return ''

    def write_to_txt(self, text, book_name):
        # One txt file per novel.  makedirs(exist_ok=True) is safe even when
        # several threads try to create the directory at the same time,
        # unlike the exists()-then-mkdir check, which can race.
        filename = './book/' + book_name + '.txt'
        dirname = os.path.dirname(filename)
        os.makedirs(dirname, exist_ok=True)
        with open(filename, 'a+', encoding='utf-8') as fp:
            fp.write(text)

    def parse_chapter(self, url):
        url = 'http://www.xbiquge.la' + url
        html = self.get_xpath_by_requests(url, self.proxies)
        chapter_name = self.get_text(html.xpath('//div[@class="bookname"]/h1/text()'))
        book_name = self.get_text(html.xpath('//div[@class="con_top"]/a[last()]/text()'))
        contents = html.xpath('//div[@id="content"]/text()')
        content = ''.join(contents)
        # '\n' (not the raw string r'\n') so a real newline separates
        # the chapter title from its content.
        text = chapter_name + '\n' + content + '\n'
        self.write_to_txt(text, book_name)

    def parse_novel(self, url):
        # Get the xpath object of the novel's detail page, then download
        # every chapter in order.
        html = self.get_xpath_by_requests(url, self.proxies)
        chapters = html.xpath('//div[@id="list"]/dl/dd/a/@href')
        for chapter in chapters:
            self.parse_chapter(chapter)

    def get_novels(self):
        # Collect the novel URLs from the category page.
        html = self.get_xpath_by_requests(self.url, self.proxies)
        novel_urls = html.xpath('//span[@class="s2"]/a/@href')
        return novel_urls
    def run(self):
        # Each worker takes whole novels from the queue; a single thread
        # downloads all chapters of a book, so chapter order is preserved.
        # get_nowait() avoids the race between an empty() check and get():
        # another thread could drain the queue in between, blocking get().
        while True:
            try:
                novel_url = self.q_novels.get_nowait()
            except Empty:
                break
            self.parse_novel(novel_url)
if __name__ == '__main__':
    base_url = 'http://www.xbiquge.la/xuanhuanxiaoshuo/'
    b = Biquge(url=base_url)
    novel_urls = b.get_novels()
    # Initialize the task queue.
    q_novels = Queue()
    for url in novel_urls:
        q_novels.put(url)
    # Create a list of worker names and spawn one thread per name.
    crawl_list = ['aa', 'bb', 'cc', 'dd']
    for crawl in crawl_list:
        t = Biquge(name=crawl, q_novels=q_novels)
        t.start()
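As written, the main thread exits as soon as the workers are started; because the threads are non-daemon the downloads still finish, but the script cannot report completion. A minimal sketch of the same loop keeping references to the workers and joining them (the final print is my own addition):

    threads = []
    for crawl in crawl_list:
        t = Biquge(name=crawl, q_novels=q_novels)
        t.start()
        threads.append(t)
    for t in threads:
        t.join()  # block until every worker has drained the queue
    print('All novels downloaded.')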

