Python图片下载器(单线程PK多线程)_一蓑烟雨任平生

本文介绍了Python爬虫分别使用单线程和多线程下载Mzitu网站图片的过程。通过比较,展示了多线程在爬取速度上的优势。代码中使用了正则表达式进行网页解析,tqdm库实现进度条显示,以及threading模块实现多线程下载。此外,多线程版本还引入了代理以应对可能的IP限制问题。


前言

爬取图片本身很简单,但单线程与多线程的对比很有意思,可以直观地看到下载速度上的差异。

一、单线程下载图片

# =============================================================================
# Mzitu图片爬取
# =============================================================================
import re
import os
import time
import queue
import requests
from tqdm import tqdm
from termcolor import *
from colorama import init

# 解决CMD无法显示颜色问题
init(autoreset=False)
    
class spider_Mzidu():
    """Single-threaded Mzitu image scraper.

    Workflow: read the site's total page count, ask the user for a page
    range, collect every album ID into a queue, then download each album's
    images one by one.
    """

    def __init__(self):
        # Request URL templates: %d = listing-page number, %s = album ID.
        self.url_page = 'https://xxxxxxx/page/%d/' # listing page (used to collect album IDs)
        self.url_taotu = 'https://xxxxxxxxx/%s'      # album page (used to get image URLs)

        # Request headers (Referer is required by the site's hotlink protection).
        self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64; rv:75.0) Gecko/20100101 Firefox/75.0',
                        'Accept': '*/*',
                        'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
                        'Accept-Encoding': 'gzip, deflate, br',
                        'X-Requested-With': 'XMLHttpRequest',
                        'Connection': 'keep-alive',
                        'Referer': 'https://www.xxxxx.com',
                       }

        # Regular expressions used to parse the HTML (raw strings so that
        # \d / \s are not treated as invalid string escapes).
        self.p_id = r'<span><a href="https://xxxxxxxx/(\d*?)" target="_blank">(.*?)</a></span>'
        self.p_imgurl = r'<img class="blur" src="(.*?)"'
        self.p_page = r'…</span>.*?<span>(\d*?)</span>'

        # Queue of (album_id, album_name) tuples still to be downloaded.
        self.queue_id = queue.Queue()

    def getPages(self):
        """Return the total number of listing pages on the site."""
        # timeout prevents the script from hanging forever on a dead server
        # (and matches the multi-threaded version of this scraper).
        res = requests.get(self.url_page % 1, headers=self.headers, timeout=10)
        html = res.text
        # The page number printed just before the "next" button is the last page.
        N = re.findall(r'''class="page-numbers dots">[\s\S]*?>(\d*?)</a>[\s\S]*?"next page-numbers"''', html)[0]
        return int(N)

    def getID(self):
        """Ask the user for a page range and queue every album ID found."""
        page_range = input('请输入爬取页数(如1-10):')
        p_s = int(page_range.split('-')[0])
        p_e = int(page_range.split('-')[1])
        time.sleep(0.5)
        print(colored('开始获取套图ID'.center(50,'-'),'green'))
        bar = tqdm(range(p_s, p_e + 1), ncols=60)  # progress bar over listing pages
        for p in bar:
            res = requests.get(self.url_page % p, headers=self.headers, timeout=10)
            html = res.text
            ids = re.findall(self.p_id, html)
            for i in ids:
                self.queue_id.put(i)
            bar.set_description('第%d页'%p)

    def downloadImg(self, imgurl):
        """Fetch a single image and return its raw bytes."""
        res = requests.get(imgurl, headers=self.headers, timeout=10)
        return res.content

    def parseTaotu(self, taotuID):
        """Parse one album page; return (imgurl_template, page_count).

        The returned template contains a '%s' placeholder where the
        zero-padded image index goes.
        """
        res = requests.get(self.url_taotu % taotuID, headers=self.headers, timeout=10)
        html = res.text
        page = int(re.findall(self.p_page, html)[0])
        imgurl = re.findall(self.p_imgurl, html)[0]
        # Turn '.../xx01.jpg' into '.../xx%s.jpg' so we can iterate over indices.
        imgurl = imgurl[:-6] + '%s' + imgurl[-4:]
        return (imgurl, page)

    def downloadTaotu(self):
        """Drain the queue, downloading every image of every queued album."""
        while not self.queue_id.empty():
            taotu = self.queue_id.get()
            taotuID = taotu[0]
            taotuName = taotu[1]
            try:
                imgurl, page = self.parseTaotu(taotuID)
                path = '[P%d]' % page + taotuName
                # exist_ok avoids the check-then-create race of exists()+mkdir().
                os.makedirs(path, exist_ok=True)
                bar = tqdm(range(1, page + 1), ncols=50)  # per-album progress bar
                for i in bar:
                    url = imgurl % (str(i).zfill(2))
                    img = self.downloadImg(url)
                    with open('./%s/%d.jpg' % (path, i), 'wb') as f:
                        f.write(img)
                print('套图("'+colored(taotuName,'red')+'")爬取完成')
            # 'except Exception' (not bare 'except:') so Ctrl-C still stops the run.
            except Exception:
                # Back off and re-queue the album for another attempt.
                # NOTE(review): a permanently failing album retries forever —
                # consider adding a retry cap.
                time.sleep(3)
                self.queue_id.put(taotu)

    def run(self):
        """Entry point: show a banner, collect album IDs, then download."""
        os.system('cls')  # clear the console (Windows-only command)
        print('*'*35)
        print('*'+'欢迎使用Mzitu下载器'.center(26)+'*')
        print('*'*35)
        N = self.getPages()
        print(('Mzitu当前共有%s页!'%colored(N,'red')).center(30))
        print('\n')
        self.getID()
        print('\n'+colored('开始爬取套图'.center(50,'-'),'green'))
        self.downloadTaotu()
            
# Script entry: build the spider and start the interactive download flow.
# NOTE(review): consider guarding with `if __name__ == '__main__':` so the
# module can be imported without side effects.
spider = spider_Mzidu()
spider.run()

二、多线程爬取图片

# =============================================================================
# Mzitu图片爬取(多线程)
# =============================================================================
import re
import os
import time
import queue
import requests
import threading
from tqdm import tqdm
from termcolor import *
from colorama import init

# 解决CMD无法显示颜色问题
init(autoreset=False)

# Fetch one proxy address ("host:port") from the proxy-provider API.
def Get_proxy():
    # timeout so a dead proxy API cannot hang every worker thread forever
    res = requests.get('xxxxxxxxxxxxxxxxxxx', timeout=10)
    html = res.text
    # NOTE(review): the raw response body may carry a trailing newline;
    # callers splice it directly into 'http://'+proxy — confirm the API
    # returns a bare host:port string.
    return html
    
class spider_Mzidu():
    """Multi-threaded Mzitu image scraper.

    Same workflow as the single-threaded version, but downloadTaotu() is run
    by several worker threads that consume the shared album-ID queue, and all
    requests go through an HTTP proxy (refreshed on failure).
    """

    def __init__(self):
        # Request URL templates: %d = listing-page number, %s = album ID.
        self.url_page = 'https://www.xxxx.com/page/%d/' # listing page (used to collect album IDs)
        self.url_taotu = 'https://www.xxxx.com/%s'      # album page (used to get image URLs)

        # Request headers (Referer is required by the site's hotlink protection).
        self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64; rv:75.0) Gecko/20100101 Firefox/75.0',
                        'Accept': '*/*',
                        'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
                        'Accept-Encoding': 'gzip, deflate, br',
                        'X-Requested-With': 'XMLHttpRequest',
                        'Connection': 'keep-alive',
                        'Referer': 'https://www.xxxxxx.com',
                       }

        # Regular expressions used to parse the HTML (raw strings so that
        # \d / \s are not treated as invalid string escapes).
        self.p_id = r'<span><a href="https://www.xxxxxx.com/(\d*?)" target="_blank">(.*?)</a></span>'
        self.p_imgurl = r'<img class="blur" src="(.*?)"'
        self.p_page = r'…</span>.*?<span>(\d*?)</span>'

        # Queue of (album_id, album_name) tuples shared by all worker threads
        # (queue.Queue is thread-safe).
        self.queue_id = queue.Queue()

        # HTTP proxy used by the main thread (workers fetch their own).
        proxy = Get_proxy()
        self.proxies = {'http':'http://'+proxy,
                        'https':'https://'+proxy}

    def getPages(self): # total number of listing pages
        """Return the total number of listing pages on the site."""
        res = requests.get(self.url_page % 1, headers=self.headers, proxies=self.proxies, timeout=10)
        html = res.text
        # The page number printed just before the "next" button is the last page.
        N = re.findall(r'''class="page-numbers dots">[\s\S]*?>(\d*?)</a>[\s\S]*?"next page-numbers"''', html)[0]
        return int(N)

    def getID(self):
        """Ask the user for a page range and queue every album ID found."""
        page_range = input('请输入爬取页数(如1-10):')
        p_s = int(page_range.split('-')[0])
        p_e = int(page_range.split('-')[1])
        time.sleep(0.5)
        print(colored('开始获取套图ID'.center(50,'-'),'green'))
        bar = tqdm(range(p_s, p_e + 1), ncols=60)  # progress bar over listing pages
        for p in bar:
            res = requests.get(self.url_page % p, headers=self.headers, proxies=self.proxies, timeout=10)
            html = res.text
            ids = re.findall(self.p_id, html)
            for i in ids:
                self.queue_id.put(i)
            bar.set_description('第%d页'%p)

    def downloadImg(self, imgurl, proxies):
        """Fetch a single image through the given proxy; return raw bytes."""
        res = requests.get(imgurl, headers=self.headers, proxies=proxies, timeout=10)
        return res.content

    def parseTaotu(self, taotuID, proxies):
        """Parse one album page; return (imgurl_template, page_count).

        The returned template contains a '%s' placeholder where the
        zero-padded image index goes.
        """
        res = requests.get(self.url_taotu % taotuID, headers=self.headers, proxies=proxies, timeout=10)
        html = res.text
        page = int(re.findall(self.p_page, html)[0])
        imgurl = re.findall(self.p_imgurl, html)[0]
        # Turn '.../xx01.jpg' into '.../xx%s.jpg' so we can iterate over indices.
        imgurl = imgurl[:-6] + '%s' + imgurl[-4:]
        return (imgurl, page)

    def downloadTaotu(self):
        """Worker: consume albums from the shared queue until it is empty.

        Each worker uses its own proxy, replaced whenever a download fails.
        """
        proxy = Get_proxy()
        proxies = {'http':'http://'+proxy,
                   'https':'https://'+proxy}
        while True:
            # get_nowait()+Empty instead of empty()+get(): with several
            # workers, empty() followed by get() is a race that can leave a
            # thread blocked forever on an exhausted queue.
            try:
                taotu = self.queue_id.get_nowait()
            except queue.Empty:
                break
            taotuID = taotu[0]
            taotuName = taotu[1]
            try:
                imgurl, page = self.parseTaotu(taotuID, proxies)
                path = '[P%d]' % page + taotuName
                # exist_ok also protects against two threads creating the
                # same directory at once (exists()+mkdir() would raise).
                os.makedirs(path, exist_ok=True)
                bar = tqdm(range(1, page + 1), ncols=50)  # per-album progress bar
                for i in bar:
                    url = imgurl % (str(i).zfill(2))
                    img = self.downloadImg(url, proxies)
                    with open('./%s/%d.jpg' % (path, i), 'wb') as f:
                        f.write(img)
                print('套图("'+colored(taotuName,'red')+'")爬取完成')
            # 'except Exception' (not bare 'except:') so Ctrl-C still stops workers.
            except Exception:
                # Assume the proxy died (or the site blocked it): back off,
                # fetch a fresh proxy, and re-queue the album for retry.
                time.sleep(3)
                proxy = Get_proxy()
                proxies = {'http':'http://'+proxy,
                           'https':'https://'+proxy}
                self.queue_id.put(taotu)

    def changeProxy(self):
        """Replace the main thread's proxy with a freshly fetched one."""
        proxy = Get_proxy()
        self.proxies = {'http':'http://'+proxy,
                        'https':'https://'+proxy}

    def run(self):
        """Entry point: show a banner, collect album IDs, then download
        with a small pool of worker threads."""
        os.system('cls')  # clear the console (Windows-only command)
        print('*'*35)
        print('*'+'欢迎使用Mzitu下载器'.center(26)+'*')
        print('*'*35)
        N = self.getPages()
        print(('Mzitu当前共有%s页!'%colored(N,'red')).center(30))
        print('\n')
        self.getID()
        print('\n'+colored('开始爬取套图'.center(50,'-'),'green'))
        # Fan out the queue to a fixed pool of worker threads and wait for
        # all of them to finish.
        N_thread = 3
        thread_list = []
        for i in range(N_thread):
            thread_list.append(threading.Thread(target=self.downloadTaotu))
        for t in thread_list:
            t.start()
        for t in thread_list:
            t.join()
            
# Script entry: build the spider and start the interactive, multi-threaded
# download flow.
# NOTE(review): consider guarding with `if __name__ == '__main__':` — also
# required on Windows if this is ever switched to multiprocessing.
spider = spider_Mzidu()
spider.run()

总结

如果大家对代码里的进度条或者输出的文字颜色感兴趣,让自己的代码输出更风骚,大家可以参考这里。

点这里
在这里插入图片描述

评论
成就一亿技术人!
拼手气红包6.0元
还能输入1000个字符
 
红包 添加红包
表情包 插入表情
 条评论被折叠 查看
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值