# 多线程爬取百度图片 (multi-threaded Baidu image scraper)

'''
Crawl thumbnail images from Baidu image search via its acjson API,
downloading pages of 30 results concurrently with a thread pool.
'''
import urllib.parse
import requests
import os
import time
from concurrent.futures import ThreadPoolExecutor

# HTTP headers: present as a desktop Chrome browser so Baidu serves the
# normal acjson API response instead of blocking the request.
headers = {
    "User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36"
}
INPUT = input("要查询的图片")

# Create the per-query download folder. os.makedirs also creates the
# intermediate "./图片" directory — os.mkdir would raise FileNotFoundError
# on a fresh checkout where "./图片" does not exist yet.
if not os.path.exists(f"./图片/{INPUT}"):
    os.makedirs(f"./图片/{INPUT}")
    print("已创建文件夹")

PAGES = int(input("要爬取的页数(1页30张)"))
INPUT_str = urllib.parse.quote(INPUT)  # percent-encode the query for the URL

# Each acjson request returns one "page" of up to 30 results; build one
# URL per requested page. `pn` is the zero-based offset of the first
# result, so page i starts at (i-1)*30 — using i*30 would skip the
# first 30 images entirely.
start_time = time.time()
urls = []
for i in range(1, PAGES + 1):
    url = f"https://image.baidu.com/search/acjson?tn=resultjson_com&logid=5179920884740494226&ipn=rj&ct=201326592&is=&fp=result&queryWord={INPUT_str}&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=-1&z=&ic=0&hd=&latest=&copyright=&word={INPUT_str}&s=&se=&tab=&width=&height=&face=0&istype=2&qc=&nc=1&fr=&expermode=&nojc=&pn={(i-1)*30}&rn=30&gsm=1e&1635054081427= "
    urls.append(url)

def Request(url):
    """Fetch one acjson result page and save every thumbnail it lists.

    url: one Baidu image-search acjson API URL (up to 30 results).

    Each image is written to ./图片/{INPUT}/ with a microsecond-timestamp
    filename so concurrent worker threads do not overwrite each other.
    Entries that lack a thumbnail URL and individual download failures
    are skipped; other errors propagate to the executor.
    """
    # Timeout so a stalled connection cannot hang a worker thread forever.
    response = requests.get(url=url, headers=headers, timeout=10)
    # "data" holds one dict per result; guard against a malformed payload.
    datas = response.json().get('data', [])

    for data in datas:
        try:
            url_every = data['thumbURL']
            url_type = data['type']
            img_bytes = requests.get(url=url_every, headers=headers, timeout=10).content
            # 多线程去执行的时候,为了防止覆盖,所以用时间戳代表每一张图片
            # (timestamp filename avoids collisions between threads)
            with open(f"./图片/{INPUT}/{int(time.time()*1000000)}.{url_type}", "wb") as w:
                w.write(img_bytes)
        except (KeyError, requests.RequestException):
            # Baidu's "data" list typically ends with an empty dict (no
            # thumbURL/type); also tolerate transient download errors.
            continue
# 多线程爬取图片 — fan out the page downloads across a thread pool.
# The work is I/O-bound, so threads overlap while waiting on the network.
# Cap the pool size: one thread per page is unbounded for large inputs,
# and ThreadPoolExecutor raises ValueError if max_workers is 0 (which
# the original hit when the user asked for 0 pages).
if urls:
    with ThreadPoolExecutor(max_workers=min(len(urls), 16)) as e:
        for url in urls:
            e.submit(Request, url)

end_time = time.time()
print("用时:",round(end_time-start_time,2))





评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值