The code below can be copied and used as-is!
The trick with scraping pixabay: you cannot scrape it with the usual requests library, because the site is served over HTTP/2, while requests only speaks HTTP/1.1 (the protocol most everyday web traffic still uses).
So we need a different approach: the httpx module, which supports HTTP/2.
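You can verify the negotiated protocol yourself; here is a minimal sketch (note that HTTP/2 support in httpx requires the h2 extra, i.e. pip install httpx[http2]):

import httpx

# Minimal check: ask httpx to negotiate HTTP/2 and print what the server agreed to.
client = httpx.Client(http2=True)
res = client.get("https://pixabay.com/")
print(res.http_version)  # expected: "HTTP/2"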
Note: the COOKIE value here must come from a logged-in session (copy the Cookie request header from your browser's DevTools after logging in).
# encoding: utf-8
import httpx
from bs4 import BeautifulSoup
import os
import zipfile
import time
import random

# Must be the cookie of a logged-in session.
cookies = input("Enter the cookie value from your logged-in session: ").replace('\n', '').replace('\t', '')
headers = {
    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    # 'accept-encoding': 'gzip, deflate, br',
    'accept-language': 'zh-CN,zh;q=0.9',
    'cookie': cookies,
    'referer': 'https://pixabay.com/photos/search/?pagi=2&',
    'sec-ch-ua': '"Chromium";v="94", "Google Chrome";v="94", ";Not A Brand";v="99"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"Windows"',
    'sec-fetch-dest': 'document',
    'sec-fetch-mode': 'navigate',
    'sec-fetch-site': 'same-origin',
    'sec-fetch-user': '?1',
    'upgrade-insecure-requests': '1',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36',
}
def getimgindex(page):
    """Fetch one search-results page and download every non-sponsored image on it."""
    url = f"https://pixabay.com/zh/photos/search/?pagi={page}&"
    client = httpx.Client(http2=True, verify=False)
    res = client.get(url, headers=headers, timeout=20)
    html = BeautifulSoup(res.content, 'lxml')
    imgurl = html.find_all('div', class_="row-masonry-cell-inner")
    for img in imgurl:
        photo = img.find_all('img')[0]
        title = photo['alt']
        if 'Sponsored image' not in title:
            try:
                data_lazy = photo['data-lazy']
            except KeyError:  # already-loaded images carry the URL in 'src' instead
                data_lazy = photo['src']
            # e.g. '.../container-ship-6631117_640.jpg' -> slug 'container-ship-6631117'
            data_lazy = str(data_lazy).split('/')[-1].split('_')[0]
            # Build the download URL from the slug (the original hard-coded a
            # 'container-ship-' prefix here, which duplicated the slug)
            downloadUrl = f'https://pixabay.com/zh/images/download/{data_lazy}.jpg?attachment'
            filename = data_lazy.split('-')[-1]  # numeric image id
            downloadpath = f"data/{filename}"  # download directory
            createFile(downloadpath)  # create the directory if needed
            taginfo = str(title).replace(', ', "_")
            downloadfilename = f"data/{filename}/{filename}_{taginfo}"  # file name, no extension
            download(downloadUrl, downloadfilename)
def download(url, filename):
    """Download a single image (for large files, see the streaming variant after the script)."""
    client = httpx.Client(http2=True, verify=False)
    res = client.get(url, headers=headers, timeout=20)
    with open(f'{filename}.jpg', 'wb') as f:
        f.write(res.content)
    print(f'{filename} Download successful!')
def zipDir(dirpath, outFullName):
    """
    Zip a directory.
    :param dirpath: path of the directory to compress
    :param outFullName: output path of the archive, e.g. xxxx.zip
    :return: None
    """
    zipf = zipfile.ZipFile(outFullName, "w", zipfile.ZIP_DEFLATED)
    for path, dirnames, filenames in os.walk(dirpath):
        # Strip the root prefix so only the contents below dirpath are archived
        fpath = path.replace(dirpath, '')
        for filename in filenames:
            zipf.write(os.path.join(path, filename), os.path.join(fpath, filename))
    zipf.close()
def createFile(filename):
    """Create the directory if it does not already exist."""
    os.makedirs(filename, exist_ok=True)
def writeTxt(filename, content):
    """Append one line of text to a log file."""
    with open(f'{filename}', 'a+', encoding='utf-8') as f:
        f.write(str(content) + '\n')
    print(f'{content} written to log')
def runs():
    while True:
        try:
            p = int(input("Enter the start page number: "))
        except ValueError:
            continue  # not a number; ask again (a bare except here would also swallow Ctrl+C)
        else:
            n = 0
            for x in range(p, 12221):  # hard-coded upper page bound
                n += 1
                if n % 15 == 0:  # take a longer break every 15 pages
                    randomTime = random.randint(10, 30)
                    print(f"Resting {randomTime} seconds before continuing!")
                    time.sleep(randomTime)
                randomTime = random.randint(1, 5)
                print(f"Starting in {randomTime} seconds")
                time.sleep(randomTime)
                nowtime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
                text = f"{nowtime} ———————— page {x}"
                writeTxt('loging.txt', text)
                getimgindex(x)
            break

if __name__ == '__main__':
    runs()
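One caveat with download() above: res.content buffers the whole image in memory before writing it out. For large originals, a streamed variant using httpx's streaming API might look like this (a sketch reusing the same headers; download_streamed is a hypothetical name, not part of the original script):

def download_streamed(url, filename):
    """Sketch: stream the response to disk in chunks instead of buffering it all."""
    client = httpx.Client(http2=True, verify=False)
    with client.stream('GET', url, headers=headers, timeout=20) as res:
        with open(f'{filename}.jpg', 'wb') as f:
            for chunk in res.iter_bytes():
                f.write(chunk)
    print(f'{filename} Download successful!')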
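Also note that zipDir() is defined but never called anywhere in the script. If you want the downloads bundled into an archive after a run, one way (paths are illustrative) would be:

zipDir('data', 'data.zip')  # compress the whole download folder into data.zip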
If this helped you, consider giving me a follow.
Result screenshot: