From scraping a single image, to scraping a whole page, to scraping page after page!
1. Scraping a single image
import requests

# Direct URL of the image (a Baidu-hosted JPEG)
url = 'https://img0.baidu.com/it/u=728444420,2020970780&fm=253&fmt=auto&app=138&f=JPEG?w=667&h=500'

# Request headers: a browser User-Agent so the server treats us like a normal browser
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36",
}

# Send the GET request
response = requests.get(url, headers=headers)

# response.content holds the raw bytes of the image
content = response.content
print(content)

# Save the image (the URL serves a JPEG, so a .jpg extension matches the data)
with open('ORV.jpg', 'wb') as f:
    f.write(content)
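If the link is dead or the server returns an error page, the code above will happily write that error page to disk. A slightly more defensive variant is sketched below (the timeout value is an assumed choice, not part of the original script):

# Sketch: same download, but fail loudly on HTTP errors instead of saving garbage
import requests

url = 'https://img0.baidu.com/it/u=728444420,2020970780&fm=253&fmt=auto&app=138&f=JPEG?w=667&h=500'
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36"}

response = requests.get(url, headers=headers, timeout=10)  # timeout is an assumed value
response.raise_for_status()  # raises requests.HTTPError on 4xx/5xx responses

with open('ORV.jpg', 'wb') as f:
    f.write(response.content)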
2. Scraping the images on a single page
import re
import requests

# Request headers: a browser User-Agent plus a Referer, since Baidu Images checks where requests come from
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36",
    "Referer": "https://image.baidu.com/search/index?tn=baiduimage&ps=1&ct=201326592&lm=-1&cl=2&nc=1&ie=utf-8&dyTabStr=MCwzLDEsMiw2LDQsNSw4LDcsOQ%3D%3D&word=%E6%B1%BD%E8%BD%A6"
}

# Search-results page URL (keyword: 越野汽车, "off-road car")
url = 'https://image.baidu.com/search/index?tn=baiduimage&ipn=r&ct=201326592&cl=2&lm=-1&st=-1&fm=result&fr=&sf=1&fmq=1699329412872_R&pv=&ic=&nc=1&z=&hd=&latest=&copyright=&se=1&showtab=0&fb=0&width=&height=&face=0&istype=2&dyTabStr=MCwzLDEsMiw2LDQsNSw4LDcsOQ%3D%3D&ie=utf-8&sid=&word=越野汽车'

# Send the GET request
response = requests.get(url, headers=headers)

# Decode the response body as UTF-8 text
content = response.content.decode('utf8')
# print(content)

# Extract the data: every thumbnail URL embedded in the page's JSON
detail_urls = re.findall('"thumbURL":"(.*?)"', content, re.DOTALL)
print(detail_urls)

# Download each image, numbering the files sequentially
i = 0
path = r"E:\批量下载"  # save directory; change this to your own path
for detail_url in detail_urls:
    response = requests.get(detail_url, headers=headers)
    with open(r'{}\{}.jpg'.format(path, i), 'wb') as f:
        f.write(response.content)
    i += 1
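One bad link in detail_urls is enough to crash this loop, and the script also assumes the save folder already exists. A more robust loop is sketched below, assuming the same headers and detail_urls as above; the timeout is an assumed value:

import os

path = r"E:\批量下载"
os.makedirs(path, exist_ok=True)  # create the save folder if it does not exist yet

for i, detail_url in enumerate(detail_urls):
    try:
        response = requests.get(detail_url, headers=headers, timeout=10)
        response.raise_for_status()
    except requests.RequestException:
        continue  # skip this image instead of aborting the whole loop
    with open(os.path.join(path, '{}.jpg'.format(i)), 'wb') as f:
        f.write(response.content)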
3. Scraping images across multiple pages
import os
import re

import requests
from bs4 import BeautifulSoup

num = 0         # images downloaded so far
numPicture = 0  # how many images the user wants
file = ''       # folder the images are saved into
List = []       # one list of image URLs per results page
def Find(url):
    """Count how many images the search returns by paging until a page comes back empty."""
    global List
    print('Counting the total number of images, please wait...')
    t = 0
    s = 0
    while t < 2000:
        Url = url + str(t)
        try:
            Result = requests.get(Url, timeout=7)
        except requests.exceptions.RequestException:
            t = t + 60
            continue
        else:
            result = Result.text
            # Use a regular expression to find the image URLs on this page
            pic_url = re.findall('"objURL":"(.*?)",', result, re.S)
            s += len(pic_url)
            if len(pic_url) == 0:
                break
            else:
                List.append(pic_url)
                t = t + 60
    return s
def recommend(url):
    """Scrape Baidu's related-search suggestions from the results page."""
    Re = []
    try:
        html = requests.get(url, timeout=7)
    except requests.exceptions.RequestException:
        return Re  # return an empty list so the caller can still iterate
    html.encoding = 'utf-8'
    bsObj = BeautifulSoup(html.text, 'html.parser')
    div = bsObj.find('div', id='topRS')
    if div is not None:
        listA = div.findAll('a')
        for a in listA:
            if a is not None:
                Re.append(a.get_text())
    return Re
def downloadPicture(html, keyword):
    """Extract every objURL from one results page and download the images."""
    global num
    # Use a regular expression to find the image URLs on this page
    pic_url = re.findall('"objURL":"(.*?)",', html, re.S)
    print('Found images for keyword "' + keyword + '", starting download...')
    for each in pic_url:
        print('Downloading image ' + str(num + 1) + ' from: ' + str(each))
        try:
            if each is not None:
                pic = requests.get(each, timeout=7)
            else:
                continue
        except requests.exceptions.RequestException:
            print('Error: this image could not be downloaded')
            continue
        else:
            string = os.path.join(file, keyword + '_' + str(num) + '.jpg')
            with open(string, 'wb') as fp:
                fp.write(pic.content)
            num += 1
            if num >= numPicture:
                return
if __name__ == '__main__':  # entry point
    word = input('Enter a search keyword (a name, a place, etc.): ')
    # The flip endpoint returns paged results; pn is the result offset
    url = 'http://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8&word=' + word + '&pn='
    tot = Find(url)
    Recommend = recommend(url)  # collect related-search suggestions
    print('Detection finished: %d images found for "%s"' % (tot, word))
    numPicture = int(input('How many images do you want to download? '))
    file = input('Enter a folder name to store the images: ')
    if os.path.exists(file):
        print('That folder already exists, please enter another name')
        file = input('Enter a folder name to store the images: ')
    os.mkdir(file)
    t = 0
    tmp = url
    while t < numPicture:
        try:
            url = tmp + str(t)
            result = requests.get(url, timeout=10)
            print(url)
        except requests.exceptions.RequestException:
            print('Network error, please check your connection and retry')
            t = t + 60
        else:
            downloadPicture(result.text, word)
            t = t + 60
    print('Search finished, thanks for using this script')
    print('You might also like:')
    for item in Recommend:  # note: do not name this loop variable "re", it would shadow the re module
        print(item, end=' ')
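The pn parameter of the flip endpoint is a result offset that the script advances in steps of 60, i.e. one page holds 60 results. Given a target image count, the page URLs can be computed up front (a sketch, assuming the same 60-results-per-page layout the loops above rely on; target is an example value):

import math

PAGE_SIZE = 60                         # results per flip page, matching the t += 60 steps above
target = 150                           # example: how many images the user asked for
pages = math.ceil(target / PAGE_SIZE)
page_urls = [url + str(n * PAGE_SIZE) for n in range(pages)]  # url is the '&pn=' base from above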
This article covered web scraping with Python: a code example for downloading a single image, how to scrape one results page and download all of its images, and finally a method for scraping images across multiple pages, using the requests library, regular expressions, and BeautifulSoup parsing.