import os

import requests
from lxml import etree


class Img:
    def __init__(self):
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36",
        }
        self.start_url = "http://www.netbian.com/"

    def get_response(self, url):
        res = requests.get(url, headers=self.headers)
        return res.content

    def trans_res_html(self, res):
        html = etree.HTML(res)
        return html

    def run(self):
        if not os.path.exists("tupian"):
            # Create the output folder
            os.mkdir("tupian")
        res = self.get_response(self.start_url)
        html = self.trans_res_html(res)
        links = html.xpath('//div[@id="main"]/div[@class="list"]/ul/li/a')
        # Iterate over the list items
        for dz in links:
            item = {}
            item["a_href"] = "http://www.netbian.com/" + dz.xpath('./@href')[0]
            res2 = self.get_response(item["a_href"])
            html2 = self.trans_res_html(res2)
            src = html2.xpath('//div[@id="main"]//div[@class="pic"]/p/a/img/@src')[0]
            name = html2.xpath('//div[@id="main"]//div[@class="pic"]/p/a/img/@alt')[0]
            img_res = self.get_response(src)
            # Save the image file (the with block closes it automatically)
            with open("tupian/%s.jpg" % name, "wb") as f:
                f.write(img_res)
            print(item)


if __name__ == '__main__':
    img = Img()
    img.run()
Python: using requests to scrape images
This article presents a Python script that uses the requests and lxml libraries to crawl images from the Netbian site and download them into a local folder automatically. The script first defines a User-Agent header, then fetches the start page and parses the response into an HTML tree. It then iterates over the list items, follows each detail link, extracts the image's URL and alt text, and downloads and saves the image as a .jpg file.
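To see the parsing step on its own, the following minimal sketch runs the same XPath expressions against two made-up HTML fragments instead of the live site. The fragment contents, file names, and URLs are invented for illustration and only mimic the structure the script expects; the real pages may differ.

from lxml import etree

# Invented fragment mimicking the list page structure the script expects
list_page = '''
<div id="main">
  <div class="list">
    <ul>
      <li><a href="/desk/12345.htm"><img src="thumb.jpg" alt="demo"></a></li>
    </ul>
  </div>
</div>
'''

# Invented fragment mimicking a detail page
detail_page = '''
<div id="main">
  <div class="pic">
    <p><a href="#"><img src="http://example.com/full.jpg" alt="demo picture"></a></p>
  </div>
</div>
'''

html = etree.HTML(list_page)
# Same expression as in run(): collect every <a> inside the list items
for a in html.xpath('//div[@id="main"]/div[@class="list"]/ul/li/a'):
    print("detail link:", a.xpath('./@href')[0])

html2 = etree.HTML(detail_page)
# Same expressions used on the detail page to get the image URL and its name
print("image url:", html2.xpath('//div[@id="main"]//div[@class="pic"]/p/a/img/@src')[0])
print("image name:", html2.xpath('//div[@id="main"]//div[@class="pic"]/p/a/img/@alt')[0])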