This site has a fairly rich collection of images, and they are pretty funny, so let's start by trying to crawl its image resources.
I put the image-downloading code in a reusable Python module:
my_down.py
import requests
import os
import re


def down_img(url, path):
    # The site uses protocol-relative URLs ("//..."), so prepend a scheme when missing
    if not url.startswith('http'):
        url = "http:" + url
    print('down_img:%s' % url)
    try:
        with requests.get(url) as r:
            if r.status_code == 200:
                data = r.content
                if data:  # r.content is bytes, so test for emptiness instead of comparing to ''
                    with open(path, 'wb') as f:
                        f.write(data)
                    print('down_img success,path:%s' % path)
                    return path
            else:
                print('download image fail:%d' % r.status_code)
    except requests.exceptions.ConnectionError:
        print('down_img exception:%s' % url)


def down_images(url, folder_path):
    if not os.path.exists(folder_path):
        os.makedirs(folder_path)
    # Capture the src attribute of every <img> tag on the page
    pattern = r'<img[^>]*?src="([^"]+)"[^>]*?>'
    header = {'User-Agent': 'Mozilla/5.0'}  # header name is 'User-Agent', not 'user_agent'
    with requests.get(url, headers=header) as r:
        if r.status_code == 200:
            images = re.findall(pattern, r.text)
            print('images:%s' % images)
            if len(images) == 0:
                return
            position = 1
            for img_url in images:
                # Use the last path segment as the file name; fall back to an index-based name
                filename = img_url.split('/')[-1]
                if not filename:
                    filename = str(position) + ".jpg"
                img_path = os.path.join(folder_path, filename)
                down_img(img_url, img_path)
                position += 1
        else:
            print('access url fail:%d' % r.status_code)
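To make the extraction step concrete, here is a quick check of the src regex against a made-up HTML fragment (the snippet and image URL below are invented for illustration, not taken from the real page):

import re

pattern = r'<img[^>]*?src="([^"]+)"[^>]*?>'
# Invented fragment, just to show what the pattern captures
html = '<div><img class="thumb" src="//img.haha.mx/2019/01/abc.jpg" alt="funny"></div>'
print(re.findall(pattern, html))
# ['//img.haha.mx/2019/01/abc.jpg'] -- a protocol-relative URL,
# which is why down_img() prepends "http:" when src does not start with "http"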
The calling code:
import my_down

url = 'https://www.haha.mx/pic/new/%d'
path = 'E:/pic/haha/'
# Crawl the first 10 list pages, saving each page's images into its own folder
for page in range(1, 11):
    page_url = url % page
    page_path = path + str(page) + '/'
    my_down.down_images(page_url, page_path)
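As a quick sanity check of the pagination, this is what the URL template and folder path expand to for the first page (the values simply mirror the loop above):

page = 1
print(url % page)              # https://www.haha.mx/pic/new/1
print(path + str(page) + '/')  # E:/pic/haha/1/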
That downloaded quite a lot of images: