Web Scraping: Baidu, Sogou, and 360 Images

This post describes a way to batch-download images from the Baidu, Sogou, and 360 search engines with Python crawlers. By parsing the JSON responses, the images are automatically sorted into keyword folders and saved, which is handy for collecting large image datasets.


Scrape images from the web, picking the image category you need.
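
All three crawlers below share the same skeleton: request a paginated JSON endpoint, pull the image URLs out of the response, and download each one into a folder named after the keyword. A minimal sketch of that shared pattern (the endpoint and the 'imgs'/'objURL' keys here are hypothetical placeholders, not any engine's real API):

import json
import os
import urllib.request

def download_page(api_url, keyword):
    # one folder per keyword, created on first use
    os.makedirs(keyword, exist_ok=True)
    req = urllib.request.Request(api_url, headers={'User-Agent': 'Mozilla/5.0'})
    data = json.loads(urllib.request.urlopen(req).read().decode())
    # 'imgs' and 'objURL' stand in for whatever keys the real response uses
    for i, item in enumerate(data['imgs']):
        urllib.request.urlretrieve(item['objURL'], os.path.join(keyword, str(i) + '.jpg'))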

Scraping Baidu images

#!/usr/bin/env python
# -*- coding:utf-8 -*-

import os
import re
import urllib
import json
import socket
import urllib.request
import urllib.parse
import urllib.error
import time

# set a global socket timeout so stalled downloads cannot hang forever
timeout = 5
socket.setdefaulttimeout(timeout)


class Crawler:
    # counters for the result range and the saved-file index
    __amount = 0
    __start_amount = 0
    __counter = 0
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}

    # t: interval (seconds) to sleep between image downloads
    def __init__(self, t=0.1):
        self.time_sleep = t

    # save images into a folder named after the keyword
    def __save_image(self, rsp_data, word):

        if not os.path.exists("./" + word):
            os.mkdir("./" + word)
        # continue numbering from any files already in the folder
        self.__counter = len(os.listdir('./' + word)) + 1
        for image_info in rsp_data['imgs']:
            try:
                time.sleep(self.time_sleep)
                fix = self.__get_suffix(image_info['objURL'])
                urllib.request.urlretrieve(image_info['objURL'], './' + word + '/' + str(self.__counter) + str(fix))
            except urllib.error.HTTPError as urllib_err:
                print(urllib_err)
                continue
            except Exception as err:
                time.sleep(1)
                print(err)
                print("Unknown error; skipping this image")
                continue
            else:
                print(word + " +1, " + str(self.__counter) + " saved so far")
                self.__counter += 1
        return

    # get the file extension from a URL, falling back to .jpeg
    @staticmethod
    def __get_suffix(name):
        m = re.search(r'\.[^\.]*$', name)
        # guard against URLs with no extension at all (m is None)
        if m and len(m.group(0)) <= 5:
            return m.group(0)
        else:
            return '.jpeg'

    # get the prefix (text before the first dot); currently unused
    @staticmethod
    def __get_prefix(name):
        return name[:name.find('.')]

    # fetch result pages and save the images on each
    def __get_images(self, word='机场'):
        search = urllib.parse.quote(word)
        # pn: offset into the result list (images fetched so far)
        pn = self.__start_amount
        while pn < self.__amount:

            url = 'http://image.baidu.com/search/avatarjson?tn=resultjsonavatarnew&ie=utf-8&word=' + search + '&cg=girl&pn=' + str(
                pn) + '&rn=60&itg=0&z=0&fr=&width=&height=&lm=-1&ic=0&s=0&st=-1&gsm=1e0000001e'
            # set request headers to avoid being blocked
            page = None
            try:
                time.sleep(self.time_sleep)
                req = urllib.request.Request(url=url, headers=self.headers)
                page = urllib.request.urlopen(req)
                rsp = page.read().decode('unicode_escape')
            except UnicodeDecodeError as e:
                print(e)
                print('-----UnicodeDecodeError url:', url)
            except urllib.error.URLError as e:
                print(e)
                print("-----urlError url:", url)
            except socket.timeout as e:
                print(e)
                print("-----socket timeout:", url)
            else:
                # parse the JSON response and save the images it lists
                rsp_data = json.loads(rsp)
                self.__save_image(rsp_data, word)
                # move on to the next page (60 results per page)
                print("Downloading next page")
                pn += 60
            finally:
                # page may be unset if urlopen itself failed
                if page is not None:
                    page.close()
        print("Download finished")
        return

    def start(self, word, spider_page_num=1, start_page=1):
        """
        爬虫入口
        :param word: 抓取的关键词
        :param spider_page_num: 需要抓取数据页数 总抓取图片数量为 页数x60
        :param start_page:起始页数
        :return:
        """
        self.__start_amount = (start_page - 1) * 60
        self.__amount = spider_page_num * 60 + self.__start_amount
        self.__get_images(word)


if __name__ == '__main__':
    crawler = Crawler(0.05)

    crawler.start('云彩', 2)  # change the keyword to crawl for here
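
Before launching a long crawl, it is worth smoke-testing the endpoint with a single request. A minimal check, assuming the avatarjson API still returns the {'imgs': [{'objURL': ...}, ...]} structure the crawler expects:

import json
import urllib.parse
import urllib.request

word = urllib.parse.quote('云彩')
url = ('http://image.baidu.com/search/avatarjson?tn=resultjsonavatarnew'
       '&ie=utf-8&word=' + word + '&pn=0&rn=60')
req = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
rsp = urllib.request.urlopen(req).read().decode('unicode_escape')
data = json.loads(rsp)
print(len(data.get('imgs', [])), 'image entries on the first page')

If this prints 0, the endpoint or its response format has likely changed and the paging parameters (pn, rn) need rechecking.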

Scraping Sogou images

# -*- coding:utf-8 -*-
import requests
import json
import urllib.request
import urllib.parse

def getSogouImag(category, length, path):
    num = 0
    m = 0
    # Sogou expects the query GBK-encoded; the original hard-coded
    # %D4%C6%B2%CA, the GBK encoding of "云彩", and ignored the parameters
    query = urllib.parse.quote(category, encoding='gbk')
    while num < length:
        num = num + 48
        print(num)
        imgs = requests.get('http://pic.sogou.com/pics?query=' + query + '&mode=1&start=' + str(num) + '&reqType=ajax&reqFrom=result&tn=0')
        jd = json.loads(imgs.text)
        jd = jd['items']
        imgs_url = []
        for j in jd:
            imgs_url.append(j['thumbUrl'])
        for img_url in imgs_url:
            print('***** ' + str(m) + '.jpg *****' + '   Downloading...')
            urllib.request.urlretrieve(img_url, path + str(m) + '.jpg')
            m = m + 1

    print('Download complete!')

getSogouImag('云彩', 2000, 'C:/Users/Administrator/Desktop/云彩/')
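
For reference, the %D4%C6%B2%CA string hard-coded in the original URL is simply the GBK percent-encoding of the keyword, which can be reproduced like this:

import urllib.parse

print(urllib.parse.quote('云彩', encoding='gbk'))  # -> %D4%C6%B2%CA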


Scraping 360 images

from retrying import retry
import requests
import urllib.request
import json
import os
import time


# Img class wraps the whole download flow
class Img:
    def __init__(self):
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36"
            , "Referer": "http://s.360.cn/0kee/a.html"
            , "Connection": "keep-alive"}
        self.num = 0
        # make sure the output folder exists before saving into it
        os.makedirs("D:\\Temp\\imges", exist_ok=True)

    def get_img_list(self, url):  # fetch one JSON page and collect its image URLs
        response = requests.get(url, headers=self.headers)
        html_str = response.content.decode()
        json_str = json.loads(html_str)
        img_str_list = json_str["list"]
        img_list = []
        for img_object in img_str_list:
            img_list.append(img_object["qhimg_url"])
        return img_list

    def save_img_list(self, img_list):
        for img in img_list:
            self.save_img(img)

    # if saving an image raises, retry it up to 3 times before giving up
    @retry(stop_max_attempt_number=3)
    def save_img(self, img):  # download one image URL and save it locally
        with open("D:\\Temp\\imges\\" + str(self.num) + ".jpg", "wb") as f:
            f.write(urllib.request.urlopen(img).read())
        # time.sleep(10)
        print(str(self.num) + " saved")
        self.num += 1

    def run(self):  # main loop
        total = 1500
        while self.num <= total:
            # {} receives the sn offset; each JSON page carries 30 entries,
            # so resume from however many images have been saved
            self.temp_url = "http://image.so.com/zj?ch=beauty&sn={}&listtype=new&temp=1"
            # 1. build the page URL (the original never formatted in the offset)
            url = self.temp_url.format(self.num)
            # 2. fetch the data
            img_list = self.get_img_list(url)
            # 3. save it
            self.save_img_list(img_list)
            # don't hit the server too often
            # time.sleep(60)
            print("Taking a short break")


if __name__ == '__main__':
    img = Img()
    img.run()
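
One caveat in run() above: sn advances only through self.num, i.e. through successful saves, so a page that yields no images would be refetched forever. A sketch of a safer variant (assuming the same 30-entries-per-page behavior of the sn offset) steps an explicit page counter and stops on an empty page:

    def run(self):  # variant with an explicit page offset
        total = 1500
        sn = 0
        while self.num <= total:
            url = "http://image.so.com/zj?ch=beauty&sn={}&listtype=new&temp=1".format(sn)
            img_list = self.get_img_list(url)
            if not img_list:  # no more results; stop instead of looping forever
                break
            self.save_img_list(img_list)
            sn += 30  # each response carries 30 entries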
