ff

本文介绍了一种使用Python实现的汽车品牌爬虫程序,通过抓取汽车之家网站上的品牌及车型信息,包括图片下载和数据库插入操作。文章详细展示了如何解析网页结构、获取所需数据并进行有效存储。

摘要生成于 C知道 ,由 DeepSeek-R1 满血版支持, 前往体验 >

__author__ = 'siyecao'
# -*- coding:utf-8 -*-
# coding:utf-8
import urllib
from urllib.parse import urlparse
import urllib.request
import re
import io
import os
import sys
import pymysql
from bs4 import BeautifulSoup


class CatSpider():
    """Autohome car-brand spider.

    Crawls https://www.autohome.com.cn/grade/carhtml/<A-Z>_photo.html, one
    page per initial letter, downloads every brand and car-type logo into
    ./images/, and inserts the collected records into a MySQL database.
    """

    def __init__(self):
        # Base URL of the per-letter brand listing pages.
        self.siteURL = 'https://www.autohome.com.cn/grade/carhtml/'
        # Minimal browser-like header so the site does not reject the request.
        self.headers = {'User-Agent': 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0'}
        # One list of brand dicts per crawled letter (list of lists).
        self.brand_list = []
        # Set by getCarBrand() before each page is parsed; getData() reads it.
        self.initials = ''
        self.curr_post_url = ''

    def postData(self, url):
        """GET *url* with the spider's headers and return the body as bytes."""
        request = urllib.request.Request(url, headers=self.headers)
        # Close the response deterministically instead of leaking the socket.
        with urllib.request.urlopen(request) as response:
            return response.read()

    def getData(self, html):
        """Parse one brand-listing page into a list of brand dicts.

        Each dict has keys: 'initials', 'name', 'pic_url' (local path of the
        downloaded logo) and 'types' (list of {'name', 'pic_url'} dicts).
        Downloads every referenced image as a side effect.
        """
        soup = BeautifulSoup(html, "html.parser")
        brand_list = []

        for dl in soup.select('dl'):
            dt = dl.select("dt")
            if not dt:
                # Malformed entry without a <dt>; skip instead of crashing.
                continue
            brand_data = dt[0].select("a")

            brand = {'initials': self.initials}
            # BeautifulSoup yields str already -- the original encode/decode
            # round-trips either crashed on Python 3 or mangled the Chinese
            # text, so the values are used directly.
            pic_url = "https:" + brand_data[0].select("img")[0].attrs["src"]
            # Download once and keep the local path (the original called
            # writeFilesImg twice, downloading every brand image twice).
            brand["pic_url"] = self.writeFilesImg(pic_url)
            brand["name"] = brand_data[1].string

            car = []
            for li in dl.select("dd ul li"):
                if not li.select("h4"):
                    # List items without an <h4> are separators, not cars.
                    continue
                type_pic_url = "https:" + li.select("div a img")[0].attrs["src"]
                print("Get url:" + type_pic_url)
                car.append({
                    "name": li.select("h4 a")[0].string,
                    "pic_url": self.writeFilesImg(type_pic_url, '/images/cartype'),
                })
            brand["types"] = car
            brand_list.append(brand)
        return brand_list

    def setPostString(self):
        """Return the page keys to crawl: the uppercase letters 'A'..'Z'."""
        return [chr(i).upper() for i in range(97, 123)]

    def mkImagesDir(self, dir_string=""):
        """Ensure <cwd><dir_string> exists and return its absolute path.

        An empty *dir_string* defaults to '/images/brand'.
        """
        if dir_string == "":
            dir_string = '/images/brand'
        folder = os.getcwd() + dir_string
        print("Picture save directory:")
        print(folder)
        # exist_ok avoids the racy exists()-then-makedirs() check.
        os.makedirs(folder, exist_ok=True)
        return folder

    def writeFilesImg(self, img_url, dir_string='/images/brand'):
        """Download *img_url* into <cwd><dir_string>/ and return the local path.

        The file name is the last component of the URL path; an existing file
        of the same name is overwritten ('wb' truncates).
        """
        folder = self.mkImagesDir(dir_string)
        cat_img = self.postData(img_url)
        img_name = img_url.rsplit('/', 1)[-1]
        img = folder + '/' + img_name
        with open(img, 'wb') as f:
            f.write(cat_img)
        return img

    def getCarBrand(self):
        """Crawl every letter page, then insert all results into the DB."""
        print("Spider start")
        for initials in self.setPostString():
            print(initials)
            url = self.siteURL + initials + "_photo.html"
            self.curr_post_url = url
            # getData() reads self.initials, so it must be set before parsing.
            self.initials = initials
            post_html_data = self.postData(url)
            self.brand_list.append(self.getData(post_html_data))
        print("Spider end")
        self.inserDataBase(self.brand_list)

    def inserDataBase(self, brand_list):
        """Insert brands into ims_autoparts_car_brand and their types into
        ims_autoparts_car_type.

        *brand_list* is a list of per-letter lists as produced by getData().
        """
        print("Start insert")
        # NOTE(review): credentials are hard-coded; move them to configuration
        # before real use.
        conn = pymysql.connect(host='192.168.55.104', port=3306, user='root',
                               passwd='root', db='weiqing', charset='utf8')
        try:
            cursor = conn.cursor()
            for items in brand_list:
                for item in items:
                    # Values are already str -- the original .decode("gbk")
                    # calls crash on Python 3 (str has no .decode).
                    name = item["name"]
                    print(name)
                    brand = ("4", "0", item["initials"], name,
                             item["pic_url"], "1", "1", "0", "1527063241")
                    print("Inserting brand data")
                    cursor.execute(
                        "insert into ims_autoparts_car_brand(uniacid,parent_id,initials,name,pic_url,status,sort,is_hot,create_time)values(%s,%s,%s,%s,%s,%s,%s,%s,%s)",
                        brand)
                    # Parent id for the car-type rows inserted below.
                    pid = cursor.lastrowid
                    for ite in item["types"]:
                        print("Inserting car type data")
                        type_row = ("4", pid, ite["name"], ite["pic_url"],
                                    "1", "1", "1527063241")
                        cursor.execute(
                            "insert into ims_autoparts_car_type(uniacid,brand_id,name,pic_url,status,sort,create_time)values(%s,%s,%s,%s,%s,%s,%s)",
                            type_row)
            # pymysql does not autocommit; the original left this commented
            # out, so nothing was ever persisted.
            conn.commit()
            cursor.close()
        finally:
            conn.close()
        print("Program finish!")
        # The original called exit() here; a library method must not
        # terminate the interpreter, so the caller decides when to stop.

if __name__ == '__main__':
    # Entry point: crawl every brand page and persist the results.
    CatSpider().getCarBrand()

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值