Python BeautifulSoup爬取当当网图书信息

# -*- coding: utf-8 -*-

import urllib.request
from bs4 import BeautifulSoup
import pandas as pd



# Scrape book metadata from dangdang.com product pages for every product ID
# listed in the input spreadsheet, then save the results to a new Excel file.

PATH_IN = r'C:\Users\name\Desktop\单品信息爬虫抓取\second_class.xlsx'
# Input is a one-column sheet of product IDs (for experimenting, IDs can
# simply be iterated starting from 25069999).
PATH_OUT = r'C:\Users\name\Desktop\单品信息爬虫抓取\spider1.xlsx'

BASE_URL = 'http://product.dangdang.com/{}.html'
COLUMNS = ['商品ID', '二级类', '三级类', '书名', '定价', '出版社', '评论数', '当当现价']

ids = pd.read_excel(PATH_IN)

rows = []
for product_id in ids.iloc[:, 0]:  # renamed from `id` to avoid shadowing the builtin
    url = BASE_URL.format(product_id)
    try:
        # Context manager guarantees the HTTP response is closed even on error.
        with urllib.request.urlopen(url) as resp:
            html = resp.read()
        soup = BeautifulSoup(html, 'lxml')

        # The breadcrumb <div> holds the category hierarchy; children 3 and 5
        # are the second- and third-level category nodes.
        breadcrumb = soup.find('div', class_='breadcrumb').contents
        rows.append({
            '商品ID': product_id,
            '二级类': breadcrumb[3].string,
            '三级类': breadcrumb[5].string,
            # First non-whitespace string inside <h1> is the book title.
            '书名': list(soup.h1.stripped_strings)[0],
            # List price sits in <div class="price_m" id="original-price">.
            '定价': float(soup.find('div', attrs={'class': 'price_m',
                                                'id': 'original-price'}).contents[-1]),
            '出版社': soup.find('a', attrs={'target': '_blank',
                                         'dd_name': '出版社'}).string,
            '评论数': int(soup.find('a', dd_name='评论数').string),
            '当当现价': float(soup.find('p', id='dd-price').contents[-1]),
        })
    except Exception as exc:
        # A missing page or changed markup should not abort the whole crawl;
        # report the failing ID and move on to the next one.
        print(f'skipping id {product_id}: {exc}')

# Build the DataFrame once from the collected rows instead of filling eight
# parallel Series by index.
frame = pd.DataFrame(rows, columns=COLUMNS)
# NOTE: the `encoding` argument of to_excel was removed in pandas 2.0;
# openpyxl writes UTF-8 by default, so it is simply dropped here.
frame.to_excel(PATH_OUT)
感觉soup确实好用,Python3处理中文确实轻松,以后也用3了,不用天天reload(sys)了
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值