今天发现当当的抢购价是覆盖在当当价上的,大概是页面加载js渲染展示新的抢购价。所以如果简单的去爬就会发现爬到的价格和网页显示的不一样。不得不吐槽,当当的网页加载真的慢,我眼睁睁看着当当价被替换成抢购价。不过无所谓了,我已经会用selenium爬加载js的动态网页了。等我再学学就是要解决多线程问题。不过发现跟机器学习一样,爬虫也有现成的好用的框架,自己写只是帮助理解原理,效果还是不如直接用框架
# -*- coding: utf-8 -*-
"""
Created on Thu May 10 10:27:57 2018
@author:
"""
import urllib
import pandas as pd
from urllib import request
from bs4 import BeautifulSoup
# --- Configuration and result accumulators --------------------------------
# Input: an Excel sheet whose first column holds the Dangdang product IDs
# to scrape. (No ID list? Iterate IDs starting from 25069999 instead.)
path1 = r'C:\Users\\Desktop\单品信息爬虫抓取\second_class.xlsx'
# Output: one row per product with the columns collected below.
path2 = r'C:\Users\\Desktop\单品信息爬虫抓取\spider2.xlsx'

get_id = pd.read_excel(path1)

# One Series per output column, filled element-by-element by the scraping
# loop. dtype=object is explicit: constructing an empty Series without a
# dtype is deprecated in modern pandas (it would warn and default to
# object anyway), and the columns hold mixed ints/floats/strings.
ser1 = pd.Series([], dtype=object, name='商品ID')
ser2 = pd.Series([], dtype=object, name='二级类')
ser3 = pd.Series([], dtype=object, name='三级类')
ser4 = pd.Series([], dtype=object, name='书名')
ser5 = pd.Series([], dtype=object, name='定价')
ser6 = pd.Series([], dtype=object, name='出版社')
ser7 = pd.Series([], dtype=object, name='评论数')
ser8 = pd.Series([], dtype=object, name='当当现价')
ser9 = pd.Series([], dtype=object, name='ISBN')
ser10 = pd.Series([], dtype=object, name='京东价格')
index = 0  # running row counter, advanced once per scraped product
# Scrape one Dangdang product page per ID, then look the book up on JD by
# ISBN to grab a comparison price.
for item_id in get_id.iloc[:, 0]:  # renamed: the original shadowed builtin `id`
    url = "http://product.dangdang.com/" + str(item_id) + '.html'
    # `with` closes the HTTP response deterministically (the original
    # leaked every connection).
    with request.urlopen(url) as resp:
        html = resp.read()
    soup = BeautifulSoup(html, "lxml")

    ser1[index] = item_id
    # Category breadcrumb: children 3 and 5 hold the 2nd/3rd-level
    # category links. Hoisted: the original ran the same find() twice.
    breadcrumb = soup.find('div', class_='breadcrumb')
    ser2[index] = breadcrumb.contents[3].string
    ser3[index] = breadcrumb.contents[5].string
    # First stripped string inside <h1> is the book title.
    ser4[index] = list(soup.h1.stripped_strings)[0]
    # List price ("定价") shown next to the current price.
    ser5[index] = float(soup.find("div", attrs={"class": "price_m",
                                                'id': "original-price"}).contents[-1])
    ser6[index] = soup.find('a', attrs={'target': "_blank",
                                        'dd_name': "出版社"}).string
    ser7[index] = int(soup.find('a', dd_name="评论数").string)
    # NOTE(review): this is the server-rendered price; a flash-sale price
    # may be swapped in later by JavaScript, so it can differ from what a
    # browser shows (see the note at the top of the file).
    ser8[index] = float(soup.find('p', id='dd-price').contents[-1])
    # "ISBN:<digits>" lives in the key/value detail list; stored as int.
    # (assumes an all-digit ISBN-13 — an ISBN-10 ending in 'X' would raise)
    ISBN = int(soup.find('ul', class_="key clearfix")
               .contents[-3].string.split(':')[-1])
    ser9[index] = ISBN

    # JD search by ISBN, sorted by price ascending (psort=2); take the
    # first rendered price on the results page.
    url1 = ('https://search.jd.com/Search?keyword=' + str(ISBN)
            + '&enc=utf-8&psort=2')
    with request.urlopen(request.Request(url1)) as res1:
        result1 = res1.read()
    soup1 = BeautifulSoup(result1, 'lxml')
    ser10[index] = float(soup1.find('strong',
                                    attrs={'data-done': "1"}).contents[-1].string)
    index += 1
# Assemble the collected columns into one DataFrame and export it.
# ``encoding=`` is deliberately NOT passed to ``to_excel``: .xlsx is a
# binary format for which the argument was always ignored, and it was
# removed in pandas 2.0 (passing it there raises TypeError).
columns = [ser1, ser2, ser3, ser4, ser5, ser6, ser7, ser8, ser9, ser10]
frame = pd.DataFrame({s.name: s for s in columns})
frame.to_excel(path2)
