Repost
import requests
from lxml import etree
# To extract content from HTML source with XPath, first parse the source into an
# _Element object, then call its xpath() method. For example, given this minimal
# HTML source: "<html><body><h1>This is a test</h1></body></html>", the text
# inside the h1 tag can be pulled out like this:
#
# from lxml import etree
# html = '<html><body><h1>This is a test</h1></body></html>'
# # Parse the HTML source into an _Element object
# _element = etree.HTML(html)
# # Extract the h1 text with an XPath expression
# text = _element.xpath('//h1/text()')
# print('result is:', text)
import time
import csv
# Crawl the first 30 product entries on each page
def crow_first(n):
    # Build the URL for each page (JD numbers the first half of page n as 2*n-1)
    url = 'https://search.jd.com/Search?keyword=%E6%89%8B%E6%9C%BA&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&cid2=653&cid3=655&page=' + str(
        2 * n - 1)
    # In practice, a user-agent header alone is usually enough (see the sketch after this dict)
    head = {'authority': 'search.jd.com',
            'method': 'GET',
            'path': '/s_new.php?keyword=%E6%89%8B%E6%9C%BA&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&wq=%E6%89%8B%E6%9C%BA&cid2=653&cid3=655&page=4&s=84&scrolling=y&log_id=1529828108.22071&tpl=3_M&show_items=7651927,7367120,7056868,7419252,6001239,5934182,4554969,3893501,7421462,6577495,26480543553,7345757,4483120,6176077,6932795,7336429,5963066,5283387,25722468892,7425622,4768461',
            'scheme': 'https',
            'referer': 'https://search.jd.com/Search?keyword=%E6%89%8B%E6%9C%BA&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&wq=%E6%89%8B%E6%9C%BA&cid2=653&cid3=655&page=3&s=58&click=0',
            'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36',
            'x-requested-with': 'XMLHttpRequest',
            'Cookie': ''
            }
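    # A stripped-down alternative (a sketch, assuming JD accepts requests that
    # carry nothing but a browser user-agent, as the comment above suggests):
    # head = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) '
    #                       'AppleWebKit/537.36 (KHTML, like Gecko) '
    #                       'Chrome/66.0.3359.139 Safari/537.36'}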
    r = requests.get(url, headers=head)
    # Specify the encoding explicitly, otherwise the text comes back garbled
    r.encoding = 'utf-8'
    html1 = etree.HTML(r.text)  # parse the page into an element tree for XPath queries
    # Locate each product's li tag
    datas = html1.xpath('//li[contains(@class,"gl-item")]')
    # print(datas)
    # Save the scraped results to a local CSV file.
    # Mode 'a' opens the file for appending: if the file already exists, the file
    # pointer is placed at the end, so new rows are written after the existing
    # content; if it does not exist, a new file is created.
    # encoding sets the character encoding used for the file.
    # newline='' controls the row separator; without it, Excel shows a blank
    # line between every two data rows.
    with open('JD_Phone2.csv', 'a', newline='', encoding='gb18030') as f:  # with encoding='utf-8' the saved table shows garbled text in Excel
        write = csv.writer(f)
        for data in datas:
            p_price = data.xpath('div/div[@class="p-price"]/strong/i/text()')
            # p_comment = data.xpath('div/div[5]/strong/a/text()')
            p_name = data.xpath('div/div[@class="p-name p-name-type-2"]/a/em')
            # Handle products whose price can switch dynamically (e.g. the Xiaomi
            # MIX2 mentioned earlier): their lowest price sits in a data-price attribute
            if len(p_price) == 0:
                p_price = data.xpath('div/div[@class="p-price"]/strong/@data-price')
            # xpath('string(.)') joins text that is split across nested tags
            write.writerow([p_name[0].xpath('string(.)'), p_price[0]])
    # the with statement closes the file automatically, so no explicit f.close() is needed
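# A quick way to sanity-check the XPath logic above without hitting JD is to run
# it against a tiny hand-written snippet. The markup below is a hypothetical
# sample, a minimal sketch assuming the same class names as JD's listing page:
def _xpath_demo():
    sample = ('<li class="gl-item"><div>'
              '<div class="p-price"><strong data-price="1999"></strong></div>'
              '<div class="p-name p-name-type-2"><a><em>Demo <font>Phone</font></em></a></div>'
              '</div></li>')
    item = etree.HTML(sample).xpath('//li[contains(@class,"gl-item")]')[0]
    # No <i> price tag in this sample, so the @data-price fallback kicks in
    price = (item.xpath('div/div[@class="p-price"]/strong/i/text()')
             or item.xpath('div/div[@class="p-price"]/strong/@data-price'))
    # string(.) joins the text spread across the nested <font> tag
    name = item.xpath('div/div[@class="p-name p-name-type-2"]/a/em')[0].xpath('string(.)')
    print(name, price[0])  # expected output: Demo Phone 1999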
# Crawl the last 30 product entries on each page
def crow_last(n):
    # Current Unix timestamp, kept to 5 decimal places (used as the log_id parameter)
    a = time.time()
    b = '%.5f' % a
    # page is 2*n for the second half of page n; s is the result offset (e.g. n=1 -> page=2, s=28)
    url = 'https://search.jd.com/s_new.php?keyword=%E6%89%8B%E6%9C%BA&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&wq=%E6%89%8B%E6%9C%BA&cid2=653&cid3=655&page=' + str(
        2 * n) + '&s=' + str(48 * n - 20) + '&scrolling=y&log_id=' + str(b)
    head = {'authority': 'search.jd.com',
            'method': 'GET',
            'path': '/s_new.php?keyword=%E6%89%8B%E6%9C%BA&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&wq=%E6%89%8B%E6%9C%BA',
            'scheme': 'https',
            'referer': 'https://search.jd.com/Search?keyword=%E6%89%8B%E6%9C%BA&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&wq=%E6%89%8B%E6%9C%BA&cid2=653&cid3=655&page=3&s=58&click=0',
            'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36',
            'x-requested-with': 'XMLHttpRequest',
            'Cookie': ''
            }
    r = requests.get(url, headers=head)
    r.encoding = 'utf-8'
    html1 = etree.HTML(r.text)
    # print(html1)
    datas = html1.xpath('//li[contains(@class,"gl-item")]')
    with open('JD_Phone2.csv', 'a', newline='', encoding='gb18030') as f:
        write = csv.writer(f)
        for data in datas:
            p_price = data.xpath('div/div[@class="p-price"]/strong/i/text()')
            # p_comment = data.xpath('div/div[5]/strong/a/text()')
            p_name = data.xpath('div/div[@class="p-name p-name-type-2"]/a/em')
            if len(p_price) == 0:
                p_price = data.xpath('div/div[@class="p-price"]/strong/@data-price')
            write.writerow([p_name[0].xpath('string(.)'), p_price[0]])
if __name__ == '__main__':
    for i in range(1, 3):
        # The print calls below just make it easy to see which page is being crawled
        print('***************************************************')
        try:
            print('    First_Page: ' + str(i))
            crow_first(i)
            print('    Finish')
        except Exception as e:
            print(e)
        print('------------------')
        try:
            print('    Last_Page: ' + str(i))
            crow_last(i)
            print('    Finish')
        except Exception as e:
            print(e)
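# Optional: write a header row once before crawling so the CSV columns are
# labeled; a minimal sketch (assumes JD_Phone2.csv starts out empty, and uses
# the same gb18030 encoding as above):
# with open('JD_Phone2.csv', 'w', newline='', encoding='gb18030') as f:
#     csv.writer(f).writerow(['name', 'price'])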
————————————————
Copyright notice: this is an original article by CSDN blogger 「Mars_DD」, licensed under CC 4.0 BY-SA. Please include the original source link and this notice when reposting.
Original article: https://blog.youkuaiyun.com/xing851483876/article/details/80817578
Note: the blogger's original method for fetching comment counts no longer seems to work; the code above runs correctly with the comment-related parts removed.