原理:定位到对应标签,获取标签内的内容。
解析的三种方法:
1:正则
以 58 二手房为例:获取图片列表(二进制 content)。展开页面源码后,每张图片对应一段结构相同的 HTML;写正则时从头匹配到尾,把各条目之间不同的部分用 .*? 替换掉。
import requests
import re
import os

if __name__ == "__main__":
    # Minimal example: download a single image as binary content.
    # url = 'https://img3.doubanio.com/view/photo/s_ratio_poster/public/p2898385373.jpg'
    # img_data = requests.get(url=url).content
    # with open("./pic.png", "wb") as fp:
    #     fp.write(img_data)

    # Make sure the output directory exists before writing files into it.
    if not os.path.exists("./pictures"):
        os.makedirs("./pictures")
    url = 'https://www.58.com/ershoufang/'
    # Spoof the User-Agent so the server treats the request as a normal browser.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36'
    }
    # Fetch the whole listing page as text.
    respond_data = requests.get(url=url, headers=headers).text
    # Each listing row looks like this (collapsed sample); we anchor on <tr>
    # and capture the <img src="..."> value, replacing varying parts with .*?:
    # <tr>
    # <td class="img"><div><a href="//sh.58.com/ershoufang/45319596761223x.shtml" target="blank"><img src="//pic4.58cdn.com.cn/anjuke_58/4b7701ea9d4e5f5ecc7cb87e1d317920"></a></div></td>
    # <td class="t"> <a href="//sh.58.com/ershoufang/45319596761223x.shtml" target="_blank" class ="t"> ...title...</a><br> ... </td>
    # <td class="pred"><b>98</b>万</td>
    # ...
    # </tr>
    ex = r'<tr>.*?<img src="(.*?)".*?</tr>'
    # re.S makes . match newlines so one pattern can span a multi-line <tr> block.
    img_src_list = re.findall(ex, respond_data, re.S)
    for img_src in img_src_list:
        # The page uses protocol-relative URLs (//pic4.58cdn.com.cn/...).
        img_src = "https:" + img_src
        print(img_src)
        # Binary payload of the image.
        imgs_data = requests.get(url=img_src, headers=headers).content
        # Build a safe file name: drop any query string (e.g. "?with=190")
        # and only append ".png" when the URL has no extension already
        # (fixes the leftover issue with ".gif"-suffixed sources).
        img_name = img_src.split('/')[-1].split('?')[0]
        if not os.path.splitext(img_name)[1]:
            img_name += ".png"
        imgpath = './pictures/' + img_name
        with open(imgpath, "wb") as fp:
            fp.write(imgs_data)
        print(img_name + "下载成功")
2:bs4
通过嵌套标签 tr > td > a 定位;用 .get() 获取属性值,用 .text 获取标签文本。
from bs4 import BeautifulSoup
import requests
import re

if __name__ == "__main__":
    url = 'https://www.58.com/ershoufang/'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36'
    }
    # Download the raw page bytes and hand them to BeautifulSoup's lxml parser.
    page_bytes = requests.get(url=url, headers=headers).content
    soup = BeautifulSoup(page_bytes, 'lxml')

    # find() returns the first <a> tag whose class is "t". Note the trailing
    # underscore in class_ — "class" is a reserved word in Python.
    # .text extracts the tag's inner text, e.g. the listing title.
    text = soup.find('a', class_="t").text

    # A CSS selector scoped as <tr> -> <td class="t"> -> <a> keeps only the
    # title anchors; a bare search on class "t" would also match unrelated
    # tags that happen to share the class.
    house_links = soup.select('tr > td[class="t"] > a')
    for link in house_links:
        # .get("href") reads an attribute value from the tag, e.g.
        # //sh.58.com/ershoufang/45319596761223x.shtml
        print(link.get("href"))
3:xpath(重点)
from bs4 import BeautifulSoup
from lxml import etree
import requests

if __name__ == "__main__":
    url = 'https://www.58.com/ershoufang/'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36'
    }
    # XPath cheat sheet:
    #   //            match at any depth        /   direct child
    #   /text()       node's text content       /@x value of attribute x
    #   //td[@class="t"]  every <td> whose class attribute equals "t"
    respond_data = requests.get(url=url, headers=headers).text
    tree = etree.HTML(respond_data)

    # Listing titles (anchor text) and their detail-page URLs (href values).
    titles = tree.xpath('//td[@class="t"]/a/text()')
    links = tree.xpath('//td[@class="t"]/a/@href')
    for link in links:
        print(link)
待办:后续再处理验证码识别、账号登录以及滑块验证。