1. find_all() and find()
The find_all() method returns every matching tag as a list.
The find() method returns only the first match (or None if nothing matches).
find_all(self, name=None, attrs={}, recursive=True, text=None, limit=None, **kwargs)
name: the tag name to match
attrs: a dictionary of tag attributes to match
recursive: whether to search descendants recursively (default True)
text: match against the text content instead of the tag
limit: the maximum number of results to return
**kwargs: attribute filters passed as keyword arguments
from bs4 import BeautifulSoup

html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>
<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>
"""

soup = BeautifulSoup(html_doc, 'lxml')

a_tags = soup.find_all('a')           # all <a> tags, returned as a list
print(a_tags)
p_tags = soup.find_all('p', 'title')  # <p> tags whose class is "title"
print(p_tags)
print(soup.find_all(id='link1'))      # filter by attribute via keyword argument
print(soup.find_all('a', limit=3))    # stop after 3 results
print(soup.a)                         # shortcut: the first <a> tag
print(soup.find('a'))                 # equivalent to soup.a
print(soup.find_all('a', recursive=True))  # recursive search (the default)
# print(soup.find_all('a', limit=1)[0])    # first match via find_all
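Two parameters from the signature are not exercised above: attrs and text. A quick sketch against the same soup (class_ is bs4's keyword escape hatch for the reserved word class):

print(soup.find_all('a', attrs={'class': 'sister', 'id': 'link2'}))  # attrs takes a dict
print(soup.find_all('a', class_='sister'))  # same class filter via keyword argument
print(soup.find_all(text='Elsie'))          # matches string content: ['Elsie']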
2. find_parents(), find_parent(), find_next_siblings(), find_next_sibling()
find_parents() searches all ancestors.
find_parent() searches the single nearest ancestor.
find_next_siblings() searches all following siblings.
find_next_sibling() searches the single next sibling.
title_tag = soup.title
print(title_tag.find_parent('head'))   # <head><title>The Dormouse's story</title></head>
print(title_tag.find_parents('head'))  # same tag, wrapped in a list

s = soup.find(text='Elsie')            # a NavigableString, not a tag
print(s.find_previous('p'))
print(s.find_parents('p'))

a_tag = soup.a
print(a_tag.find_next_sibling('a'))    # the single next <a> sibling (link2)
print(a_tag.find_next_siblings('a'))   # all following <a> siblings (link2, link3)
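Worth noting: the plain .next_sibling attribute returns whatever node comes next, including whitespace-only text nodes, while find_next_sibling() skips them and filters by tag. A quick comparison with the same soup:

a_tag = soup.a
print(repr(a_tag.next_sibling))      # ',\n' -- the text node between the links
print(a_tag.find_next_sibling('a'))  # the <a id="link2"> tag; text nodes skipped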
3. find_previous_siblings(), find_previous_sibling(), find_all_next(), find_next()
find_previous_siblings() searches upward for all preceding siblings.
find_previous_sibling() searches upward for the single preceding sibling.
find_all_next() searches downward for all following elements.
find_next() searches downward for the single next element.
a_tag = soup.find(id='link3')
# print(a_tag)
# print(a_tag.find_previous_sibling())   # the single preceding sibling
# print(a_tag.find_previous_siblings())  # all preceding siblings

p_tag = soup.p
print(p_tag.find_all_next())  # every element after the first <p>, in document order
print(p_tag.find_next('a'))   # the first <a> after it
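The commented-out calls above walk upward from link3; restricted to <a> tags, the results look like this (note that find_previous_siblings() lists the nearest sibling first):

link3 = soup.find(id='link3')
print(link3.find_previous_sibling('a'))   # the <a id="link2"> tag
print(link3.find_previous_siblings('a'))  # [link2, link1] -- nearest first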
# Scrape the name and current temperature of every city nationwide
import requests
from bs4 import BeautifulSoup

def parse_page(url):
    response = requests.get(url)
    text = response.content.decode('utf-8')
    # html5lib tolerates the malformed markup on some of the region pages
    soup = BeautifulSoup(text, 'html5lib')
    conMidtab = soup.find('div', class_='conMidtab')
    # print(conMidtab)  # debug: check the container was found
    tables = conMidtab.find_all('table')
    for table in tables:
        trs = table.find_all('tr')[2:]  # skip the two header rows
        for index, tr in enumerate(trs):
            tds = tr.find_all('td')
            city_td = tds[0]
            if index == 0:
                # in the first data row tds[0] holds the province name,
                # so the city sits in tds[1]
                city_td = tds[1]
            city = list(city_td.stripped_strings)[0]
            temp_td = tds[-2]
            temp = list(temp_td.stripped_strings)[0]
            print('City:', city, 'Temperature:', temp)

def main():
    # url = 'http://www.weather.com.cn/textFC/hb.shtml'   # North China
    # url = 'http://www.weather.com.cn/textFC/db.shtml'   # Northeast
    # url = 'http://www.weather.com.cn/textFC/gat.shtml'  # Hong Kong, Macau & Taiwan
    urls = ['http://www.weather.com.cn/textFC/hb.shtml',
            'http://www.weather.com.cn/textFC/db.shtml',
            'http://www.weather.com.cn/textFC/gat.shtml']
    for url in urls:
        parse_page(url)

if __name__ == '__main__':
    main()
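One practical caveat: some sites reject the default requests User-Agent. If that happens, a browser-like header can be passed in; a minimal sketch, where fetch_html is a hypothetical helper and the header value is a stand-in, not anything the site requires:

HEADERS = {'User-Agent': 'Mozilla/5.0'}  # stand-in browser UA string

def fetch_html(url):
    # same download step as in parse_page, with an explicit User-Agent header
    response = requests.get(url, headers=HEADERS)
    return response.content.decode('utf-8')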