import re
import requests
from fake_useragent import UserAgent
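
# fake_useragent's ua.random picks a random browser User-Agent string,
# so each run of the script identifies itself as a different browser.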
ua = UserAgent()
headers = {
    'user-agent': ua.random
}


def getHTML(url):
    # Fetch the response for this url
    res = requests.get(url=url, headers=headers)
    if res.status_code == 200:
        # If the response status code is 200, return the page source
        return res.text
    else:
        # Otherwise return an empty string
        return ""


def get_son_url(html):
    # Extract every href from the <a> tags on the page
    url_list = re.findall('<a.*?href="(.*?)".*?</a>', html, re.S)
    print(url_list)
    return url_list
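

# deep_spider crawls recursively: deep_dict maps each discovered url to its
# depth (the seed is level 1) and doubles as a visited set, so no page is
# fetched twice.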
def deep_spider(url):
    # Stop recursing once the page is more than 3 levels deep
    if deep_dict[url] > 3:
        return
    html = getHTML(url)
    # Get the current page's html
    son_list = get_son_url(html)
    # Get all the urls on the current page
    for son_url in son_list:
        if son_url not in deep_dict:
            # Only crawl urls that have not been seen before
            if son_url.startswith('http'):
                # Only follow urls that start with http
                deep_dict[son_url] = deep_dict[url] + 1
                # Set son_url's level to the parent's level plus 1
                deep_spider(son_url)
                # Recursively crawl son_url
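

# Python's default recursion limit (about 1000 frames) caps how deep the
# call stack can grow, but the depth check above stops the crawl at level 3
# long before that matters.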
if __name__ == '__main__':
    url = "https://www.baidu.com/s?wd=南京"
    deep_dict = {}
    deep_dict[url] = 1
    deep_spider(url)