Inspecting the page source shows that the text we want is stored in p tags under a div tag, and those p tags hold little besides the content we are after,
so parsing the page with lxml (XPath) takes the least effort.
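As a quick illustration of the XPath pattern used below, here is a minimal sketch run against a made-up HTML snippet (the snippet only imitates the chapter page structure; the class name matches the one used in the crawler):

from lxml import etree

# hypothetical snippet imitating the chapter page: paragraphs inside the content div
sample = '''
<div class="read-content j_readContent">
  <p>　　First paragraph of the chapter.</p>
  <p>　　Second paragraph of the chapter.</p>
</div>
<div class="sidebar"><p>unrelated text</p></div>
'''

tree = etree.HTML(sample)
# same XPath as in the crawler: only p children of the content div are matched
for text in tree.xpath('//div[@class="read-content j_readContent"]/p/text()'):
    print(text.strip())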
import requests, time, re
from lxml import etree

def get_text(url):
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()
        r.encoding = r.apparent_encoding  # let requests guess the real encoding before decoding
        return r.text
    except requests.RequestException:
        return ""

def save(ret):
    html = etree.HTML(ret)  # etree parses the downloaded page source (already a decoded string, not a local file)
    # select the div with the given class attribute and take the text of its child p nodes
    results = html.xpath('//div[@class="read-content j_readContent"]/p/text()')
    for result in results:
        result = re.sub(r'\s', '', result)  # use a regex to strip whitespace, including the full-width indentation spaces
        print(result)
        time.sleep(1)  # pause briefly so the output scrolls paragraph by paragraph

if __name__ == '__main__':
    url = "https://read.qidian.com/chapter/T_qvdsDahpITFqQ-idajwA2/_P8EZSmpMaDwrjbX3WA1AA2"
    ret = get_text(url)
    save(ret)
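The save function above only prints the paragraphs to the console. If the goal is to keep the chapter on disk, a minimal variant could look like the sketch below (the output file name chapter.txt is an arbitrary choice, not part of the original code):

import re
from lxml import etree

def save_to_file(ret, path='chapter.txt'):
    html = etree.HTML(ret)
    results = html.xpath('//div[@class="read-content j_readContent"]/p/text()')
    with open(path, 'w', encoding='utf-8') as f:
        for result in results:
            f.write(re.sub(r'\s', '', result) + '\n')  # one cleaned paragraph per line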
Sample output