一、爬取后txt文件保存
需要先安装依赖库：pip install requests beautifulsoup4（注意包名是 beautifulsoup4，而不是 BeautifulSoup）
import requests
from bs4 import BeautifulSoup

# Target news site to scrape.
url = 'https://www.chinadaily.com.cn/'  # China Daily website

# Fetch the page. A timeout keeps the script from hanging forever
# if the host is unreachable.
response = requests.get(url, timeout=10)

# Proceed only when the request succeeded (HTTP 200).
if response.status_code == 200:
    print('Successfully retrieved the website.')
    # Parse the HTML document.
    soup = BeautifulSoup(response.text, 'html.parser')
    # Open the output file for the scraped data.
    with open('news_data.txt', 'w', encoding='utf-8') as f:
        # <a> tags carry both the headline text and the link URL.
        for item in soup.find_all('a', href=True):
            title = item.get_text().strip()  # headline text
            link = item['href']              # link URL
            # Skip entries with an empty title or a non-HTTP link
            # (relative paths, javascript:, mailto:, anchors, ...).
            if title and 'http' in link:
                f.write(f'链接标题: {title}\n链接地址: {link}\n\n')
    print("Data saved to 'news_data.txt'.")
else:
    print(f'Failed to retrieve the website. Status code: {response.status_code}')