import requests
from lxml import etree
import re
# List of pinyin spellings of Chongqing districts to crawl (e.g. 'yubei' = Yubei).
regions = ['yubei']
# Custom crawler function.
def findInfo(url: str) -> None:
    """Fetch one Beike second-hand-house listings page and extract listing data.

    Appends scraped data to HouseData.csv (opened below in append mode).

    :param url: URL of the listings page to fetch and parse.

    NOTE(review): the function body appears truncated in this view — the
    per-listing parsing continues past the last visible line. No request
    timeout is set, so a dead connection can hang the crawler — consider
    ``timeout=`` on the GET (left unchanged here).
    """
    # Open a CSV file in append mode so the scraped data accumulates across calls.
    with open(r'C:\Users\Administrator\Desktop\python爬虫\HouseData.csv', "a", encoding='utf-8') as f:
        # Browser-like User-Agent so the site does not reject the request as a bot.
        user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 ' \
            '(KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36'
        headers = {'User-Agent': user_agent}
        resp = requests.get(url, headers=headers).text
        # print(resp)  # print the raw HTML content of the page
        # Parse the page into an lxml Element tree so XPath queries can be used.
        tree = etree.HTML(resp, etree.HTMLParser())
        print("tree:", tree)
        # Each <li class="clear"> element holds one house listing; collect them all.
        infos = tree.xpath('//li[@class="clear"]')
        print("infos:" , infos)
        for info in infos:
            # Detail-page URL of this listing.
            # xpath("//a/@href")  # e.g. www.some.com
            # NOTE(review): xpath() returns a list, so str() yields "['url']",
            # not the bare URL — likely a bug unless later (unseen) code
            # re-extracts the URL from this string (the `re` import suggests so).
            Detail= str(info.xpath('./a/@href'))
            print("Detail:",Detail)
            # First level (listing summary) — parsing continues below (truncated).
# Article residue (not code): PYTHON_爬虫_贝壳二手房
# 最新推荐文章于 2025-04-02 14:10:24 发布