import requests
from bs4 import BeautifulSoup
import pandas as pd
# Encode the client (委托单位) name as UTF-8, join the hex of each byte with '%',
# and insert the result into the CABR-BETC report query URL
text = "山东鲁泰装配式装修材料有限公司"
print(f"根据委托单位名称进行查询:{text}")
utf8_encoded_text = text.encode('utf-8')
# Render each UTF-8 byte as two lowercase hex digits; the '%' separators (plus the leading '%'
# already present in the URL below) turn this into standard percent-encoding
hex_representation = '%'.join(f'{b:02x}' for b in utf8_encoded_text)
#print(hex_representation)
url = "http://weixin.cabr-betc.com/gjwx/wxfind/findreport?prtnum=&xmname=&wtunit=%" + hex_representation + "&page=1&rows=1000"
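# Note (equivalent approach, not in the original): the manual hex join above is just URL
# percent-encoding, so the standard library could build the same wtunit value; quote() emits
# uppercase hex digits, which URL decoders treat the same as lowercase ones.
# from urllib.parse import quote
# url = ("http://weixin.cabr-betc.com/gjwx/wxfind/findreport?prtnum=&xmname=&wtunit="
#        + quote(text) + "&page=1&rows=1000")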
print(f"查询网址:{url}")
# Send a GET request to fetch the page content
response = requests.get(url)
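# Optional hardening (an addition, not in the original): give the request a timeout and fail
# fast on HTTP errors instead of parsing an error page.
# response = requests.get(url, timeout=30)
# response.raise_for_status()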
#print(response.text)
# Parse the page content with BeautifulSoup
soup = BeautifulSoup(response.text, 'html.parser')
#print(soup)
# Find all the report data cells and print them
div_tags = soup.find_all('div', class_='weui-cell__bd')
#print(div_tags)
data_list = []
data = []
count = 0
# Skip the first three cells, which are not part of the report records
div_tags = div_tags[3:]
for div_tag in div_tags:
    data.append(div_tag.text.strip())
    count += 1
    # Every 10 consecutive cells form one complete report record
    if count == 10:
        data_list.append(data)
        print(data)
        data = []
        count = 0
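# Note: records are only emitted in complete groups of 10 fields, so if the page ever returns
# a cell count that is not a multiple of 10, the trailing partial record is silently dropped.
# An equivalent slicing-based sketch of the same grouping (same assumption of 10 fields per report):
# texts = [d.text.strip() for d in div_tags]
# data_list = [texts[i:i + 10] for i in range(0, len(texts) - len(texts) % 10, 10)]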
#print(data_list)
# Save the contents of data_list to a CSV file
column_names = ["报告编号", "报告标题", "委托单位", "工程名称", "样品名称", "规格型号", "数量", "检验项目", "报告日期", "报告状态"]
df = pd.DataFrame(data_list, columns=column_names)
# utf-8-sig writes a BOM so the Chinese text displays correctly when the CSV is opened in Excel
df.to_csv('data.csv', index=False, encoding='utf-8-sig')
#print(df)  # Open the CSV file to view the full contents
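# Optional sanity check (not in the original): read the file back and report how many rows were written.
# saved = pd.read_csv('data.csv')
# print(f"{len(saved)} report records written to data.csv")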