import requests
from bs4 import BeautifulSoup
import pandas as pd
# Percent-encode the client-company name (UTF-8 bytes rendered as %xx escapes)
# and splice it into the CABR-BETC report-query URL's wtunit parameter.
text = "富思特新材料科技发展股份有限公司"
print(f"2025年启用的新三和系统根据委托单位名称进行查询:{text}")

# Each UTF-8 byte becomes a lowercase %xx escape. Zero-pad to two hex digits
# so bytes below 0x10 are not truncated to a single digit.
hex_representation = "".join(f"%{b:02x}" for b in text.encode("utf-8"))

url = (
    "http://weixin.cabr-betc.com/gzsjcwx/WxMainGj/findreport"
    "?prtnum=&wtnum=&phnum=&gccode=&gcname="
    "&wtunit=" + hex_representation +
    "&wtdatee=&wtdater=&contractcode=&customerId=&xmName="
)
print(f"查询网址:{url}")
print('')
# Send a GET request to fetch the query-result page.
response = requests.get(url)

# Parse the returned HTML with BeautifulSoup.
soup = BeautifulSoup(response.text, 'html.parser')
# Each report card on the page is a 'PrtInfoDiv-detail-box' div whose <span>
# elements alternate label/value. One complete report yields 24 span texts;
# the field values sit at indexes 5, 7, ..., 23 (10 values, matching the CSV
# columns written below).
SPANS_PER_REPORT = 24

data_list = []  # one 24-item list of span texts per report
data = []       # span texts accumulated for the report currently being read
count = 0       # spans collected so far for the current report
boxes = soup.find_all('div', class_='PrtInfoDiv-detail-box')
for box in boxes:
    for span in box.find_all('span'):
        data.append(span.get_text().strip())
        count += 1
        # Flush a completed report as soon as its 24th span arrives.
        if count == SPANS_PER_REPORT:
            data_list.append(data)
            print(data[5::2])  # echo just the field values for this report
            data = []
            count = 0
# Persist the extracted reports to a CSV file.
column_names = ["委托单位", "报告标题", "报告编号", "委托日期", "样品名称",
                "规格型号", "数量", "检测项目", "报告日期", "数据状态"]
# Take indexes 5, 7, ..., 23 of each 24-item row: the field values
# (the even indexes before them hold the on-page labels).
even_columns_list = [row[5::2] for row in data_list]
df = pd.DataFrame(even_columns_list, columns=column_names)
# utf-8-sig writes a BOM so Excel displays the Chinese text correctly.
df.to_csv('data.csv', index=False, encoding='utf-8-sig')
# Open data.csv to inspect the full contents.