Carefully check and fix every problem in the code you gave me; the error report shows a large number of unresolved references to 'self':
import tensorflow as tf
import numpy as np
import os
# ====================== Core configuration (edit for your own environment) ======================
# 1. Local path to the MNIST dataset (the user's original path, kept as-is)
MNIST_DATA_PATH = r"D:\Cadence\SPB_Data\.keras\datasets\Mnist\mnist.npz"
# 2. Output root directory (use an all-English, space-free absolute path to avoid Vitis AI errors)
OUTPUT_ROOT = r"C:\Users\pnc\Desktop\mnist_cnn_vitis_2022"
# 3. Number of calibration samples (Vitis AI quantization recommends 500-1000; 1000 is used here)
CALIB_DATA_NUM = 1000
# ==============================================================================
# Make sure the output directory exists
os.makedirs(OUTPUT_ROOT, exist_ok=True)
# ====================== Data loading and preprocessing ========================
class MNISTLoader:
    def __init__(self, data_path):  # FIX: 'self' was missing here, which caused all the "unresolved reference 'self'" errors
        try:
            mnist = tf.keras.datasets.mnist
            # Load the local MNIST dataset
            (self.train_data, self.train_label), (self.test_data, self.test_label) = mnist.load_data(path=data_path)
            print("Local MNIST dataset loaded successfully!")
            # Preprocess: normalize and add a channel dimension (required by the CNN and Vitis AI quantization)
            self.train_data = np.expand_dims(self.train_data.astype(np.float32) / 255.0, axis=-1)  # [60000, 28, 28, 1]
            self.test_data = np.expand_dims(self.test_data.astype(np.float32) / 255.0, axis=-1)    # [10000, 28, 28, 1]
            self.train_label = self.train_label.astype(np.int32)  # [60000]
            self.test_label = self.test_label.astype(np.int32)    # [10000]
            self.num_train_data, self.num_test_data = self.train_data.shape[0], self.test_data.shape[0]
        except Exception as e:
            print(f"Failed to load the dataset: {e}")
            raise  # abort, so the later steps don't fail on missing data

    def get_batch(self, batch_size_param):
        # Draw a random batch from the training set
        index = np.random.randint(0, self.num_train_data, batch_size_param)
        return self.train_data[index, :], self.train_label[index]

    def get_calib_data(self, calib_num):
        # Calibration data for Vitis AI quantization (first N training images, no labels)
        return self.train_data[:calib_num]
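# Optional sanity check (not part of the original script): once the dataset path above is
# valid, the loader can be exercised on its own to confirm the preprocessed shapes:
#   loader = MNISTLoader(MNIST_DATA_PATH)
#   xb, yb = loader.get_batch(8)
#   print(xb.shape, yb.shape)  # expected: (8, 28, 28, 1) (8,)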
# ===== Model change 1: build the model with the functional API (fixes subclassed-model quantization issues) =====
# The original CNN subclass is removed; the functional API gives an explicit graph and avoids quantization warnings.
def build_cnn_model():
    """Build the CNN model (functional API)."""
    # Explicit input layer (name matches what quantization expects)
    inputs = tf.keras.Input(shape=(28, 28, 1), name='mnist_input')
    # Convolution block 1
    x = tf.keras.layers.Conv2D(
        filters=32,
        kernel_size=(5, 5),
        padding='same',
        activation='relu',
        name='conv1'
    )(inputs)
    x = tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=2, name='pool1')(x)
    # Convolution block 2
    x = tf.keras.layers.Conv2D(
        filters=64,
        kernel_size=(5, 5),
        padding='same',
        activation='relu',
        name='conv2'
    )(x)
    x = tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=2, name='pool2')(x)
    # Fully connected layers
    x = tf.keras.layers.Flatten(name='flatten')(x)
    x = tf.keras.layers.Dense(units=1024, activation='relu', name='dense1')(x)
    x = tf.keras.layers.Dense(units=10, name='dense2')(x)
    # Output layer (explicitly named)
    outputs = tf.keras.layers.Softmax(name='softmax_output')(x)
    return tf.keras.Model(inputs=inputs, outputs=outputs, name='mnist_cnn')
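# Optional: a quick way to confirm the layer names the quantizer will see
# ('mnist_input', 'conv1', ..., 'softmax_output') is to print a summary:
#   build_cnn_model().summary()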
# ==============================================================================
# ============================ Main program ====================================
if __name__ == "__main__":
    # Print the TensorFlow version (Vitis AI is strict about matching TF versions)
    print(f"Current TensorFlow version: {tf.__version__}")
    print("TensorFlow 2.8/2.9 is recommended (to match Vitis AI 3.0/2.5)\n")
    # 1. Hyperparameters
    num_epochs = 5
    batch_size = 50
    learning_rate = 0.001
    # 2. Load the data
    data_loader = MNISTLoader(MNIST_DATA_PATH)
    # 3. Build the model (new functional-API model)
    model = build_cnn_model()
    # ===== Change 2: compile the model (silences the save-time warning) =====
    # Explicit compile: even with a custom training loop, this avoids the
    # "No training configuration" warning when saving and enables model.evaluate
    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
        loss=tf.keras.losses.SparseCategoricalCrossentropy(),
        metrics=['accuracy']
    )
    # =================================================================
    # 4. Train the model
    optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
    num_batches = int(data_loader.num_train_data // batch_size * num_epochs)
    print("\n========== Training ==========")
    for batch_index in range(num_batches):
        X, y = data_loader.get_batch(batch_size)
        with tf.GradientTape() as tape:
            y_pred = model(X)
            loss = tf.keras.losses.sparse_categorical_crossentropy(y_true=y, y_pred=y_pred)
            loss = tf.reduce_mean(loss)
        if batch_index % 100 == 0:  # print every 100 batches to limit output
            print(f"batch {batch_index}: loss {loss.numpy():.4f}")
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
    # 5. Evaluate the model
    print("\n========== Evaluating ==========")
    test_loss, test_acc = model.evaluate(
        data_loader.test_data,
        data_loader.test_label,
        batch_size=batch_size,
        verbose=2
    )
    print(f"test accuracy: {test_acc:.4f}")
    # 6. Export the model in SavedModel format (required for Vitis AI quantization)
    print("\n========== Exporting the model (SavedModel format) ==========")
    # ===== Change 3: improve the model signature (fixes tracing issues) =====
    # Run one inference to build the full graph (avoids the "Found untraced functions" warning)
    _ = model.predict(data_loader.train_data[:1])  # single sample
    # A functional-API model can be saved directly
    saved_model_dir = os.path.join(OUTPUT_ROOT, "mnist_cnn_savedmodel")
    try:
        # Save the model directly (functional-API models carry a complete signature)
        tf.keras.models.save_model(
            model,
            saved_model_dir,
            save_format='tf'
        )
        print(f"Model exported to: {saved_model_dir}")
    except Exception as e:
        print(f"Model export failed: {e}")
        raise
    # =================================================================
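    # Optional round-trip check (not in the original script): reloading the SavedModel
    # confirms the export is restorable before handing it to the quantizer:
    #   reloaded = tf.keras.models.load_model(saved_model_dir)
    #   reloaded.summary()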
    # 7. Generate the calibration data needed for Vitis AI quantization (key step)
    print("\n========== Generating calibration data (for Vitis AI quantization) ==========")
    calib_data = data_loader.get_calib_data(CALIB_DATA_NUM)
    calib_data_path = os.path.join(OUTPUT_ROOT, "calib_data.npy")
    try:
        np.save(calib_data_path, calib_data)
        # Verify the calibration data so it matches what Vitis AI expects
        print(f"Calibration data saved to: {calib_data_path}")
        print(f"Calibration data shape: {calib_data.shape} ({CALIB_DATA_NUM} MNIST images of 28x28x1)")
        print(f"Calibration data dtype: {calib_data.dtype} (Vitis AI requires float32)")
    except Exception as e:
        print(f"Failed to save calibration data: {e}")
        raise
    # 8. Print the model's input/output node names (must be specified during quantization, essential!)
    print("\n========== Key information for Vitis AI quantization ==========")
    # ===== Change 4: read the input/output names directly (functional-API model) =====
    print(f"Input node name: {model.input_names[0]}")    # should be 'mnist_input'
    print(f"Output node name: {model.output_names[0]}")  # should be 'softmax_output'
    print(f"Input shape: {model.input_shape} (None means any batch size)")
    # =================================================================
    # Print the SavedModel signature for cross-checking during quantization
    print("\nYou can inspect the SavedModel signature with (Linux/WSL2):")
    print(f"  saved_model_cli show --dir {saved_model_dir} --all")