import os
import numpy as np
import cv2
from onnxruntime.quantization import (
    CalibrationDataReader,
    quantize_static,
    QuantType,
    QuantFormat,
    CalibrationMethod,
)

# Calibration data reader
class ImageCalibrationDataReader(CalibrationDataReader):
    def __init__(self, image_paths):
        # self.image_paths = image_paths  # single image / explicit list of paths
        self.image_paths = self._get_all_images(image_paths, (".jpg", ".png"))  # scan a folder
        self.idx = 0
        self.input_name = 'input'
    def _get_all_images(self, image_dir, valid_extensions):
        """Collect the paths of all matching image files in a folder."""
        image_paths = []
        # Iterate over every file in the folder
        for filename in os.listdir(image_dir):
            # Check whether the file extension is one we accept
            if filename.lower().endswith(valid_extensions):
                # Build the full file path
                full_path = os.path.join(image_dir, filename)
                image_paths.append(full_path)
        return image_paths
    def preprocess(self, image_path):
        # Same preprocessing that you apply before feeding an image to the model
        frame = cv2.imread(image_path)
        X = cv2.resize(frame, (640, 640))
        # image_data = np.array(X).astype(np.float32) / 255.0  # Normalize to [0, 1] range
        image_data = np.array(X).astype(np.float32)
        image_data = np.transpose(image_data, (2, 0, 1))  # (H, W, C) -> (C, H, W)
        image_data = np.expand_dims(image_data, axis=0)   # Add batch dimension
        return image_data
    def get_next(self):
        # Iterate through the calibration data set, one image per call
        if self.idx >= len(self.image_paths):
            return None
        image_path = self.image_paths[self.idx]
        input_data = self.preprocess(image_path)
        self.idx += 1
        return {self.input_name: input_data}
# Folder that holds the calibration images; the reader scans the whole directory,
# so add more images there to improve calibration coverage
calibration_image_dir = r"F:\man_data\mpii_human_pose_v1\images512"
# Create an instance of the ImageCalibrationDataReader
calibration_data_reader = ImageCalibrationDataReader(calibration_image_dir)
# Use the calibration_data_reader with quantize_static
quantize_static(
    # r"yolov8n.onnx", "yolov8n_int8(3).onnx",
    r"E:\Nexwise_Internship\mmdeploy\export\rtmo-s_8xb32-600e_body7-640x640.onnx",
    r"E:\Nexwise_Internship\mmdeploy\export\rtmo-s_8xb32-600e_body7-640x640_nonmsint8.onnx",
    weight_type=QuantType.QInt8,
    activation_type=QuantType.QUInt8,
    calibration_data_reader=calibration_data_reader,
    quant_format=QuantFormat.QDQ,
    nodes_to_exclude=[  # keep the detection-head nodes in float to preserve accuracy
        '/model.22/Concat_3', '/model.22/Split', '/model.22/Sigmoid',
        '/model.22/dfl/Reshape', '/model.22/dfl/Transpose', '/model.22/dfl/Softmax',
        '/model.22/dfl/conv/Conv', '/model.22/dfl/Reshape_1', '/model.22/Slice_1',
        '/model.22/Slice', '/model.22/Add_1', '/model.22/Sub', '/model.22/Div_1',
        '/model.22/Concat_4', '/model.22/Mul_2', '/model.22/Concat_5',
    ],
    per_channel=False,
    calibrate_method=CalibrationMethod.Entropy,
    reduce_range=True,
)
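
After quantize_static finishes, it is worth a quick sanity check that the INT8 model loads and runs end to end. The snippet below is a minimal sketch, assuming onnxruntime is installed; it reuses the reader's preprocess method and the output path from the code above, while the CPU execution provider and the shape printout are illustrative choices, not part of the original workflow.

import onnxruntime as ort

# Sanity check: load the quantized model and run one calibration image through it
quantized_model_path = r"E:\Nexwise_Internship\mmdeploy\export\rtmo-s_8xb32-600e_body7-640x640_nonmsint8.onnx"
session = ort.InferenceSession(quantized_model_path, providers=["CPUExecutionProvider"])

# Reuse the calibration preprocessing on a single sample image
sample_path = calibration_data_reader.image_paths[0]
input_tensor = calibration_data_reader.preprocess(sample_path)

# Read the real input name from the model instead of hard-coding 'input'
input_name = session.get_inputs()[0].name
outputs = session.run(None, {input_name: input_tensor})
for i, out in enumerate(outputs):
    print(f"output[{i}] shape: {out.shape}, dtype: {out.dtype}")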