First, download the source code from the GitHub repository: https://github.com/DylanWusee/pointconv_pytorch/tree/master
No pretrained model is provided, so the model has to be trained first.
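A minimal sketch of producing a checkpoint (the script name train_cls_conv.py comes from the repository, but check its README for the exact arguments and output path; the copy step simply matches the path used by the export script below):

# Train the classifier on ModelNet40 (arguments omitted; see the repository README).
python train_cls_conv.py
# Copy/rename the saved weights so the export script below can find them.
# <your_checkpoint>.pth is a placeholder for whatever file the training run produced.
cp <your_checkpoint>.pth ./checkpoints/checkpoint.pth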
onnxruntime deployment
ONNX model export:
import torch
from model.pointconv import PointConvDensityClsSsg as PointConvClsSsg

num_class = 40
# Load the trained checkpoint and restore the classifier weights.
checkpoint = torch.load("./checkpoints/checkpoint.pth", weights_only=False)
classifier = PointConvClsSsg(num_class).cuda()
classifier.load_state_dict(checkpoint['model_state_dict'])
classifier.eval()  # switch to inference mode (batchnorm/dropout) before exporting

# Dummy inputs: xyz coordinates and normals, both shaped (batch, 3, num_points).
points, normals = torch.randn(32, 3, 1024).cuda(), torch.randn(32, 3, 1024).cuda()
torch.onnx.export(classifier, (points, normals), "model.onnx", opset_version=13,
                  input_names=["points", "normals"], output_names=["pred"],
                  dynamic_axes={"points": {0: "batch_size"}, "normals": {0: "batch_size"}, "pred": {0: "batch_size"}})
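Before moving on, the exported graph can be sanity-checked with the onnx package (a quick optional check, assuming onnx is installed):

import onnx

model = onnx.load("model.onnx")
onnx.checker.check_model(model)             # raises if the graph is structurally invalid
print([i.name for i in model.graph.input])  # expect ['points', 'normals']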
Python script for ONNX model inference:
import numpy as np
import onnxruntime

npoints = 1024
# Prefer the CUDA provider when available, fall back to CPU.
onnx_session = onnxruntime.InferenceSession("model.onnx", providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
input_name = [node.name for node in onnx_session.get_inputs()]  # ['points', 'normals']

# Each row of the ModelNet40 txt file is x, y, z, nx, ny, nz.
cloud = np.loadtxt("./data/modelnet40_normal_resampled/bed_0610.txt", delimiter=",")
cloud = cloud[0:npoints, :]            # keep the first 1024 points
cloud = cloud.reshape(1, npoints, 6)   # add a batch dimension
cloud = cloud.transpose(0, 2, 1)       # (1, 6, 1024): channels first
points, normals = cloud[:, :3, :], cloud[:, 3:, :]

inputs = {}
inputs['points'] = points.astype(np.float32)
inputs['normals'] = normals.astype(np.float32)
outputs = onnx_session.run(None, inputs)
print(np.argmax(outputs[0].squeeze()))  # predicted class index
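To turn the printed index into a readable label, the class-name list that ships with the resampled ModelNet40 data can be used (the file name below is taken from the dataset layout and may need adjusting for your copy):

# Hypothetical path: the modelnet40_normal_resampled archive includes a list of the 40 class names.
shape_names = [line.strip() for line in open("./data/modelnet40_normal_resampled/modelnet40_shape_names.txt")]
print(shape_names[np.argmax(outputs[0].squeeze())])  # expected to be "bed" for bed_0610.txt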
TensorRT deployment
Convert the ONNX model to a TensorRT engine:
trtexec --onnx=model.onnx --saveEngine=model.engine
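Because the ONNX model was exported with a dynamic batch axis, trtexec may need an explicit optimization profile; a sketch for single-sample inference (the shape values are assumptions matching the export above):

trtexec --onnx=model.onnx --saveEngine=model.engine \
        --minShapes=points:1x3x1024,normals:1x3x1024 \
        --optShapes=points:1x3x1024,normals:1x3x1024 \
        --maxShapes=points:1x3x1024,normals:1x3x1024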
TensorRT model inference script:
import numpy as np
import tensorrt as trt
import common  # buffer-allocation / inference helpers from the TensorRT Python samples

logger = trt.Logger(trt.Logger.WARNING)
# Deserialize the engine built by trtexec and create an execution context.
with open("model.engine", "rb") as f, trt.Runtime(logger) as runtime:
    engine = runtime.deserialize_cuda_engine(f.read())
context = engine.create_execution_context()
inputs, outputs, bindings, stream = common.allocate_buffers(engine)

npoints = 1024
# Same preprocessing as in the onnxruntime script: x, y, z, nx, ny, nz per row.
cloud = np.loadtxt("./data/modelnet40_normal_resampled/bed_0610.txt", delimiter=",")
cloud = cloud[0:npoints, :]
cloud = cloud.reshape(1, npoints, 6)
cloud = cloud.transpose(0, 2, 1)
points, normals = cloud[:, :3, :], cloud[:, 3:, :]

# Copy the two inputs into the page-locked host buffers, then run inference.
np.copyto(inputs[0].host, points.ravel())
np.copyto(inputs[1].host, normals.ravel())
output = common.do_inference(context, engine=engine, bindings=bindings, inputs=inputs, outputs=outputs, stream=stream)
print(np.argmax(outputs[0].host))  # predicted class index
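If the engine was built for batch size 1 as in the trtexec sketch above, the flat host buffer holds the 40 class scores; a minimal sketch of viewing them per class (the reshape assumes num_class = 40 and batch = 1):

pred = np.array(outputs[0].host).reshape(1, 40)  # (batch, num_class)
print(np.argmax(pred, axis=1))                   # same class index as the onnxruntime script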
