/home/re/dev/sox/sox.py:182: DeprecationWarning: You are using the legacy TorchScript-based ONNX export. Starting in PyTorch 2.9, the new torch.export-based ONNX exporter will be the default. To switch now, set dynamo=True in torch.onnx.export. This new exporter supports features like exporting LLMs with DynamicCache. We encourage you to try it and share feedback to help improve the experience. Learn more about the new export logic: https://pytorch.org/docs/stable/onnx_dynamo.html. For exporting control flow: https://pytorch.org/tutorials/beginner/onnx/export_control_flow_model_to_onnx_tutorial.html.
torch.onnx.export(model, input_sample, "soc_model_weights.onnx", input_names=["input"], output_names=["output"], verbose=True)
Exported graph: graph(%input : Float(1, 4, strides=[4, 1], requires_grad=0, device=cpu),
%fc1.weight : Float(256, 4, strides=[4, 1], requires_grad=1, device=cpu),
%fc1.bias : Float(256, strides=[1], requires_grad=1, device=cpu),
%bn1.weight : Float(256, strides=[1], requires_grad=1, device=cpu),
%bn1.bias : Float(256, strides=[1], requires_grad=1, device=cpu),
%bn1.running_mean : Float(256, strides=[1], requires_grad=0, device=cpu),
%bn1.running_var : Float(256, strides=[1], requires_grad=0, device=cpu),
%fc2.weight : Float(128, 256, strides=[256, 1], requires_grad=1, device=cpu),
%fc2.bias : Float(128, strides=[1], requires_grad=1, device=cpu),
%bn2.weight : Float(128, strides=[1], requires_grad=1, device=cpu),
%bn2.bias : Float(128, strides=[1], requires_grad=1, device=cpu),
%bn2.running_mean : Float(128, strides=[1], requires_grad=0, device=cpu),
%bn2.running_var : Float(128, strides=[1], requires_grad=0, device=cpu),
%fc3.weight : Float(64, 128, strides=[128, 1], requires_grad=1, device=cpu),
%fc3.bias : Float(64, strides=[1], requires_grad=1, device=cpu),
%bn3.weight : Float(64, strides=[1], requires_grad=1, device=cpu),
%bn3.bias : Float(64, strides=[1], requires_grad=1, device=cpu),
%bn3.running_mean : Float(64, strides=[1], requires_grad=0, device=cpu),
%bn3.running_var : Float(64, strides=[1], requires_grad=0, device=cpu),
%fc4.weight : Float(32, 64, strides=[64, 1], requires_grad=1, device=cpu),
%fc4.bias : Float(32, strides=[1], requires_grad=1, device=cpu),
%bn4.weight : Float(32, strides=[1], requires_grad=1, device=cpu),
%bn4.bias : Float(32, strides=[1], requires_grad=1, device=cpu),
%bn4.running_mean : Float(32, strides=[1], requires_grad=0, device=cpu),
%bn4.running_var : Float(32, strides=[1], requires_grad=0, device=cpu),
%fc5.weight : Float(1, 32, strides=[32, 1], requires_grad=1, device=cpu),
%fc5.bias : Float(1, strides=[1], requires_grad=1, device=cpu)):
%/fc1/Gemm_output_0 : Float(1, 256, strides=[256, 1], requires_grad=1, device=cpu) = onnx::Gemm[alpha=1., beta=1., transB=1, onnx_name="/fc1/Gemm"](%input, %fc1.weight, %fc1.bias), scope: __main__.BatterySOCModel::/torch.nn.modules.linear.Linear::fc1 # /home/re/.local/lib/python3.10/site-packages/torch/nn/modules/linear.py:125:0
%/bn1/BatchNormalization_output_0 : Float(1, 256, strides=[256, 1], requires_grad=1, device=cpu) = onnx::BatchNormalization[epsilon=1.0000000000000001e-05, momentum=0.90000000000000002, training_mode=0, onnx_name="/bn1/BatchNormalization"](%/fc1/Gemm_output_0, %bn1.weight, %bn1.bias, %bn1.running_mean, %bn1.running_var), scope: __main__.BatterySOCModel::/torch.nn.modules.batchnorm.BatchNorm1d::bn1 # /home/re/.local/lib/python3.10/site-packages/torch/nn/functional.py:2817:0
%/Relu_output_0 : Float(1, 256, strides=[256, 1], requires_grad=1, device=cpu) = onnx::Relu[onnx_name="/Relu"](%/bn1/BatchNormalization_output_0), scope: __main__.BatterySOCModel:: # /home/re/dev/sox/sox.py:36:0
%/fc2/Gemm_output_0 : Float(1, 128, strides=[128, 1], requires_grad=1, device=cpu) = onnx::Gemm[alpha=1., beta=1., transB=1, onnx_name="/fc2/Gemm"](%/Relu_output_0, %fc2.weight, %fc2.bias), scope: __main__.BatterySOCModel::/torch.nn.modules.linear.Linear::fc2 # /home/re/.local/lib/python3.10/site-packages/torch/nn/modules/linear.py:125:0
%/bn2/BatchNormalization_output_0 : Float(1, 128, strides=[128, 1], requires_grad=1, device=cpu) = onnx::BatchNormalization[epsilon=1.0000000000000001e-05, momentum=0.90000000000000002, training_mode=0, onnx_name="/bn2/BatchNormalization"](%/fc2/Gemm_output_0, %bn2.weight, %bn2.bias, %bn2.running_mean, %bn2.running_var), scope: __main__.BatterySOCModel::/torch.nn.modules.batchnorm.BatchNorm1d::bn2 # /home/re/.local/lib/python3.10/site-packages/torch/nn/functional.py:2817:0
%/Relu_1_output_0 : Float(1, 128, strides=[128, 1], requires_grad=1, device=cpu) = onnx::Relu[onnx_name="/Relu_1"](%/bn2/BatchNormalization_output_0), scope: __main__.BatterySOCModel:: # /home/re/dev/sox/sox.py:38:0
%/fc3/Gemm_output_0 : Float(1, 64, strides=[64, 1], requires_grad=1, device=cpu) = onnx::Gemm[alpha=1., beta=1., transB=1, onnx_name="/fc3/Gemm"](%/Relu_1_output_0, %fc3.weight, %fc3.bias), scope: __main__.BatterySOCModel::/torch.nn.modules.linear.Linear::fc3 # /home/re/.local/lib/python3.10/site-packages/torch/nn/modules/linear.py:125:0
%/bn3/BatchNormalization_output_0 : Float(1, 64, strides=[64, 1], requires_grad=1, device=cpu) = onnx::BatchNormalization[epsilon=1.0000000000000001e-05, momentum=0.90000000000000002, training_mode=0, onnx_name="/bn3/BatchNormalization"](%/fc3/Gemm_output_0, %bn3.weight, %bn3.bias, %bn3.running_mean, %bn3.running_var), scope: __main__.BatterySOCModel::/torch.nn.modules.batchnorm.BatchNorm1d::bn3 # /home/re/.local/lib/python3.10/site-packages/torch/nn/functional.py:2817:0
%/Relu_2_output_0 : Float(1, 64, strides=[64, 1], requires_grad=1, device=cpu) = onnx::Relu[onnx_name="/Relu_2"](%/bn3/BatchNormalization_output_0), scope: __main__.BatterySOCModel:: # /home/re/dev/sox/sox.py:40:0
%/fc4/Gemm_output_0 : Float(1, 32, strides=[32, 1], requires_grad=1, device=cpu) = onnx::Gemm[alpha=1., beta=1., transB=1, onnx_name="/fc4/Gemm"](%/Relu_2_output_0, %fc4.weight, %fc4.bias), scope: __main__.BatterySOCModel::/torch.nn.modules.linear.Linear::fc4 # /home/re/.local/lib/python3.10/site-packages/torch/nn/modules/linear.py:125:0
%/bn4/BatchNormalization_output_0 : Float(1, 32, strides=[32, 1], requires_grad=1, device=cpu) = onnx::BatchNormalization[epsilon=1.0000000000000001e-05, momentum=0.90000000000000002, training_mode=0, onnx_name="/bn4/BatchNormalization"](%/fc4/Gemm_output_0, %bn4.weight, %bn4.bias, %bn4.running_mean, %bn4.running_var), scope: __main__.BatterySOCModel::/torch.nn.modules.batchnorm.BatchNorm1d::bn4 # /home/re/.local/lib/python3.10/site-packages/torch/nn/functional.py:2817:0
%/Relu_3_output_0 : Float(1, 32, strides=[32, 1], requires_grad=1, device=cpu) = onnx::Relu[onnx_name="/Relu_3"](%/bn4/BatchNormalization_output_0), scope: __main__.BatterySOCModel:: # /home/re/dev/sox/sox.py:42:0
%/fc5/Gemm_output_0 : Float(1, 1, strides=[1, 1], requires_grad=1, device=cpu) = onnx::Gemm[alpha=1., beta=1., transB=1, onnx_name="/fc5/Gemm"](%/Relu_3_output_0, %fc5.weight, %fc5.bias), scope: __main__.BatterySOCModel::/torch.nn.modules.linear.Linear::fc5 # /home/re/.local/lib/python3.10/site-packages/torch/nn/modules/linear.py:125:0
%output : Float(1, 1, strides=[1, 1], requires_grad=1, device=cpu) = onnx::Sigmoid[onnx_name="/Sigmoid"](%/fc5/Gemm_output_0), scope: __main__.BatterySOCModel:: # /home/re/dev/sox/sox.py:43:0
return (%output)
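
For reference, the verbose graph above implies the layer stack below. This is a minimal sketch reconstructed only from the tensor shapes and scope names in the log (4 → 256 → 128 → 64 → 32 → 1, with `BatchNorm1d` and ReLU after each hidden `Linear` and a `Sigmoid` on the output); the actual `BatterySOCModel` definition in `sox.py` may differ in detail. Note that PyTorch's default BatchNorm momentum of 0.1 appears as `momentum=0.9` in the ONNX attributes, since ONNX stores `1 - momentum`. The export call repeats the one shown in the log but adds `dynamo=True`, which is the switch the `DeprecationWarning` itself recommends for the new `torch.export`-based exporter (requires a recent PyTorch).

```python
import torch
import torch.nn as nn

class BatterySOCModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Shapes taken from the fcN.weight / bnN.* tensors in the exported graph
        self.fc1 = nn.Linear(4, 256)
        self.bn1 = nn.BatchNorm1d(256)
        self.fc2 = nn.Linear(256, 128)
        self.bn2 = nn.BatchNorm1d(128)
        self.fc3 = nn.Linear(128, 64)
        self.bn3 = nn.BatchNorm1d(64)
        self.fc4 = nn.Linear(64, 32)
        self.bn4 = nn.BatchNorm1d(32)
        self.fc5 = nn.Linear(32, 1)

    def forward(self, x):
        # Assumed forward pass: Gemm -> BatchNormalization -> Relu blocks,
        # then a final Gemm -> Sigmoid, matching the node order in the graph.
        x = torch.relu(self.bn1(self.fc1(x)))
        x = torch.relu(self.bn2(self.fc2(x)))
        x = torch.relu(self.bn3(self.fc3(x)))
        x = torch.relu(self.bn4(self.fc4(x)))
        return torch.sigmoid(self.fc5(x))

model = BatterySOCModel()
model.eval()                      # eval mode -> training_mode=0 in the BatchNorm nodes
input_sample = torch.randn(1, 4)  # matches the Float(1, 4) graph input

torch.onnx.export(
    model,
    input_sample,
    "soc_model_weights.onnx",
    input_names=["input"],
    output_names=["output"],
    dynamo=True,                  # opt in to the torch.export-based exporter
)
```

A quick sanity check is to run the exported file through onnxruntime (an extra dependency, not shown in the log) and compare it against the PyTorch output; the difference should be on the order of float32 rounding error.

```python
import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession("soc_model_weights.onnx")
onnx_out = sess.run(None, {"input": input_sample.numpy()})[0]

with torch.no_grad():
    torch_out = model(input_sample).numpy()

print("max abs diff:", np.abs(onnx_out - torch_out).max())
```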