Traceback (most recent call last):
File "D:\anaconda\envs\pytorch2.3.1\lib\site-packages\torch\utils\cpp_extension.py", line 2107, in _run_ninja_build
subprocess.run(
File "D:\anaconda\envs\pytorch2.3.1\lib\subprocess.py", line 528, in run
raise CalledProcessError(retcode, process.args,
subprocess.CalledProcessError: Command '['ninja', '-v']' returned non-zero exit status 1.
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "D:\pycharm\pythonProject\ConvolutionalNeuralOperator-main\TrainCNO.py", line 160, in <module>
output_pred_batch = model(input_batch)
File "D:\anaconda\envs\pytorch2.3.1\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\anaconda\envs\pytorch2.3.1\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl
return forward_call(*args, **kwargs)
File "D:\pycharm\pythonProject\ConvolutionalNeuralOperator-main\CNOModule.py", line 476, in forward
x = self.lift(x)
File "D:\anaconda\envs\pytorch2.3.1\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\anaconda\envs\pytorch2.3.1\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl
return forward_call(*args, **kwargs)
File "D:\pycharm\pythonProject\ConvolutionalNeuralOperator-main\CNOModule.py", line 151, in forward
x = self.inter_CNOBlock(x)
File "D:\anaconda\envs\pytorch2.3.1\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\anaconda\envs\pytorch2.3.1\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl
return forward_call(*args, **kwargs)
File "D:\pycharm\pythonProject\ConvolutionalNeuralOperator-main\CNOModule.py", line 104, in forward
return self.activation(x)
File "D:\anaconda\envs\pytorch2.3.1\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\anaconda\envs\pytorch2.3.1\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl
return forward_call(*args, **kwargs)
File "D:\pycharm\pythonProject\ConvolutionalNeuralOperator-main\training\filtered_networks.py", line 392, in forward
x = filtered_lrelu.filtered_lrelu(x=x, fu=self.up_filter, fd=self.down_filter, b=self.bias.to(x.dtype),
File "D:\pycharm\pythonProject\ConvolutionalNeuralOperator-main\torch_utils\ops\filtered_lrelu.py", line 114, in filtered_lrelu
if impl == 'cuda' and x.device.type == 'cuda' and _init():
File "D:\pycharm\pythonProject\ConvolutionalNeuralOperator-main\torch_utils\ops\filtered_lrelu.py", line 26, in _init
_plugin = custom_ops.get_plugin(
File "D:\pycharm\pythonProject\ConvolutionalNeuralOperator-main\torch_utils\custom_ops.py", line 136, in get_plugin
torch.utils.cpp_extension.load(name=module_name, build_directory=cached_build_dir,
File "D:\anaconda\envs\pytorch2.3.1\lib\site-packages\torch\utils\cpp_extension.py", line 1309, in load
return _jit_compile(
File "D:\anaconda\envs\pytorch2.3.1\lib\site-packages\torch\utils\cpp_extension.py", line 1719, in _jit_compile
_write_ninja_file_and_build_library(
File "D:\anaconda\envs\pytorch2.3.1\lib\site-packages\torch\utils\cpp_extension.py", line 1832, in _write_ninja_file_and_build_library
_run_ninja_build(
File "D:\anaconda\envs\pytorch2.3.1\lib\site-packages\torch\utils\cpp_extension.py", line 2123, in _run_ninja_build
raise RuntimeError(message) from e
RuntimeError: Error building extension 'filtered_lrelu_plugin': [1/4] C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.8\bin\nvcc --generate-dependencies-with-compile --dependency-output filtered_lrelu_ns.cuda.o.d -Xcudafe --diag_suppress=dll_interface_conflict_dllexport_assumed -Xcudafe --diag_suppress=dll_interface_conflict_none_assumed -Xcudafe --diag_suppress=field_without_dll_interface -Xcudafe --diag_suppress=base_class_has_different_dll_interface -Xcompiler /EHsc -Xcompiler /wd4068 -Xcompiler /wd4067 -Xcompiler /wd4624 -Xcompiler /wd4190 -Xcompiler /wd4018 -Xcompiler /wd4275 -Xcompiler /wd4267 -Xcompiler /wd4244 -Xcompiler /wd4251 -Xcompiler /wd4819 -Xcompiler /MD -DTORCH_EXTENSION_NAME=filtered_lrelu_plugin -DTORCH_API_INCLUDE_EXTENSION_H -ID:\anaconda\envs\pytorch2.3.1\lib\site-packages\torch\include -ID:\anaconda\envs\pytorch2.3.1\lib\site-packages\torch\include\torch\csrc\api\include -ID:\anaconda\envs\pytorch2.3.1\lib\site-packages\torch\include\TH -ID:\anaconda\envs\pytorch2.3.1\lib\site-packages\torch\include\THC "-IC:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.8\include" -ID:\anaconda\envs\pytorch2.3.1\Include -D_GLIBCXX_USE_CXX11_ABI=0 -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_BFLOAT16_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --expt-relaxed-constexpr -gencode=arch=compute_89,code=compute_89 -gencode=arch=compute_89,code=sm_89 -std=c++17 --use_fast_math --allow-unsupported-compiler -c C:\Users\Administrator\AppData\Local\torch_extensions\torch_extensions\Cache\py39_cu118\filtered_lrelu_plugin\2e9606d7cf844ec44b9f500eaacd35c0-nvidia-geforce-rtx-4060-ti\filtered_lrelu_ns.cu -o filtered_lrelu_ns.cuda.o
[2/4] C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.8\bin\nvcc --generate-dependencies-with-compile --dependency-output filtered_lrelu_rd.cuda.o.d -Xcudafe --diag_suppress=dll_interface_conflict_dllexport_assumed -Xcudafe --diag_suppress=dll_interface_conflict_none_assumed -Xcudafe --diag_suppress=field_without_dll_interface -Xcudafe --diag_suppress=base_class_has_different_dll_interface -Xcompiler /EHsc -Xcompiler /wd4068 -Xcompiler /wd4067 -Xcompiler /wd4624 -Xcompiler /wd4190 -Xcompiler /wd4018 -Xcompiler /wd4275 -Xcompiler /wd4267 -Xcompiler /wd4244 -Xcompiler /wd4251 -Xcompiler /wd4819 -Xcompiler /MD -DTORCH_EXTENSION_NAME=filtered_lrelu_plugin -DTORCH_API_INCLUDE_EXTENSION_H -ID:\anaconda\envs\pytorch2.3.1\lib\site-packages\torch\include -ID:\anaconda\envs\pytorch2.3.1\lib\site-packages\torch\include\torch\csrc\api\include -ID:\anaconda\envs\pytorch2.3.1\lib\site-packages\torch\include\TH -ID:\anaconda\envs\pytorch2.3.1\lib\site-packages\torch\include\THC "-IC:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.8\include" -ID:\anaconda\envs\pytorch2.3.1\Include -D_GLIBCXX_USE_CXX11_ABI=0 -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_BFLOAT16_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --expt-relaxed-constexpr -gencode=arch=compute_89,code=compute_89 -gencode=arch=compute_89,code=sm_89 -std=c++17 --use_fast_math --allow-unsupported-compiler -c C:\Users\Administrator\AppData\Local\torch_extensions\torch_extensions\Cache\py39_cu118\filtered_lrelu_plugin\2e9606d7cf844ec44b9f500eaacd35c0-nvidia-geforce-rtx-4060-ti\filtered_lrelu_rd.cu -o filtered_lrelu_rd.cuda.o
[3/4] C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.8\bin\nvcc --generate-dependencies-with-compile --dependency-output filtered_lrelu_wr.cuda.o.d -Xcudafe --diag_suppress=dll_interface_conflict_dllexport_assumed -Xcudafe --diag_suppress=dll_interface_conflict_none_assumed -Xcudafe --diag_suppress=field_without_dll_interface -Xcudafe --diag_suppress=base_class_has_different_dll_interface -Xcompiler /EHsc -Xcompiler /wd4068 -Xcompiler /wd4067 -Xcompiler /wd4624 -Xcompiler /wd4190 -Xcompiler /wd4018 -Xcompiler /wd4275 -Xcompiler /wd4267 -Xcompiler /wd4244 -Xcompiler /wd4251 -Xcompiler /wd4819 -Xcompiler /MD -DTORCH_EXTENSION_NAME=filtered_lrelu_plugin -DTORCH_API_INCLUDE_EXTENSION_H -ID:\anaconda\envs\pytorch2.3.1\lib\site-packages\torch\include -ID:\anaconda\envs\pytorch2.3.1\lib\site-packages\torch\include\torch\csrc\api\include -ID:\anaconda\envs\pytorch2.3.1\lib\site-packages\torch\include\TH -ID:\anaconda\envs\pytorch2.3.1\lib\site-packages\torch\include\THC "-IC:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.8\include" -ID:\anaconda\envs\pytorch2.3.1\Include -D_GLIBCXX_USE_CXX11_ABI=0 -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_BFLOAT16_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --expt-relaxed-constexpr -gencode=arch=compute_89,code=compute_89 -gencode=arch=compute_89,code=sm_89 -std=c++17 --use_fast_math --allow-unsupported-compiler -c C:\Users\Administrator\AppData\Local\torch_extensions\torch_extensions\Cache\py39_cu118\filtered_lrelu_plugin\2e9606d7cf844ec44b9f500eaacd35c0-nvidia-geforce-rtx-4060-ti\filtered_lrelu_wr.cu -o filtered_lrelu_wr.cuda.o
[4/4] "D:\Microsoft Visual Studio\2019\Professional\VC\Tools\MSVC\14.29.30133\bin\Hostx64\x64/link.exe" filtered_lrelu.o filtered_lrelu_wr.cuda.o filtered_lrelu_rd.cuda.o filtered_lrelu_ns.cuda.o /nologo /DLL c10.lib c10_cuda.lib torch_cpu.lib torch_cuda.lib -INCLUDE:?warp_size@cuda@at@@YAHXZ torch.lib /LIBPATH:D:\anaconda\envs\pytorch2.3.1\lib\site-packages\torch\lib torch_python.lib /LIBPATH:D:\anaconda\envs\pytorch2.3.1\libs "/LIBPATH:C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.8\lib\x64" cudart.lib /out:filtered_lrelu_plugin.pyd
FAILED: filtered_lrelu_plugin.pyd
"D:\Microsoft Visual Studio\2019\Professional\VC\Tools\MSVC\14.29.30133\bin\Hostx64\x64/link.exe" filtered_lrelu.o filtered_lrelu_wr.cuda.o filtered_lrelu_rd.cuda.o filtered_lrelu_ns.cuda.o /nologo /DLL c10.lib c10_cuda.lib torch_cpu.lib torch_cuda.lib -INCLUDE:?warp_size@cuda@at@@YAHXZ torch.lib /LIBPATH:D:\anaconda\envs\pytorch2.3.1\lib\site-packages\torch\lib torch_python.lib /LIBPATH:D:\anaconda\envs\pytorch2.3.1\libs "/LIBPATH:C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.8\lib\x64" cudart.lib /out:filtered_lrelu_plugin.pyd
Creating library filtered_lrelu_plugin.lib and object filtered_lrelu_plugin.exp
filtered_lrelu.o : error LNK2019: unresolved external symbol __std_find_trivial_1 referenced in function "char const * __cdecl std::_Find_vectorized<char const ,char>(char const * const,char const * const,char)" (??$_Find_vectorized@$$CBDD@std@@YAPEBDQEBD0D@Z)
ninja: build stopped: subcommand failed.

What is causing this linker error, and how can I fix it?
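For context, the failure happens while PyTorch JIT-builds the filtered_lrelu_plugin extension, not in my training code itself. Below is a minimal sketch I use to reproduce it without running the whole training loop; it is my own test script (not part of the repo) and assumes it is run from the ConvolutionalNeuralOperator-main directory so that torch_utils is importable, and that a CUDA device is available.

# repro.py - minimal sketch, my own script, assuming the StyleGAN3-style
# torch_utils layout that this repo ships with.
import torch
from torch_utils.ops import filtered_lrelu

x = torch.randn(1, 8, 16, 16, device='cuda')   # dummy NCHW activation
b = torch.zeros(8, device='cuda')              # per-channel bias
# impl='cuda' forces the custom-op path, which JIT-compiles filtered_lrelu_plugin
# via ninja and hits the same LNK2019 error as TrainCNO.py.
y = filtered_lrelu.filtered_lrelu(x=x, b=b, impl='cuda')
print(y.shape)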