PS C:\Users\Administrator\Desktop> cd E:\PyTorch_Build\pytorch
PS E:\PyTorch_Build\pytorch> # 1. Activate the virtual environment
PS E:\PyTorch_Build\pytorch> .\pytorch_env\Scripts\activate
(pytorch_env) PS E:\PyTorch_Build\pytorch>
(pytorch_env) PS E:\PyTorch_Build\pytorch> # 2. Fix the conda path (only needs to be run once)
(pytorch_env) PS E:\PyTorch_Build\pytorch> $condaPath = "${env:USERPROFILE}\miniconda3\Scripts"
(pytorch_env) PS E:\PyTorch_Build\pytorch> $env:PATH += ";$condaPath"
(pytorch_env) PS E:\PyTorch_Build\pytorch> [Environment]::SetEnvironmentVariable("PATH", $env:PATH, "Machine")
(pytorch_env) PS E:\PyTorch_Build\pytorch>
(pytorch_env) PS E:\PyTorch_Build\pytorch> # 3. Verify the fix
(pytorch_env) PS E:\PyTorch_Build\pytorch> conda --version # should print the conda version
conda: The term 'conda' is not recognized as a name of a cmdlet, function, script file, or executable program.
Check the spelling of the name, or if a path was included, verify that the path is correct and try again.
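Note: conda is still not recognized because step 2 appended "$env:USERPROFILE\miniconda3\Scripts" without first checking that the directory exists, and writing the whole session PATH into the Machine scope does not help the current shell anyway. A minimal sketch, assuming Miniconda might live under the user profile or ProgramData (both locations are guesses), that only touches PATH when conda is actually there:

# Sketch: check candidate Miniconda locations before changing PATH (adjust to your install).
$candidates = @("$env:USERPROFILE\miniconda3\Scripts", "C:\ProgramData\miniconda3\Scripts")
foreach ($c in $candidates) {
    if (Test-Path "$c\conda.exe") {
        $env:PATH = "$c;$env:PATH"   # current session only
        break
    }
}
Get-Command conda -ErrorAction SilentlyContinue   # prints nothing if conda is still missing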
(pytorch_env) PS E:\PyTorch_Build\pytorch> # 1. Install the correct MKL version
(pytorch_env) PS E:\PyTorch_Build\pytorch> pip uninstall -y mkl-static mkl-include
Found existing installation: mkl-static 2024.1.0
Uninstalling mkl-static-2024.1.0:
Successfully uninstalled mkl-static-2024.1.0
Found existing installation: mkl-include 2024.1.0
Uninstalling mkl-include-2024.1.0:
Successfully uninstalled mkl-include-2024.1.0
(pytorch_env) PS E:\PyTorch_Build\pytorch> pip install mkl-static==2024.1 mkl-include==2024.1
Looking in indexes: https://pypi.tuna.tsinghua.edu.cn/simple
Collecting mkl-static==2024.1
Using cached https://pypi.tuna.tsinghua.edu.cn/packages/d8/f0/3b9976df82906d8f3244213b6d8beb67cda19ab5b0645eb199da3c826127/mkl_static-2024.1.0-py2.py3-none-win_amd64.whl (220.8 MB)
Collecting mkl-include==2024.1
Using cached https://pypi.tuna.tsinghua.edu.cn/packages/06/1b/f05201146f7f12bf871fa2c62096904317447846b5d23f3560a89b4bbaae/mkl_include-2024.1.0-py2.py3-none-win_amd64.whl (1.3 MB)
Requirement already satisfied: intel-openmp==2024.* in e:\pytorch_build\pytorch\pytorch_env\lib\site-packages (from mkl-static==2024.1) (2024.2.1)
Requirement already satisfied: tbb-devel==2021.* in e:\pytorch_build\pytorch\pytorch_env\lib\site-packages (from mkl-static==2024.1) (2021.13.1)
Requirement already satisfied: intel-cmplr-lib-ur==2024.2.1 in e:\pytorch_build\pytorch\pytorch_env\lib\site-packages (from intel-openmp==2024.*->mkl-static==2024.1) (2024.2.1)
Requirement already satisfied: tbb==2021.13.1 in e:\pytorch_build\pytorch\pytorch_env\lib\site-packages (from tbb-devel==2021.*->mkl-static==2024.1) (2021.13.1)
Installing collected packages: mkl-include, mkl-static
Successfully installed mkl-include-2024.1.0 mkl-static-2024.1.0
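Note: the pip wheels install fine, but the configure log further down still reports "MKL could not be found. Defaulting to Eigen" because CMake is never told where the headers and static libraries landed. A hedged sketch, assuming the mkl-static/mkl-include wheels unpack into the venv's Library\ tree (verify the real paths with Get-ChildItem before relying on them), of exposing them to the build:

# Sketch: confirm where the wheels put headers/libs, then expose them to CMake.
# The Library\ layout below is an assumption; adjust to what Get-ChildItem actually shows.
$venv = "E:\PyTorch_Build\pytorch\pytorch_env"
Get-ChildItem "$venv\Library\include\mkl*.h"  -ErrorAction SilentlyContinue | Select-Object -First 3
Get-ChildItem "$venv\Library\lib\mkl*.lib"    -ErrorAction SilentlyContinue | Select-Object -First 3
$env:CMAKE_INCLUDE_PATH = "$venv\Library\include"
$env:LIB = "$venv\Library\lib;$env:LIB"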
(pytorch_env) PS E:\PyTorch_Build\pytorch>
(pytorch_env) PS E:\PyTorch_Build\pytorch> # 2. Install libuv
(pytorch_env) PS E:\PyTorch_Build\pytorch> conda install -c conda-forge libuv=1.46
conda: The term 'conda' is not recognized as a name of a cmdlet, function, script file, or executable program.
Check the spelling of the name, or if a path was included, verify that the path is correct and try again.
(pytorch_env) PS E:\PyTorch_Build\pytorch>
(pytorch_env) PS E:\PyTorch_Build\pytorch> # 3. Install OpenSSL
(pytorch_env) PS E:\PyTorch_Build\pytorch> conda install -c conda-forge openssl=3.1
conda: The term 'conda' is not recognized as a name of a cmdlet, function, script file, or executable program.
Check the spelling of the name, or if a path was included, verify that the path is correct and try again.
(pytorch_env) PS E:\PyTorch_Build\pytorch>
(pytorch_env) PS E:\PyTorch_Build\pytorch> # 4. Verify the installation
(pytorch_env) PS E:\PyTorch_Build\pytorch> python -c "import mkl; print('MKL version:', mkl.__version__)"
Traceback (most recent call last):
File "<string>", line 1, in <module>
ModuleNotFoundError: No module named 'mkl'
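Note: this import is expected to fail. mkl-static and mkl-include ship headers and static libraries, not an importable Python module (an importable `mkl` module typically comes from a different package such as mkl-service). A check that matches what was actually installed might look like this, with the .lib filename being an assumption to verify:

pip show mkl-static mkl-include          # confirms the wheels are present
Get-ChildItem "E:\PyTorch_Build\pytorch\pytorch_env\Library\lib" -Filter "mkl_core*.lib" -ErrorAction SilentlyContinue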
(pytorch_env) PS E:\PyTorch_Build\pytorch> conda list | Select-String "libuv|openssl"
conda: The term 'conda' is not recognized as a name of a cmdlet, function, script file, or executable program.
Check the spelling of the name, or if a path was included, verify that the path is correct and try again.
(pytorch_env) PS E:\PyTorch_Build\pytorch> # Verify all key components
(pytorch_env) PS E:\PyTorch_Build\pytorch> python -c "import mkl; print('✓ MKL installed')"
Traceback (most recent call last):
File "<string>", line 1, in <module>
ModuleNotFoundError: No module named 'mkl'
(pytorch_env) PS E:\PyTorch_Build\pytorch> conda list | Select-String "libuv|openssl"
conda: The term 'conda' is not recognized as a name of a cmdlet, function, script file, or executable program.
Check the spelling of the name, or if a path was included, verify that the path is correct and try again.
(pytorch_env) PS E:\PyTorch_Build\pytorch> dir "E:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v13.0\bin\cudnn*"
(pytorch_env) PS E:\PyTorch_Build\pytorch>
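Note: the dir command above matched nothing, so cuDNN is not installed inside the CUDA v13.0 toolkit directory. A quick, hedged way to look for a separate cuDNN installation (the roots below are guesses; adjust to your drives):

# Search common NVIDIA install roots for cuDNN headers/libraries (may take a moment).
$roots = @("C:\Program Files\NVIDIA", "E:\Program Files\NVIDIA")
foreach ($r in $roots) {
    if (Test-Path $r) {
        Get-ChildItem $r -Recurse -Include cudnn*.h, cudnn*.lib -ErrorAction SilentlyContinue |
            Select-Object -ExpandProperty FullName
    }
}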
(pytorch_env) PS E:\PyTorch_Build\pytorch> # Verify environment variables
(pytorch_env) PS E:\PyTorch_Build\pytorch> python -c "import os; print('Environment variable check:');
>> print('CUDA_PATH:', os.getenv('CUDA_PATH'));
>> print('CONDA_PREFIX:', os.getenv('CONDA_PREFIX'))"
Environment variable check:
CUDA_PATH: E:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v13.0
CONDA_PREFIX: None
(pytorch_env) PS E:\PyTorch_Build\pytorch> # Clean and rebuild
(pytorch_env) PS E:\PyTorch_Build\pytorch> Remove-Item -Recurse -Force build
(pytorch_env) PS E:\PyTorch_Build\pytorch> python setup.py install
Building wheel torch-2.9.0a0+git2d31c3d
-- Building version 2.9.0a0+git2d31c3d
E:\PyTorch_Build\pytorch\pytorch_env\lib\site-packages\setuptools\_distutils\_msvccompiler.py:12: UserWarning: _get_vc_env is private; find an alternative (pypa/distutils#340)
warnings.warn(
-- Checkout nccl release tag: v2.27.5-1
cmake -GNinja -DBUILD_PYTHON=True -DBUILD_TEST=True -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=E:\PyTorch_Build\pytorch\torch -DCMAKE_PREFIX_PATH=E:\PyTorch_Build\pytorch\pytorch_env\Lib\site-packages -DPython_EXECUTABLE=E:\PyTorch_Build\pytorch\pytorch_env\Scripts\python.exe -DTORCH_BUILD_VERSION=2.9.0a0+git2d31c3d -DUSE_NUMPY=True E:\PyTorch_Build\pytorch
CMake Deprecation Warning at CMakeLists.txt:18 (cmake_policy):
The OLD behavior for policy CMP0126 will be removed from a future version
of CMake.
The cmake-policies(7) manual explains that the OLD behaviors of all
policies are deprecated and that a policy should be set to OLD only under
specific short-term circumstances. Projects should be ported to the NEW
behavior and not rely on setting a policy to OLD.
-- The CXX compiler identification is MSVC 19.44.35215.0
-- The C compiler identification is MSVC 19.44.35215.0
-- Detecting CXX compiler ABI info
-- Detecting CXX compiler ABI info - done
-- Check for working CXX compiler: C:/Program Files (x86)/Microsoft Visual Studio/2022/BuildTools/VC/Tools/MSVC/14.44.35207/bin/Hostx64/x64/cl.exe - skipped
-- Detecting CXX compile features
-- Detecting CXX compile features - done
-- Detecting C compiler ABI info
-- Detecting C compiler ABI info - done
-- Check for working C compiler: C:/Program Files (x86)/Microsoft Visual Studio/2022/BuildTools/VC/Tools/MSVC/14.44.35207/bin/Hostx64/x64/cl.exe - skipped
-- Detecting C compile features
-- Detecting C compile features - done
-- Not forcing any particular BLAS to be found
CMake Warning at CMakeLists.txt:425 (message):
TensorPipe cannot be used on Windows. Set it to OFF
CMake Warning at CMakeLists.txt:427 (message):
KleidiAI cannot be used on Windows. Set it to OFF
CMake Warning at CMakeLists.txt:439 (message):
Libuv is not installed in current conda env. Set USE_DISTRIBUTED to OFF.
Please run command 'conda install -c conda-forge libuv=1.39' to install
libuv.
-- Performing Test C_HAS_AVX_1
-- Performing Test C_HAS_AVX_1 - Success
-- Performing Test C_HAS_AVX2_1
-- Performing Test C_HAS_AVX2_1 - Success
-- Performing Test C_HAS_AVX512_1
-- Performing Test C_HAS_AVX512_1 - Success
-- Performing Test CXX_HAS_AVX_1
-- Performing Test CXX_HAS_AVX_1 - Success
-- Performing Test CXX_HAS_AVX2_1
-- Performing Test CXX_HAS_AVX2_1 - Success
-- Performing Test CXX_HAS_AVX512_1
-- Performing Test CXX_HAS_AVX512_1 - Success
-- Current compiler supports avx2 extension. Will build perfkernels.
-- Performing Test COMPILER_SUPPORTS_HIDDEN_VISIBILITY
-- Performing Test COMPILER_SUPPORTS_HIDDEN_VISIBILITY - Failed
-- Performing Test COMPILER_SUPPORTS_HIDDEN_INLINE_VISIBILITY
-- Performing Test COMPILER_SUPPORTS_HIDDEN_INLINE_VISIBILITY - Failed
-- Could not find hardware support for NEON on this machine.
-- No OMAP3 processor on this machine.
-- No OMAP4 processor on this machine.
-- Compiler does not support SVE extension. Will not build perfkernels.
CMake Warning at CMakeLists.txt:845 (message):
x64 operating system is required for FBGEMM. Not compiling with FBGEMM.
Turn this warning off by USE_FBGEMM=OFF.
-- Performing Test HAS/UTF_8
-- Performing Test HAS/UTF_8 - Success
-- Found CUDA: E:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v13.0 (found version "13.0")
-- The CUDA compiler identification is NVIDIA 13.0.48 with host compiler MSVC 19.44.35215.0
-- Detecting CUDA compiler ABI info
-- Detecting CUDA compiler ABI info - done
-- Check for working CUDA compiler: E:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v13.0/bin/nvcc.exe - skipped
-- Detecting CUDA compile features
-- Detecting CUDA compile features - done
-- Found CUDAToolkit: E:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v13.0/include (found version "13.0.48")
-- PyTorch: CUDA detected: 13.0
-- PyTorch: CUDA nvcc is: E:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v13.0/bin/nvcc.exe
-- PyTorch: CUDA toolkit directory: E:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v13.0
-- PyTorch: Header version is: 13.0
-- Found Python: E:\PyTorch_Build\pytorch\pytorch_env\Scripts\python.exe (found version "3.10.10") found components: Interpreter
CMake Warning at cmake/public/cuda.cmake:140 (message):
Failed to compute shorthash for libnvrtc.so
Call Stack (most recent call first):
cmake/Dependencies.cmake:44 (include)
CMakeLists.txt:873 (include)
-- Could NOT find CUDNN (missing: CUDNN_LIBRARY_PATH CUDNN_INCLUDE_PATH)
CMake Warning at cmake/public/cuda.cmake:201 (message):
Cannot find cuDNN library. Turning the option off
Call Stack (most recent call first):
cmake/Dependencies.cmake:44 (include)
CMakeLists.txt:873 (include)
-- Could NOT find CUSPARSELT (missing: CUSPARSELT_LIBRARY_PATH CUSPARSELT_INCLUDE_PATH)
CMake Warning at cmake/public/cuda.cmake:226 (message):
Cannot find cuSPARSELt library. Turning the option off
Call Stack (most recent call first):
cmake/Dependencies.cmake:44 (include)
CMakeLists.txt:873 (include)
-- Could NOT find CUDSS (missing: CUDSS_LIBRARY_PATH CUDSS_INCLUDE_PATH)
CMake Warning at cmake/public/cuda.cmake:242 (message):
Cannot find CUDSS library. Turning the option off
Call Stack (most recent call first):
cmake/Dependencies.cmake:44 (include)
CMakeLists.txt:873 (include)
-- USE_CUFILE is set to 0. Compiling without cuFile support
-- Autodetected CUDA architecture(s): 12.0
CMake Warning at cmake/public/cuda.cmake:317 (message):
pytorch is not compatible with `CMAKE_CUDA_ARCHITECTURES` and will ignore
its value. Please configure `TORCH_CUDA_ARCH_LIST` instead.
Call Stack (most recent call first):
cmake/Dependencies.cmake:44 (include)
CMakeLists.txt:873 (include)
-- Added CUDA NVCC flags for: -gencode;arch=compute_120,code=sm_120
CMake Warning at cmake/Dependencies.cmake:95 (message):
Not compiling with XPU. Could NOT find SYCL. Suppress this warning with
-DUSE_XPU=OFF.
Call Stack (most recent call first):
CMakeLists.txt:873 (include)
-- Building using own protobuf under third_party per request.
-- Use custom protobuf build.
CMake Warning at cmake/ProtoBuf.cmake:37 (message):
Ancient protobuf forces CMake compatibility
Call Stack (most recent call first):
cmake/ProtoBuf.cmake:87 (custom_protobuf_find)
cmake/Dependencies.cmake:107 (include)
CMakeLists.txt:873 (include)
CMake Deprecation Warning at third_party/protobuf/cmake/CMakeLists.txt:2 (cmake_minimum_required):
Compatibility with CMake < 3.10 will be removed from a future version of
CMake.
Update the VERSION argument <min> value. Or, use the <min>...<max> syntax
to tell CMake that the project requires at least <min> but has been updated
to work with policies introduced by <max> or earlier.
--
-- 3.13.0.0
-- Performing Test CMAKE_HAVE_LIBC_PTHREAD
-- Performing Test CMAKE_HAVE_LIBC_PTHREAD - Failed
-- Looking for pthread_create in pthreads
-- Looking for pthread_create in pthreads - not found
-- Looking for pthread_create in pthread
-- Looking for pthread_create in pthread - not found
-- Found Threads: TRUE
-- Caffe2 protobuf include directory: $<BUILD_INTERFACE:E:/PyTorch_Build/pytorch/third_party/protobuf/src>$<INSTALL_INTERFACE:include>
-- Trying to find preferred BLAS backend of choice: MKL
-- MKL_THREADING = OMP
-- Looking for sys/types.h
-- Looking for sys/types.h - found
-- Looking for stdint.h
-- Looking for stdint.h - found
-- Looking for stddef.h
-- Looking for stddef.h - found
-- Check size of void*
-- Check size of void* - done
-- MKL_THREADING = OMP
CMake Warning at cmake/Dependencies.cmake:213 (message):
MKL could not be found. Defaulting to Eigen
Call Stack (most recent call first):
CMakeLists.txt:873 (include)
CMake Warning at cmake/Dependencies.cmake:279 (message):
Preferred BLAS (MKL) cannot be found, now searching for a general BLAS
library
Call Stack (most recent call first):
CMakeLists.txt:873 (include)
-- MKL_THREADING = OMP
-- Checking for [mkl_intel_lp64 - mkl_intel_thread - mkl_core - libiomp5md]
-- Library mkl_intel_lp64: not found
-- Checking for [mkl_intel - mkl_intel_thread - mkl_core - libiomp5md]
-- Library mkl_intel: not found
-- Checking for [mkl_intel_lp64 - mkl_intel_thread - mkl_core]
-- Library mkl_intel_lp64: not found
-- Checking for [mkl_intel - mkl_intel_thread - mkl_core]
-- Library mkl_intel: not found
-- Checking for [mkl_intel_lp64 - mkl_sequential - mkl_core]
-- Library mkl_intel_lp64: not found
-- Checking for [mkl_intel - mkl_sequential - mkl_core]
-- Library mkl_intel: not found
-- Checking for [mkl_intel_lp64 - mkl_core - libiomp5md - pthread]
-- Library mkl_intel_lp64: not found
-- Checking for [mkl_intel - mkl_core - libiomp5md - pthread]
-- Library mkl_intel: not found
-- Checking for [mkl_intel_lp64 - mkl_core - pthread]
-- Library mkl_intel_lp64: not found
-- Checking for [mkl_intel - mkl_core - pthread]
-- Library mkl_intel: not found
-- Checking for [mkl - guide - pthread - m]
-- Library mkl: not found
-- MKL library not found
-- Checking for [blis]
-- Library blis: BLAS_blis_LIBRARY-NOTFOUND
-- Checking for [Accelerate]
-- Library Accelerate: BLAS_Accelerate_LIBRARY-NOTFOUND
-- Checking for [vecLib]
-- Library vecLib: BLAS_vecLib_LIBRARY-NOTFOUND
-- Checking for [flexiblas]
-- Library flexiblas: BLAS_flexiblas_LIBRARY-NOTFOUND
-- Checking for [openblas]
-- Library openblas: BLAS_openblas_LIBRARY-NOTFOUND
-- Checking for [openblas - pthread - m]
-- Library openblas: BLAS_openblas_LIBRARY-NOTFOUND
-- Checking for [openblas - pthread - m - gomp]
-- Library openblas: BLAS_openblas_LIBRARY-NOTFOUND
-- Checking for [libopenblas]
-- Library libopenblas: BLAS_libopenblas_LIBRARY-NOTFOUND
-- Checking for [goto2 - gfortran]
-- Library goto2: BLAS_goto2_LIBRARY-NOTFOUND
-- Checking for [goto2 - gfortran - pthread]
-- Library goto2: BLAS_goto2_LIBRARY-NOTFOUND
-- Checking for [acml - gfortran]
-- Library acml: BLAS_acml_LIBRARY-NOTFOUND
-- Checking for [blis]
-- Library blis: BLAS_blis_LIBRARY-NOTFOUND
-- Could NOT find Atlas (missing: Atlas_CBLAS_INCLUDE_DIR Atlas_CLAPACK_INCLUDE_DIR Atlas_CBLAS_LIBRARY Atlas_BLAS_LIBRARY Atlas_LAPACK_LIBRARY)
-- Checking for [ptf77blas - atlas - gfortran]
-- Library ptf77blas: BLAS_ptf77blas_LIBRARY-NOTFOUND
-- Checking for []
-- Looking for sgemm_
-- Looking for sgemm_ - not found
-- Cannot find a library with BLAS API. Not using BLAS.
-- Using pocketfft in directory: E:/PyTorch_Build/pytorch/third_party/pocketfft/
CMake Deprecation Warning at third_party/pthreadpool/CMakeLists.txt:1 (CMAKE_MINIMUM_REQUIRED):
Compatibility with CMake < 3.10 will be removed from a future version of
CMake.
Update the VERSION argument <min> value. Or, use the <min>...<max> syntax
to tell CMake that the project requires at least <min> but has been updated
to work with policies introduced by <max> or earlier.
CMake Deprecation Warning at third_party/FXdiv/CMakeLists.txt:1 (CMAKE_MINIMUM_REQUIRED):
Compatibility with CMake < 3.10 will be removed from a future version of
CMake.
Update the VERSION argument <min> value. Or, use the <min>...<max> syntax
to tell CMake that the project requires at least <min> but has been updated
to work with policies introduced by <max> or earlier.
CMake Deprecation Warning at third_party/cpuinfo/CMakeLists.txt:1 (CMAKE_MINIMUM_REQUIRED):
Compatibility with CMake < 3.10 will be removed from a future version of
CMake.
Update the VERSION argument <min> value. Or, use the <min>...<max> syntax
to tell CMake that the project requires at least <min> but has been updated
to work with policies introduced by <max> or earlier.
-- The ASM compiler identification is MSVC
CMake Warning (dev) at pytorch_env/Lib/site-packages/cmake/data/share/cmake-4.1/Modules/CMakeDetermineASMCompiler.cmake:234 (message):
Policy CMP194 is not set: MSVC is not an assembler for language ASM. Run
"cmake --help-policy CMP194" for policy details. Use the cmake_policy
command to set the policy and suppress this warning.
Call Stack (most recent call first):
third_party/XNNPACK/CMakeLists.txt:18 (PROJECT)
This warning is for project developers. Use -Wno-dev to suppress it.
-- Found assembler: C:/Program Files (x86)/Microsoft Visual Studio/2022/BuildTools/VC/Tools/MSVC/14.44.35207/bin/Hostx64/x64/cl.exe
-- Building for XNNPACK_TARGET_PROCESSOR: x86_64
-- Generating microkernels.cmake
Duplicate microkernel definition: src\qs8-qc4w-packw\gen\qs8-qc4w-packw-x8c8-gemm-goi-avx256vnni.c and src\qs8-qc4w-packw\gen\qs8-qc4w-packw-x8c8-gemm-goi-avxvnni.c (1th function)
Duplicate microkernel definition: src\qs8-qc4w-packw\gen\qs8-qc4w-packw-x8c8-gemm-goi-avxvnni.c and src\qs8-qc4w-packw\gen\qs8-qc4w-packw-x8c8-gemm-goi-scalar.c
No microkernel found in src\reference\binary-elementwise.cc
No microkernel found in src\reference\packing.cc
No microkernel found in src\reference\unary-elementwise.cc
-- Found Git: E:/Program Files/Git/cmd/git.exe (found version "2.51.0.windows.1")
-- Google Benchmark version: v1.9.3, normalized to 1.9.3
-- Looking for shm_open in rt
-- Looking for shm_open in rt - not found
-- Performing Test HAVE_CXX_FLAG_WX
-- Performing Test HAVE_CXX_FLAG_WX - Success
-- Compiling and running to test HAVE_STD_REGEX
-- Performing Test HAVE_STD_REGEX -- success
-- Compiling and running to test HAVE_GNU_POSIX_REGEX
-- Performing Test HAVE_GNU_POSIX_REGEX -- failed to compile
-- Compiling and running to test HAVE_POSIX_REGEX
-- Performing Test HAVE_POSIX_REGEX -- failed to compile
-- Compiling and running to test HAVE_STEADY_CLOCK
-- Performing Test HAVE_STEADY_CLOCK -- success
-- Compiling and running to test HAVE_PTHREAD_AFFINITY
-- Performing Test HAVE_PTHREAD_AFFINITY -- failed to compile
CMake Deprecation Warning at third_party/ittapi/CMakeLists.txt:7 (cmake_minimum_required):
Compatibility with CMake < 3.10 will be removed from a future version of
CMake.
Update the VERSION argument <min> value. Or, use the <min>...<max> syntax
to tell CMake that the project requires at least <min> but has been updated
to work with policies introduced by <max> or earlier.
CMake Warning at cmake/Dependencies.cmake:749 (message):
FP16 is only cmake-2.8 compatible
Call Stack (most recent call first):
CMakeLists.txt:873 (include)
CMake Deprecation Warning at third_party/FP16/CMakeLists.txt:1 (CMAKE_MINIMUM_REQUIRED):
Compatibility with CMake < 3.10 will be removed from a future version of
CMake.
Update the VERSION argument <min> value. Or, use the <min>...<max> syntax
to tell CMake that the project requires at least <min> but has been updated
to work with policies introduced by <max> or earlier.
CMake Deprecation Warning at third_party/psimd/CMakeLists.txt:1 (CMAKE_MINIMUM_REQUIRED):
Compatibility with CMake < 3.10 will be removed from a future version of
CMake.
Update the VERSION argument <min> value. Or, use the <min>...<max> syntax
to tell CMake that the project requires at least <min> but has been updated
to work with policies introduced by <max> or earlier.
-- Using third party subdirectory Eigen.
-- Found Python: E:\PyTorch_Build\pytorch\pytorch_env\Scripts\python.exe (found version "3.10.10") found components: Interpreter Development.Module missing components: NumPy
CMake Warning at cmake/Dependencies.cmake:826 (message):
NumPy could not be found. Not building with NumPy. Suppress this warning
with -DUSE_NUMPY=OFF
Call Stack (most recent call first):
CMakeLists.txt:873 (include)
-- Using third_party/pybind11.
-- pybind11 include dirs: E:/PyTorch_Build/pytorch/cmake/../third_party/pybind11/include
-- Could NOT find OpenTelemetryApi (missing: OpenTelemetryApi_INCLUDE_DIRS)
-- Using third_party/opentelemetry-cpp.
-- opentelemetry api include dirs: E:/PyTorch_Build/pytorch/cmake/../third_party/opentelemetry-cpp/api/include
-- Could NOT find MPI_C (missing: MPI_C_LIB_NAMES MPI_C_HEADER_DIR MPI_C_WORKS)
-- Could NOT find MPI_CXX (missing: MPI_CXX_LIB_NAMES MPI_CXX_HEADER_DIR MPI_CXX_WORKS)
-- Could NOT find MPI (missing: MPI_C_FOUND MPI_CXX_FOUND)
CMake Warning at cmake/Dependencies.cmake:894 (message):
Not compiling with MPI. Suppress this warning with -DUSE_MPI=OFF
Call Stack (most recent call first):
CMakeLists.txt:873 (include)
-- MKL_THREADING = OMP
-- Check OMP with lib C:/Program Files (x86)/Microsoft Visual Studio/2022/BuildTools/VC/Tools/MSVC/14.44.35207/lib/x64/libomp.lib and flags -openmp:experimental
-- MKL_THREADING = OMP
-- Check OMP with lib C:/Program Files (x86)/Microsoft Visual Studio/2022/BuildTools/VC/Tools/MSVC/14.44.35207/lib/x64/libomp.lib and flags -openmp:experimental
-- Found OpenMP_C: -openmp:experimental
-- Found OpenMP_CXX: -openmp:experimental
-- Found OpenMP: TRUE
-- Adding OpenMP CXX_FLAGS: -openmp:experimental
-- Will link against OpenMP libraries: C:/Program Files (x86)/Microsoft Visual Studio/2022/BuildTools/VC/Tools/MSVC/14.44.35207/lib/x64/libomp.lib
-- Found nvtx3: E:/PyTorch_Build/pytorch/third_party/NVTX/c/include
-- ROCM_PATH environment variable is not set and C:/opt/rocm does not exist.
Building without ROCm support.
-- Found Python3: E:\PyTorch_Build\pytorch\pytorch_env\Scripts\python.exe (found version "3.10.10") found components: Interpreter
-- ONNX_PROTOC_EXECUTABLE: $<TARGET_FILE:protobuf::protoc>
-- Protobuf_VERSION: Protobuf_VERSION_NOTFOUND
Generated: E:/PyTorch_Build/pytorch/build/third_party/onnx/onnx/onnx_onnx_torch-ml.proto
Generated: E:/PyTorch_Build/pytorch/build/third_party/onnx/onnx/onnx-operators_onnx_torch-ml.proto
Generated: E:/PyTorch_Build/pytorch/build/third_party/onnx/onnx/onnx-data_onnx_torch.proto
--
-- ******** Summary ********
-- CMake version : 4.1.0
-- CMake command : E:/PyTorch_Build/pytorch/pytorch_env/Lib/site-packages/cmake/data/bin/cmake.exe
-- System : Windows
-- C++ compiler : C:/Program Files (x86)/Microsoft Visual Studio/2022/BuildTools/VC/Tools/MSVC/14.44.35207/bin/Hostx64/x64/cl.exe
-- C++ compiler version : 19.44.35215.0
-- CXX flags : /DWIN32 /D_WINDOWS /EHsc /Zc:__cplusplus /bigobj /FS /utf-8 -DUSE_PTHREADPOOL /EHsc /wd26812
-- Build type : Release
-- Compile definitions : ONNX_ML=1;ONNXIFI_ENABLE_EXT=1
-- CMAKE_PREFIX_PATH : E:\PyTorch_Build\pytorch\pytorch_env\Lib\site-packages;E:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v13.0;E:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v13.0;E:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v13.0
-- CMAKE_INSTALL_PREFIX : E:/PyTorch_Build/pytorch/torch
-- CMAKE_MODULE_PATH : E:/PyTorch_Build/pytorch/cmake/Modules;E:/PyTorch_Build/pytorch/cmake/public/../Modules_CUDA_fix
--
-- ONNX version : 1.18.0
-- ONNX NAMESPACE : onnx_torch
-- ONNX_USE_LITE_PROTO : OFF
-- USE_PROTOBUF_SHARED_LIBS : OFF
-- ONNX_DISABLE_EXCEPTIONS : OFF
-- ONNX_DISABLE_STATIC_REGISTRATION : OFF
-- ONNX_WERROR : OFF
-- ONNX_BUILD_TESTS : OFF
-- BUILD_SHARED_LIBS : OFF
--
-- Protobuf compiler : $<TARGET_FILE:protobuf::protoc>
-- Protobuf includes :
-- Protobuf libraries :
-- ONNX_BUILD_PYTHON : OFF
-- Found CUDA with FP16 support, compiling with torch.cuda.HalfTensor
-- Adding -DNDEBUG to compile flags
-- Checking prototype magma_get_sgeqrf_nb for MAGMA_V2
-- Checking prototype magma_get_sgeqrf_nb for MAGMA_V2 - False
-- MAGMA not found. Compiling without MAGMA support
-- Could not find hardware support for NEON on this machine.
-- No OMAP3 processor on this machine.
-- No OMAP4 processor on this machine.
-- MKL_THREADING = OMP
-- Checking for [mkl_intel_lp64 - mkl_intel_thread - mkl_core - libiomp5md]
-- Library mkl_intel_lp64: not found
-- Checking for [mkl_intel - mkl_intel_thread - mkl_core - libiomp5md]
-- Library mkl_intel: not found
-- Checking for [mkl_intel_lp64 - mkl_intel_thread - mkl_core]
-- Library mkl_intel_lp64: not found
-- Checking for [mkl_intel - mkl_intel_thread - mkl_core]
-- Library mkl_intel: not found
-- Checking for [mkl_intel_lp64 - mkl_sequential - mkl_core]
-- Library mkl_intel_lp64: not found
-- Checking for [mkl_intel - mkl_sequential - mkl_core]
-- Library mkl_intel: not found
-- Checking for [mkl_intel_lp64 - mkl_core - libiomp5md - pthread]
-- Library mkl_intel_lp64: not found
-- Checking for [mkl_intel - mkl_core - libiomp5md - pthread]
-- Library mkl_intel: not found
-- Checking for [mkl_intel_lp64 - mkl_core - pthread]
-- Library mkl_intel_lp64: not found
-- Checking for [mkl_intel - mkl_core - pthread]
-- Library mkl_intel: not found
-- Checking for [mkl - guide - pthread - m]
-- Library mkl: not found
-- MKL library not found
-- Checking for [blis]
-- Library blis: BLAS_blis_LIBRARY-NOTFOUND
-- Checking for [Accelerate]
-- Library Accelerate: BLAS_Accelerate_LIBRARY-NOTFOUND
-- Checking for [vecLib]
-- Library vecLib: BLAS_vecLib_LIBRARY-NOTFOUND
-- Checking for [flexiblas]
-- Library flexiblas: BLAS_flexiblas_LIBRARY-NOTFOUND
-- Checking for [openblas]
-- Library openblas: BLAS_openblas_LIBRARY-NOTFOUND
-- Checking for [openblas - pthread - m]
-- Library openblas: BLAS_openblas_LIBRARY-NOTFOUND
-- Checking for [openblas - pthread - m - gomp]
-- Library openblas: BLAS_openblas_LIBRARY-NOTFOUND
-- Checking for [libopenblas]
-- Library libopenblas: BLAS_libopenblas_LIBRARY-NOTFOUND
-- Checking for [goto2 - gfortran]
-- Library goto2: BLAS_goto2_LIBRARY-NOTFOUND
-- Checking for [goto2 - gfortran - pthread]
-- Library goto2: BLAS_goto2_LIBRARY-NOTFOUND
-- Checking for [acml - gfortran]
-- Library acml: BLAS_acml_LIBRARY-NOTFOUND
-- Checking for [blis]
-- Library blis: BLAS_blis_LIBRARY-NOTFOUND
-- Could NOT find Atlas (missing: Atlas_CBLAS_INCLUDE_DIR Atlas_CLAPACK_INCLUDE_DIR Atlas_CBLAS_LIBRARY Atlas_BLAS_LIBRARY Atlas_LAPACK_LIBRARY)
-- Checking for [ptf77blas - atlas - gfortran]
-- Library ptf77blas: BLAS_ptf77blas_LIBRARY-NOTFOUND
-- Checking for []
-- Cannot find a library with BLAS API. Not using BLAS.
-- LAPACK requires BLAS
-- Cannot find a library with LAPACK API. Not using LAPACK.
disabling ROCM because NOT USE_ROCM is set
-- MIOpen not found. Compiling without MIOpen support
disabling MKLDNN because USE_MKLDNN is not set
-- {fmt} version: 11.2.0
-- Build type: Release
-- Using Kineto with CUPTI support
-- Configuring Kineto dependency:
-- KINETO_SOURCE_DIR = E:/PyTorch_Build/pytorch/third_party/kineto/libkineto
-- KINETO_BUILD_TESTS = OFF
-- KINETO_LIBRARY_TYPE = static
-- CUDA_SOURCE_DIR = E:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v13.0
-- CUDA_INCLUDE_DIRS = E:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v13.0/include
-- CUPTI_INCLUDE_DIR = E:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v13.0/extras/CUPTI/include
-- CUDA_cupti_LIBRARY = E:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v13.0/extras/CUPTI/lib64/cupti.lib
-- Found CUPTI
CMake Deprecation Warning at third_party/kineto/libkineto/CMakeLists.txt:7 (cmake_minimum_required):
Compatibility with CMake < 3.10 will be removed from a future version of
CMake.
Update the VERSION argument <min> value. Or, use the <min>...<max> syntax
to tell CMake that the project requires at least <min> but has been updated
to work with policies introduced by <max> or earlier.
CMake Warning (dev) at third_party/kineto/libkineto/CMakeLists.txt:15 (find_package):
Policy CMP0148 is not set: The FindPythonInterp and FindPythonLibs modules
are removed. Run "cmake --help-policy CMP0148" for policy details. Use
the cmake_policy command to set the policy and suppress this warning.
This warning is for project developers. Use -Wno-dev to suppress it.
-- Found PythonInterp: E:/PyTorch_Build/pytorch/pytorch_env/Scripts/python.exe (found version "3.10.10")
-- ROCM_SOURCE_DIR =
-- Kineto: FMT_SOURCE_DIR = E:/PyTorch_Build/pytorch/third_party/fmt
-- Kineto: FMT_INCLUDE_DIR = E:/PyTorch_Build/pytorch/third_party/fmt/include
-- CUPTI_INCLUDE_DIR = E:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v13.0/extras/CUPTI/include
-- ROCTRACER_INCLUDE_DIR = /include/roctracer
-- DYNOLOG_INCLUDE_DIR = E:/PyTorch_Build/pytorch/third_party/kineto/libkineto/third_party/dynolog/
-- IPCFABRIC_INCLUDE_DIR = E:/PyTorch_Build/pytorch/third_party/kineto/libkineto/third_party/dynolog//dynolog/src/ipcfabric/
-- Configured Kineto
-- Performing Test HAS/WD4624
-- Performing Test HAS/WD4624 - Success
-- Performing Test HAS/WD4068
-- Performing Test HAS/WD4068 - Success
-- Performing Test HAS/WD4067
-- Performing Test HAS/WD4067 - Success
-- Performing Test HAS/WD4267
-- Performing Test HAS/WD4267 - Success
-- Performing Test HAS/WD4661
-- Performing Test HAS/WD4661 - Success
-- Performing Test HAS/WD4717
-- Performing Test HAS/WD4717 - Success
-- Performing Test HAS/WD4244
-- Performing Test HAS/WD4244 - Success
-- Performing Test HAS/WD4804
-- Performing Test HAS/WD4804 - Success
-- Performing Test HAS/WD4273
-- Performing Test HAS/WD4273 - Success
-- Performing Test HAS_WNO_STRINGOP_OVERFLOW
-- Performing Test HAS_WNO_STRINGOP_OVERFLOW - Failed
--
-- Architecture: x64
-- Use the C++ compiler to compile (MI_USE_CXX=ON)
--
-- Library name : mimalloc
-- Version : 2.2.4
-- Build type : release
-- C++ Compiler : C:/Program Files (x86)/Microsoft Visual Studio/2022/BuildTools/VC/Tools/MSVC/14.44.35207/bin/Hostx64/x64/cl.exe
-- Compiler flags : /Zc:__cplusplus
-- Compiler defines : MI_CMAKE_BUILD_TYPE=release;MI_BUILD_RELEASE
-- Link libraries : psapi;shell32;user32;advapi32;bcrypt
-- Build targets : static
--
CMake Error at CMakeLists.txt:1264 (add_subdirectory):
The source directory
E:/PyTorch_Build/pytorch/torch/headeronly
does not contain a CMakeLists.txt file.
-- don't use NUMA
-- Looking for backtrace
-- Looking for backtrace - not found
-- Could NOT find Backtrace (missing: Backtrace_LIBRARY Backtrace_INCLUDE_DIR)
-- Autodetected CUDA architecture(s): 12.0
-- Autodetected CUDA architecture(s): 12.0
-- Autodetected CUDA architecture(s): 12.0
-- headers outputs:
torch\csrc\inductor\aoti_torch\generated\c_shim_cpu.h not found
torch\csrc\inductor\aoti_torch\generated\c_shim_aten.h not found
torch\csrc\inductor\aoti_torch\generated\c_shim_cuda.h not found
-- sources outputs:
-- declarations_yaml outputs:
-- Performing Test COMPILER_SUPPORTS_NO_AVX256_SPLIT
-- Performing Test COMPILER_SUPPORTS_NO_AVX256_SPLIT - Failed
-- Using ATen parallel backend: OMP
-- Could NOT find OpenSSL, try to set the path to OpenSSL root folder in the system variable OPENSSL_ROOT_DIR (missing: OPENSSL_CRYPTO_LIBRARY OPENSSL_INCLUDE_DIR)
-- Check size of long double
-- Check size of long double - done
-- Performing Test COMPILER_SUPPORTS_FLOAT128
-- Performing Test COMPILER_SUPPORTS_FLOAT128 - Failed
-- Performing Test COMPILER_SUPPORTS_SSE2
-- Performing Test COMPILER_SUPPORTS_SSE2 - Success
-- Performing Test COMPILER_SUPPORTS_SSE4
-- Performing Test COMPILER_SUPPORTS_SSE4 - Success
-- Performing Test COMPILER_SUPPORTS_AVX
-- Performing Test COMPILER_SUPPORTS_AVX - Success
-- Performing Test COMPILER_SUPPORTS_FMA4
-- Performing Test COMPILER_SUPPORTS_FMA4 - Success
-- Performing Test COMPILER_SUPPORTS_AVX2
-- Performing Test COMPILER_SUPPORTS_AVX2 - Success
-- Performing Test COMPILER_SUPPORTS_AVX512F
-- Performing Test COMPILER_SUPPORTS_AVX512F - Success
-- Found OpenMP_C: -openmp:experimental (found version "2.0")
-- Found OpenMP_CXX: -openmp:experimental (found version "2.0")
-- Found OpenMP_CUDA: -openmp (found version "2.0")
-- Found OpenMP: TRUE (found version "2.0")
-- Performing Test COMPILER_SUPPORTS_OPENMP
-- Performing Test COMPILER_SUPPORTS_OPENMP - Success
-- Performing Test COMPILER_SUPPORTS_OMP_SIMD
-- Performing Test COMPILER_SUPPORTS_OMP_SIMD - Failed
-- Performing Test COMPILER_SUPPORTS_WEAK_ALIASES
-- Performing Test COMPILER_SUPPORTS_WEAK_ALIASES - Failed
-- Performing Test COMPILER_SUPPORTS_BUILTIN_MATH
-- Performing Test COMPILER_SUPPORTS_BUILTIN_MATH - Failed
-- Performing Test COMPILER_SUPPORTS_SYS_GETRANDOM
-- Performing Test COMPILER_SUPPORTS_SYS_GETRANDOM - Failed
-- Configuring build for SLEEF-v3.8.0
Target system: Windows-10.0.26100
Target processor: AMD64
Host system: Windows-10.0.26100
Host processor: AMD64
Detected C compiler: MSVC @ C:/Program Files (x86)/Microsoft Visual Studio/2022/BuildTools/VC/Tools/MSVC/14.44.35207/bin/Hostx64/x64/cl.exe
CMake: 4.1.0
Make program: E:/PyTorch_Build/pytorch/pytorch_env/Scripts/ninja.exe
-- Using option `/D_CRT_SECURE_NO_WARNINGS /D_CRT_NONSTDC_NO_DEPRECATE ` to compile libsleef
-- Building shared libs : OFF
-- Building static test bins: OFF
-- MPFR : LIB_MPFR-NOTFOUND
-- GMP : LIBGMP-NOTFOUND
-- RT :
-- FFTW3 : LIBFFTW3-NOTFOUND
-- OPENSSL :
-- SDE : SDE_COMMAND-NOTFOUND
-- COMPILER_SUPPORTS_OPENMP : FALSE
AT_INSTALL_INCLUDE_DIR include/ATen/core
core header install: E:/PyTorch_Build/pytorch/build/aten/src/ATen/core/aten_interned_strings.h
core header install: E:/PyTorch_Build/pytorch/build/aten/src/ATen/core/enum_tag.h
core header install: E:/PyTorch_Build/pytorch/build/aten/src/ATen/core/TensorBody.h
CMake Error: File E:/PyTorch_Build/pytorch/torch/_utils_internal.py does not exist.
CMake Error at caffe2/CMakeLists.txt:241 (configure_file):
configure_file Problem configuring file
CMake Error: File E:/PyTorch_Build/pytorch/torch/csrc/api/include/torch/version.h.in does not exist.
CMake Error at caffe2/CMakeLists.txt:246 (configure_file):
configure_file Problem configuring file
-- NVSHMEM not found, not building with NVSHMEM support.
CMake Error at caffe2/CMakeLists.txt:1398 (add_subdirectory):
The source directory
E:/PyTorch_Build/pytorch/torch
does not contain a CMakeLists.txt file.
CMake Warning at CMakeLists.txt:1285 (message):
Generated cmake files are only fully tested if one builds with system glog,
gflags, and protobuf. Other settings may generate files that are not well
tested.
--
-- ******** Summary ********
-- General:
-- CMake version : 4.1.0
-- CMake command : E:/PyTorch_Build/pytorch/pytorch_env/Lib/site-packages/cmake/data/bin/cmake.exe
-- System : Windows
-- C++ compiler : C:/Program Files (x86)/Microsoft Visual Studio/2022/BuildTools/VC/Tools/MSVC/14.44.35207/bin/Hostx64/x64/cl.exe
-- C++ compiler id : MSVC
-- C++ compiler version : 19.44.35215.0
-- Using ccache if found : OFF
-- CXX flags : /DWIN32 /D_WINDOWS /EHsc /Zc:__cplusplus /bigobj /FS /utf-8 -DUSE_PTHREADPOOL -DNDEBUG -DUSE_KINETO -DLIBKINETO_NOROCTRACER -DLIBKINETO_NOXPUPTI=ON -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE /wd4624 /wd4068 /wd4067 /wd4267 /wd4661 /wd4717 /wd4244 /wd4804 /wd4273
-- Shared LD flags : /machine:x64 /ignore:4049 /ignore:4217 /ignore:4099
-- Static LD flags : /machine:x64 /ignore:4049 /ignore:4217 /ignore:4099
-- Module LD flags : /machine:x64 /ignore:4049 /ignore:4217 /ignore:4099
-- Build type : Release
-- Compile definitions : ONNX_ML=1;ONNXIFI_ENABLE_EXT=1;ONNX_NAMESPACE=onnx_torch;_CRT_SECURE_NO_DEPRECATE=1;USE_EXTERNAL_MZCRC;MINIZ_DISABLE_ZIP_READER_CRC32_CHECKS;EXPORT_AOTI_FUNCTIONS;WIN32_LEAN_AND_MEAN;_UCRT_LEGACY_INFINITY;NOMINMAX;USE_MIMALLOC
-- CMAKE_PREFIX_PATH : E:\PyTorch_Build\pytorch\pytorch_env\Lib\site-packages;E:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v13.0;E:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v13.0;E:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v13.0
-- CMAKE_INSTALL_PREFIX : E:/PyTorch_Build/pytorch/torch
-- USE_GOLD_LINKER : OFF
--
-- TORCH_VERSION : 2.9.0
-- BUILD_STATIC_RUNTIME_BENCHMARK: OFF
-- BUILD_BINARY : OFF
-- BUILD_CUSTOM_PROTOBUF : ON
-- Link local protobuf : ON
-- BUILD_PYTHON : True
-- Python version : 3.10.10
-- Python executable : E:\PyTorch_Build\pytorch\pytorch_env\Scripts\python.exe
-- Python library : E:/Python310/libs/python310.lib
-- Python includes : E:/Python310/Include
-- Python site-package : E:\PyTorch_Build\pytorch\pytorch_env\Lib\site-packages
-- BUILD_SHARED_LIBS : ON
-- CAFFE2_USE_MSVC_STATIC_RUNTIME : OFF
-- BUILD_TEST : True
-- BUILD_JNI : OFF
-- BUILD_MOBILE_AUTOGRAD : OFF
-- BUILD_LITE_INTERPRETER: OFF
-- INTERN_BUILD_MOBILE :
-- TRACING_BASED : OFF
-- USE_BLAS : 0
-- USE_LAPACK : 0
-- USE_ASAN : OFF
-- USE_TSAN : OFF
-- USE_CPP_CODE_COVERAGE : OFF
-- USE_CUDA : ON
-- CUDA static link : OFF
-- USE_CUDNN : OFF
-- USE_CUSPARSELT : OFF
-- USE_CUDSS : OFF
-- USE_CUFILE : OFF
-- CUDA version : 13.0
-- USE_FLASH_ATTENTION : OFF
-- USE_MEM_EFF_ATTENTION : ON
-- CUDA root directory : E:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v13.0
-- CUDA library : E:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v13.0/lib/x64/cuda.lib
-- cudart library : E:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v13.0/lib/x64/cudart.lib
-- cublas library : E:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v13.0/lib/x64/cublas.lib
-- cufft library : E:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v13.0/lib/x64/cufft.lib
-- curand library : E:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v13.0/lib/x64/curand.lib
-- cusparse library : E:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v13.0/lib/x64/cusparse.lib
-- nvrtc : E:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v13.0/lib/x64/nvrtc.lib
-- CUDA include path : E:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v13.0/include
-- NVCC executable : E:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v13.0/bin/nvcc.exe
-- CUDA compiler : E:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v13.0/bin/nvcc.exe
-- CUDA flags : -DLIBCUDACXX_ENABLE_SIMPLIFIED_COMPLEX_OPERATIONS -Xcompiler /Zc:__cplusplus -Xcompiler /w -w -Xcompiler /FS -Xfatbin -compress-all -DONNX_NAMESPACE=onnx_torch --use-local-env -gencode arch=compute_120,code=sm_120 -Xcudafe --diag_suppress=cc_clobber_ignored,--diag_suppress=field_without_dll_interface,--diag_suppress=base_class_has_different_dll_interface,--diag_suppress=dll_interface_conflict_none_assumed,--diag_suppress=dll_interface_conflict_dllexport_assumed,--diag_suppress=bad_friend_decl --Werror cross-execution-space-call --no-host-device-move-forward --expt-relaxed-constexpr --expt-extended-lambda -Xcompiler=/wd4819,/wd4503,/wd4190,/wd4244,/wd4251,/wd4275,/wd4522 -Wno-deprecated-gpu-targets --expt-extended-lambda -DCUB_WRAPPED_NAMESPACE=at_cuda_detail -DCUDA_HAS_FP16=1 -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ -D__CUDA_NO_BFLOAT16_CONVERSIONS__
-- CUDA host compiler :
-- CUDA --device-c : OFF
-- USE_TENSORRT :
-- USE_XPU : OFF
-- USE_ROCM : OFF
-- BUILD_NVFUSER :
-- USE_EIGEN_FOR_BLAS : ON
-- USE_EIGEN_FOR_SPARSE : OFF
-- USE_FBGEMM : OFF
-- USE_KINETO : ON
-- USE_GFLAGS : OFF
-- USE_GLOG : OFF
-- USE_LITE_PROTO : OFF
-- USE_PYTORCH_METAL : OFF
-- USE_PYTORCH_METAL_EXPORT : OFF
-- USE_MPS : OFF
-- CAN_COMPILE_METAL :
-- USE_MKL : OFF
-- USE_MKLDNN : OFF
-- USE_UCC : OFF
-- USE_ITT : ON
-- USE_XCCL : OFF
-- USE_NCCL : OFF
-- Found NVSHMEM :
-- USE_NNPACK : OFF
-- USE_NUMPY : OFF
-- USE_OBSERVERS : ON
-- USE_OPENCL : OFF
-- USE_OPENMP : ON
-- USE_MIMALLOC : ON
-- USE_MIMALLOC_ON_MKL : OFF
-- USE_VULKAN : OFF
-- USE_PROF : OFF
-- USE_PYTORCH_QNNPACK : OFF
-- USE_XNNPACK : ON
-- USE_DISTRIBUTED : OFF
-- Public Dependencies :
-- Private Dependencies : Threads::Threads;pthreadpool;cpuinfo;XNNPACK;microkernels-prod;ittnotify;fp16;caffe2::openmp;fmt::fmt-header-only;kineto
-- Public CUDA Deps. :
-- Private CUDA Deps. : caffe2::curand;caffe2::cufft;caffe2::cublas;fmt::fmt-header-only;E:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v13.0/lib/x64/cudart_static.lib;CUDA::cusparse;CUDA::cufft;CUDA::cusolver;ATEN_CUDA_FILES_GEN_LIB
-- USE_COREML_DELEGATE : OFF
-- BUILD_LAZY_TS_BACKEND : ON
-- USE_ROCM_KERNEL_ASSERT : OFF
-- Performing Test HAS_WMISSING_PROTOTYPES
-- Performing Test HAS_WMISSING_PROTOTYPES - Failed
-- Performing Test HAS_WERROR_MISSING_PROTOTYPES
-- Performing Test HAS_WERROR_MISSING_PROTOTYPES - Failed
-- Configuring incomplete, errors occurred!
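Note: the configure fails for three separable reasons visible in the log: CMake errors about E:/PyTorch_Build/pytorch/torch/headeronly, torch/_utils_internal.py and torch/csrc/api/include/torch/version.h.in not existing (the torch/ source tree looks incomplete or damaged), MKL/BLAS not being found, and cuDNN not being found. A hedged first step, assuming the checkout was damaged rather than intentionally modified, is to restore the tree and its submodules before re-running:

# Sketch: restore missing files in the git checkout and its submodules.
git -C E:\PyTorch_Build\pytorch status --short            # see what is deleted/modified
git -C E:\PyTorch_Build\pytorch checkout -- torch         # restore deleted tracked files under torch/
git -C E:\PyTorch_Build\pytorch submodule update --init --recursive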
(pytorch_env) PS E:\PyTorch_Build\pytorch> # Permanently fix the 'conda' command not being available
(pytorch_env) PS E:\PyTorch_Build\pytorch> $condaPaths = @(
>> "$env:USERPROFILE\miniconda3\Scripts",
>> "$env:USERPROFILE\anaconda3\Scripts",
>> "C:\ProgramData\miniconda3\Scripts"
>> )
(pytorch_env) PS E:\PyTorch_Build\pytorch>
(pytorch_env) PS E:\PyTorch_Build\pytorch> foreach ($path in $condaPaths) {
>> if (Test-Path $path) {
>> $env:PATH = "$path;$env:PATH"
>> [Environment]::SetEnvironmentVariable("PATH", $env:PATH, "Machine")
>> break
>> }
>> }
(pytorch_env) PS E:\PyTorch_Build\pytorch>
(pytorch_env) PS E:\PyTorch_Build\pytorch> # Verify the fix
(pytorch_env) PS E:\PyTorch_Build\pytorch> conda --version
conda: The term 'conda' is not recognized as a name of a cmdlet, function, script file, or executable program.
Check the spelling of the name, or if a path was included, verify that the path is correct and try again.
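Note: none of the three hard-coded candidate paths existed on this machine, so the foreach loop above changed nothing. A quick way to confirm that before resorting to a full disk search:

$condaPaths | ForEach-Object { "{0} exists: {1}" -f $_, (Test-Path $_) }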
(pytorch_env) PS E:\PyTorch_Build\pytorch> # Set the cuDNN v9.12 path
(pytorch_env) PS E:\PyTorch_Build\pytorch> $cudnnPath = "E:\Program Files\NVIDIA\CUNND\v9.12"
(pytorch_env) PS E:\PyTorch_Build\pytorch>
(pytorch_env) PS E:\PyTorch_Build\pytorch> # Add to environment variables
(pytorch_env) PS E:\PyTorch_Build\pytorch> $env:CUDNN_ROOT_DIR = $cudnnPath
(pytorch_env) PS E:\PyTorch_Build\pytorch> $env:CUDNN_INCLUDE_DIR = "$cudnnPath\include"
(pytorch_env) PS E:\PyTorch_Build\pytorch> $env:CUDNN_LIBRARY = "$cudnnPath\lib\x64\cudnn.lib"
(pytorch_env) PS E:\PyTorch_Build\pytorch>
(pytorch_env) PS E:\PyTorch_Build\pytorch> # Make the change permanent
(pytorch_env) PS E:\PyTorch_Build\pytorch> [Environment]::SetEnvironmentVariable("CUDNN_ROOT_DIR", $cudnnPath, "Machine")
(pytorch_env) PS E:\PyTorch_Build\pytorch> [Environment]::SetEnvironmentVariable("CUDNN_INCLUDE_DIR", "$cudnnPath\include", "Machine")
(pytorch_env) PS E:\PyTorch_Build\pytorch> [Environment]::SetEnvironmentVariable("CUDNN_LIBRARY", "$cudnnPath\lib\x64\cudnn.lib", "Machine")
(pytorch_env) PS E:\PyTorch_Build\pytorch> # The original code is at roughly line 190
(pytorch_env) PS E:\PyTorch_Build\pytorch> # Replace it with the following to force v9.12:
(pytorch_env) PS E:\PyTorch_Build\pytorch>
(pytorch_env) PS E:\PyTorch_Build\pytorch> set(CUDNN_VERSION "9.12.0") # manually specify the version
CUDNN_VERSION: The term 'CUDNN_VERSION' is not recognized as a name of a cmdlet, function, script file, or executable program.
Check the spelling of the name, or if a path was included, verify that the path is correct and try again.
(pytorch_env) PS E:\PyTorch_Build\pytorch> set(CUDNN_FOUND TRUE)
CUDNN_FOUND: The term 'CUDNN_FOUND' is not recognized as a name of a cmdlet, function, script file, or executable program.
Check the spelling of the name, or if a path was included, verify that the path is correct and try again.
(pytorch_env) PS E:\PyTorch_Build\pytorch> set(CUDNN_INCLUDE_DIR $ENV{CUDNN_INCLUDE_DIR})
InvalidOperation: The variable '$ENV' cannot be retrieved because it has not been set.
(pytorch_env) PS E:\PyTorch_Build\pytorch> set(CUDNN_LIBRARY $ENV{CUDNN_LIBRARY})
InvalidOperation: The variable '$ENV' cannot be retrieved because it has not been set.
(pytorch_env) PS E:\PyTorch_Build\pytorch>
(pytorch_env) PS E:\PyTorch_Build\pytorch> message(STATUS "Using manually configured cuDNN v${CUDNN_VERSION}")
InvalidOperation: The variable '$CUDNN_VERSION' cannot be retrieved because it has not been set.
(pytorch_env) PS E:\PyTorch_Build\pytorch> message(STATUS " Include path: ${CUDNN_INCLUDE_DIR}")
InvalidOperation: The variable '$CUDNN_INCLUDE_DIR' cannot be retrieved because it has not been set.
(pytorch_env) PS E:\PyTorch_Build\pytorch> message(STATUS " Library path: ${CUDNN_LIBRARY}")
InvalidOperation: The variable '$CUDNN_LIBRARY' cannot be retrieved because it has not been set.
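Note: the set(...) and message(...) lines above are CMake commands meant for the build's .cmake files, so PowerShell rejects them. Rather than patching the CMake scripts by hand, the same intent can usually be expressed from the shell: keep the CUDNN_* variables exported earlier in this session and re-run the configure so it can pick them up (whether these exact names are honored will show in the cuDNN detection lines of the next log). A sketch:

# Re-run the configure with the cuDNN variables in the environment instead of editing CMake files.
Remove-Item -Recurse -Force build -ErrorAction SilentlyContinue
python setup.py install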
(pytorch_env) PS E:\PyTorch_Build\pytorch> # Locate conda.bat precisely
(pytorch_env) PS E:\PyTorch_Build\pytorch> $condaPath = Get-ChildItem -Path C:\ -Recurse -Filter conda.bat -ErrorAction SilentlyContinue |
>> Select-Object -First 1 |
>> ForEach-Object { $_.DirectoryName }
(pytorch_env) PS E:\PyTorch_Build\pytorch>
(pytorch_env) PS E:\PyTorch_Build\pytorch> if ($condaPath) {
>> $env:PATH = "$condaPath;$env:PATH"
>> [Environment]::SetEnvironmentVariable("PATH", $env:PATH, "Machine")
>> Write-Host "Conda found at: $condaPath" -ForegroundColor Green
>> } else {
>> Write-Host "Conda not found! Installing miniconda..." -ForegroundColor Yellow
>> # Automatically install Miniconda
>> Invoke-WebRequest -Uri "https://repo.anaconda.com/miniconda/Miniconda3-latest-Windows-x86_64.exe" -OutFile "$env:TEMP\miniconda.exe"
>> Start-Process -FilePath "$env:TEMP\miniconda.exe" -ArgumentList "/S", "/AddToPath=1", "/InstallationType=AllUsers", "/D=C:\Miniconda3" -Wait
>> $env:PATH = "C:\Miniconda3\Scripts;$env:PATH"
>> }
Conda not found! Installing miniconda...
/AddToPath=1 is disabled and ignored in 'All Users' installations
Welcome to Miniconda3 py313_25.7.0-2
By continuing this installation you are accepting this license agreement:
C:\Miniconda3\EULA.txt
Please run the installer in GUI mode to read the details.
Miniconda3 will now be installed into this location:
C:\Miniconda3
Unpacking payload...
Setting up the package cache...
Setting up the base environment...
Installing packages for base, creating shortcuts if necessary...
Initializing conda directories...
Setting installation directory permissions...
Done!
(pytorch_env) PS E:\PyTorch_Build\pytorch>
(pytorch_env) PS E:\PyTorch_Build\pytorch>
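Note: the installer finished, but as its own message says, /AddToPath=1 is ignored for an All-Users install, and the Start-Process branch never reached the line that prepends C:\Miniconda3\Scripts, so conda is still not on PATH in this session. A closing sketch, assuming the default C:\Miniconda3 location used above, that makes conda usable and then runs the two installs steps 2 and 3 originally wanted:

$env:PATH = "C:\Miniconda3;C:\Miniconda3\Scripts;C:\Miniconda3\condabin;$env:PATH"
conda --version                                    # should now print the conda version
conda install -y -c conda-forge libuv=1.46 openssl=3.1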