flash.system.Capabilities: a useful API

This post shows a way to read the display size from ActionScript (AS): Capabilities.screenResolutionX returns the width of the screen resolution, and Capabilities.screenResolutionY returns the height, both in pixels.
How do you get the width of the current page in AS?


import flash.system.Capabilities;
trace(Capabilities.screenResolutionX); // horizontal screen resolution, in pixels
trace(Capabilities.screenResolutionY); // vertical screen resolution, in pixels
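
Note that these two properties report the resolution of the entire display, not the size of the SWF stage or of the HTML page the movie is embedded in. Below is a minimal sketch that prints both values side by side; it assumes the code runs in a document class (so stage is available in the constructor), and the class name ScreenInfo is purely illustrative.

package {
    import flash.display.Sprite;
    import flash.system.Capabilities;

    public class ScreenInfo extends Sprite {
        public function ScreenInfo() {
            // Resolution of the whole screen, in pixels
            trace("screen: " + Capabilities.screenResolutionX + " x " + Capabilities.screenResolutionY);
            // Size of the area the SWF is actually rendered into
            trace("stage: " + stage.stageWidth + " x " + stage.stageHeight);
        }
    }
}

If what you actually need is the width of the browser page rather than the screen, the usual route is to ask the page itself via flash.external.ExternalInterface; that is outside the scope of this snippet.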