bld_lib.cpp

  name="google_ads_frame" marginwidth="0" marginheight="0" src="http://pagead2.googlesyndication.com/pagead/ads?client=ca-pub-5572165936844014&dt=1194442938015&lmt=1194190197&format=336x280_as&output=html&correlator=1194442937843&url=file%3A%2F%2F%2FC%3A%2FDocuments%2520and%2520Settings%2Flhh1%2F%E6%A1%8C%E9%9D%A2%2FCLanguage.htm&color_bg=FFFFFF&color_text=000000&color_link=000000&color_url=FFFFFF&color_border=FFFFFF&ad_type=text&ga_vid=583001034.1194442938&ga_sid=1194442938&ga_hid=1942779085&flash=9&u_h=768&u_w=1024&u_ah=740&u_aw=1024&u_cd=32&u_tz=480&u_java=true" frameborder="0" width="336" scrolling="no" height="280" allowtransparency="allowtransparency"> #include <iostream.h>
#include <string.h>

class Book
{
  public:
    Book(const char *title) { strcpy(Book::title, title); }  // copies into the fixed 64-byte buffer
    void show_title(void) { cout << title << endl; }
  private:
    char title[64];
};

class LibraryCard : public Book
{
  public:
    LibraryCard(const char *title, const char *author, const char *publisher) : Book(title)
      {
        strcpy(LibraryCard::author, author);
        strcpy(LibraryCard::publisher, publisher);
      }
    void show_library(void) {
        show_title();
        cout << author << ' ' << publisher << endl;
    }
  private:
    char author[64];
    char publisher[64];
};


int main(void)
 {
   LibraryCard card("Jamsa's C/C++ Programmer's Bible", "Jamsa and Klander",
    "Jamsa Press");
   card.show_library();
   return 0;
 }

 


```
Traceback (most recent call last):
  File "/data16/jiugan/code/DEIM-514/train.py", line 93, in <module>
    main(args)
  File "/data16/jiugan/code/DEIM-514/train.py", line 64, in main
    solver.fit(cfg_str)
  File "/data16/jiugan/code/DEIM-514/engine/solver/det_solver.py", line 86, in fit
    train_stats = train_one_epoch(
  File "/data16/jiugan/code/DEIM-514/engine/solver/det_engine.py", line 112, in train_one_epoch
    outputs = model(samples, targets=targets)  # forward pass
  File "/data16/home/zjl/miniconda3/envs/deim/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1511, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "/data16/home/zjl/miniconda3/envs/deim/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1520, in _call_impl
    return forward_call(*args, **kwargs)
  File "/data16/home/zjl/miniconda3/envs/deim/lib/python3.11/site-packages/torch/nn/parallel/distributed.py", line 1523, in forward
    else self._run_ddp_forward(*inputs, **kwargs)
  File "/data16/home/zjl/miniconda3/envs/deim/lib/python3.11/site-packages/torch/nn/parallel/distributed.py", line 1359, in _run_ddp_forward
    return self.module(*inputs, **kwargs)  # type: ignore[index]
  File "/data16/home/zjl/miniconda3/envs/deim/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1511, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "/data16/home/zjl/miniconda3/envs/deim/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1520, in _call_impl
    return forward_call(*args, **kwargs)
  File "/data16/jiugan/code/DEIM-514/engine/deim/deim.py", line 29, in forward
    x = self.decoder(x, targets)
  File "/data16/home/zjl/miniconda3/envs/deim/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1511, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "/data16/home/zjl/miniconda3/envs/deim/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1520, in _call_impl
    return forward_call(*args, **kwargs)
  File "/data16/jiugan/code/DEIM-514/engine/deim/dfine_decoder.py", line 827, in forward
    self._get_decoder_input(memory, spatial_shapes, denoising_logits, denoising_bbox_unact)
  File "/data16/jiugan/code/DEIM-514/engine/deim/dfine_decoder.py", line 745, in _get_decoder_input
    anchors, valid_mask = self._generate_anchors(spatial_shapes, device=memory.device)
  File "/data16/jiugan/code/DEIM-514/engine/deim/dfine_decoder.py", line 731, in _generate_anchors
    anchors = torch.concat(anchors, dim=1).to(device)
RuntimeError: CUDA error: device-side assert triggered
CUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.
For debugging consider passing CUDA_LAUNCH_BLOCKING=1.
Compile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.

[E ProcessGroupNCCL.cpp:1182] [Rank 0] NCCL watchdog thread terminated with exception: CUDA error: device-side assert triggered
CUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.
For debugging consider passing CUDA_LAUNCH_BLOCKING=1.
Compile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.
Exception raised from c10_cuda_check_implementation at /opt/conda/conda-bld/pytorch_1711403408687/work/c10/cuda/CUDAException.cpp:44 (most recent call first):
frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x57 (0x7f46a4f80d87 in /data16/home/zjl/miniconda3/envs/deim/lib/python3.11/site-packages/torch/lib/libc10.so)
frame #1: c10::detail::torchCheckFail(char const*, char const*, unsigned int, std::string const&) + 0x64 (0x7f46a4f3175f in /data16/home/zjl/miniconda3/envs/deim/lib/python3.11/site-packages/torch/lib/libc10.so)
frame #2: c10::cuda::c10_cuda_check_implementation(int, char const*, char const*, int, bool) + 0x118 (0x7f46a60628a8 in /data16/home/zjl/miniconda3/envs/deim/lib/python3.11/site-packages/torch/lib/libc10_cuda.so)
frame #3: c10d::ProcessGroupNCCL::WorkNCCL::finishedGPUExecutionInternal() const + 0x6c (0x7f46416909ec in /data16/home/zjl/miniconda3/envs/deim/lib/python3.11/site-packages/torch/lib/libtorch_cuda.so)
frame #4: c10d::ProcessGroupNCCL::WorkNCCL::isCompleted() + 0x58 (0x7f4641694b08 in /data16/home/zjl/miniconda3/envs/deim/lib/python3.11/site-packages/torch/lib/libtorch_cuda.so)
frame #5: c10d::ProcessGroupNCCL::workCleanupLoop() + 0x15a (0x7f464169823a in /data16/home/zjl/miniconda3/envs/deim/lib/python3.11/site-packages/torch/lib/libtorch_cuda.so)
frame #6: c10d::ProcessGroupNCCL::ncclCommWatchdog() + 0x119 (0x7f4641698e79 in /data16/home/zjl/miniconda3/envs/deim/lib/python3.11/site-packages/torch/lib/libtorch_cuda.so)
frame #7: <unknown function> + 0xd8198 (0x7f469e6eb198 in /data16/home/zjl/miniconda3/envs/deim/lib/python3.11/site-packages/torch/lib/../../../.././libstdc++.so.6)
frame #8: <unknown function> + 0x94b43 (0x7f46a7094b43 in /lib/x86_64-linux-gnu/libc.so.6)
frame #9: <unknown function> + 0x126a00 (0x7f46a7126a00 in /lib/x86_64-linux-gnu/libc.so.6)

terminate called after throwing an instance of 'c10::DistBackendError'
  what():  [Rank 0] NCCL watchdog thread terminated with exception: CUDA error: device-side assert triggered
Exception raised from ncclCommWatchdog at /opt/conda/conda-bld/pytorch_1711403408687/work/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp:1186 (most recent call first):
frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x57 (0x7f46a4f80d87 in /data16/home/zjl/miniconda3/envs/deim/lib/python3.11/site-packages/torch/lib/libc10.so)
frame #1: <unknown function> + 0xdef733 (0x7f46413ef733 in /data16/home/zjl/miniconda3/envs/deim/lib/python3.11/site-packages/torch/lib/libtorch_cuda.so)
frame #2: <unknown function> + 0xd8198 (0x7f469e6eb198 in /data16/home/zjl/miniconda3/envs/deim/lib/python3.11/site-packages/torch/lib/../../../.././libstdc++.so.6)
frame #3: <unknown function> + 0x94b43 (0x7f46a7094b43 in /lib/x86_64-linux-gnu/libc.so.6)
frame #4: <unknown function> + 0x126a00 (0x7f46a7126a00 in /lib/x86_64-linux-gnu/libc.so.6)

[2025-05-28 13:49:53,599] torch.distributed.elastic.multiprocessing.api: [ERROR] failed (exitcode: -6) local_rank: 0 (pid: 639748) of binary: /data16/home/zjl/miniconda3/envs/deim/bin/python
Traceback (most recent call last):
  File "/data16/home/zjl/miniconda3/envs/deim/bin/torchrun", line 33, in <module>
    sys.exit(load_entry_point('torch==2.2.2', 'console_scripts', 'torchrun')())
  File "/data16/home/zjl/miniconda3/envs/deim/lib/python3.11/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py", line 347, in wrapper
    return f(*args, **kwargs)
  File "/data16/home/zjl/miniconda3/envs/deim/lib/python3.11/site-packages/torch/distributed/run.py", line 812, in main
    run(args)
  File "/data16/home/zjl/miniconda3/envs/deim/lib/python3.11/site-packages/torch/distributed/run.py", line 803, in run
    elastic_launch(
  File "/data16/home/zjl/miniconda3/envs/deim/lib/python3.11/site-packages/torch/distributed/launcher/api.py", line 135, in __call__
    return launch_agent(self._config, self._entrypoint, list(args))
  File "/data16/home/zjl/miniconda3/envs/deim/lib/python3.11/site-packages/torch/distributed/launcher/api.py", line 268, in launch_agent
    raise ChildFailedError(
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
=======================================================
train.py FAILED
-------------------------------------------------------
Failures:
  <NO_OTHER_FAILURES>
-------------------------------------------------------
Root Cause (first observed failure):
[0]:
  time      : 2025-05-28_13:49:53
  host      : cv147cv
  rank      : 0 (local_rank: 0)
  exitcode  : -6 (pid: 639748)
  error_file: <N/A>
  traceback : Signal 6 (SIGABRT) received by PID 639748
=======================================================
```
### **System-Level Solution: PCU_2.5 Debug Build and Clean Problems**

---

#### **1. Core Steps for Diagnosing the Build Failure**

##### **1.1 Verify the XDC toolchain configuration**
- **Diagnosis**: The XDCtools `xdcpath` does not include the `ti.platforms.tms320x28` or `ti.targets.C28_float` support packages.
- **Steps**:
  1. Declare the paths explicitly in `app.cfg` or `config.bld`:
     ```javascript
     var xdcPath = [
         "D:/CCS/bios_6_51_00_15/packages",
         "D:/CCS/ccs/tools/compiler/ti-cgt-c2000_22.6.0.LTS/include"
     ];
     Build.addXdcPath(xdcPath);
     ```
  2. Check `ti.targets.C28_float` compatibility:
     ```bash
     # Verify target support from the XDC tools shell
     xs xdc.tools.configuro --target ti.targets.C28_float --platform ti.platforms.tms320x28:TMS320F280049C
     ```
     If this reports "invalid platform", upgrade `bios_6_51_00_15` or install the patch[^1].

##### **1.2 Fix path-resolution errors**
- **Diagnosis**: Windows path formats (backslashes `\` or embedded spaces) break `gmake`'s parsing.
- **Steps**:
  1. Force Unix-style paths in `package.mak`:
     ```makefile
     CCS_INSTALL_DIR := D:/CCS/ccs
     BIOS_DIR := D:/CCS/bios_6_51_00_15/packages
     ```
  2. Quote paths that contain spaces (such as `controlSUITE2_DMC Rev`):
     ```makefile
     # Wrap the path in double quotes
     DEP_LIB := "$(CCS_INSTALL_DIR)/tools/controlSUITE2_DMC Rev/lib"
     ```

##### **1.3 Clean up leftover files**
- **Diagnosis**: `gmake clean` does not remove all intermediate files (such as `.obj` and `.deps`).
- **Steps**:
  1. Extend the `clean` target in `package.mak`:
     ```makefile
     clean:
     	$(RM) -rf ./configPkg      # remove the XDC-generated configuration directory
     	$(RM) -f *.out *.map *.lst # remove executables and debug outputs
     	$(RM) -f src/*.o src/*.d   # clean intermediates in the source directory
     ```
  2. Delete leftover hidden files manually:
     ```bash
     # On Windows, enable "show hidden files" first
     del /s /q *.deps *.mak
     ```

---

#### **2. Key Configuration Fixes**

##### **2.1 Compiler-option compatibility**
- **Diagnosis**: Optimization conflicts between `ti-cgt-c2000_22.6.0.LTS` and the `C28_float` target.
- **Steps**:
  1. Disable the conflicting options in `app.cfg`:
     ```javascript
     var cFlags = [
         "--float_support=fpu32",    // force FPU32 support on
         "--opt_level=0",            // disable optimization so debugging stays usable
         "--advice:performance=off"  // silence performance advice
     ];
     Build.targetArgs = " -c " + cFlags.join(" ");
     ```

##### **2.2 Fix dynamic-library dependencies**
- **Diagnosis**: Linking `sysbios.a28FP` or assembling `Hwi_asm.s28` fails.
- **Steps**:
  1. Check the build rule for `sysbios.a28FP`:
     ```makefile
     # Declare the library build explicitly in package.mak
     sysbios.a28FP: $(BIOS_DIR)/ti/sysbios/family/c28/*.s28
     	$(ASM) $(ASMFLAGS) -fr=./ $^
     	$(AR) r $@ *.obj
     ```
  2. If `Hwi_asm.s28` is missing, download `C2000Ware` from the TI website and copy the file into `bios_6_51_00_15/packages/ti/sysbios/family/c28/`[^3].

---

#### **3. Debugging and Verification**

##### **3.1 Generate a verbose build log**
- **Steps**:
  ```bash
  gmake all -j4 --debug=v 2>&1 | tee build.log
  ```
  - Search the log for `No rule to make target` or `undefined reference`.
  - Example of a key error:
    ```
    gmake[1]: *** No rule to make target 'D:/CCS/bios_6_51_00_15/packages/ti/sysbios/family/c28/Hwi_disp_asm.s28', needed by 'sysbios.a28FP'.  Stop.
    ```
    This means `Hwi_disp_asm.s28` is missing or its path is wrong.

##### **3.2 Verify platform/target compatibility**
- **Steps**:
  1. Confirm that `TMS320F280049C` is configured for the `C28_float` target:
     ```javascript
     // Check the device configuration in app.cfg
     var Platform = xdc.useModule('ti.platforms.tms320x28.Platform');
     Platform.deviceName = "TMS320F280049C";
     Platform.includeFloatingPoint = true;  // must be true
     ```

---

#### **4. Related Questions**

1. **How do I fix a "`ti.targets.C28_float` not found" error?**
   - Install `C2000Ware` and update the FPU support package in `bios_6_51_00_15`[^3].
2. **What should I do when XDCtools reports `invalid platform`?**
   - Specify the full platform path in `config.bld`:
     ```javascript
     var Platform = xdc.module('ti.platforms.tms320x28:TMS320F280049C');
     ```
3. **What if files are still left over after `gmake clean`?**
   - Delete the `Debug` and `Release` directories by hand and check file permissions[^5].
4. **How can I speed up builds of a large project?**
   - Use `-jN` parallel builds (N = number of CPU cores) and a `ccache` cache:
     ```makefile
     export CCACHE_DIR=/tmp/ccache
     CFLAGS += --ccache
     ```

---

The steps above should resolve the build and clean problems systematically. If anything still fails, please provide the line number and surrounding context of the first error in `build.log`.
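For the `build.log` triage step above, a small helper along these lines can pull out the first failure with context. This is an illustrative sketch, not part of the TI toolchain; the error patterns are assumptions based on common gmake and linker messages.

```python
import re
import sys

# Patterns that typically mark the first real failure in a gmake log.
ERROR_RE = re.compile(r"No rule to make target|undefined reference|\*\*\*")


def first_error(path="build.log", context=3):
    """Print the first matching error line, with a few lines of context."""
    with open(path, errors="replace") as log:
        lines = log.readlines()
    for i, line in enumerate(lines):
        if ERROR_RE.search(line):
            start = max(0, i - context)
            for num, text in enumerate(lines[start:i + context + 1], start=start + 1):
                print(f"{num:5d}: {text.rstrip()}")
            return
    print("no matching error pattern found")


if __name__ == "__main__":
    first_error(sys.argv[1] if len(sys.argv) > 1 else "build.log")
```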