Emergency Download

This article describes the Emergency NAND Flash Programmer (ENANDPRG). It is sent by the QPST tool into internal memory, initializes the external DDR SDRAM, sets up the read/write and zero-initialized regions, communicates with QPST over the serial interface, and receives boot loader images and programs them into Flash.


Support for Emergency NAND Flash Programmer (ENANDPRG)

ENANDPRG is sent by the QPST™ utility to the Internal Memory (IMEM) and is similar to the normal NAND Flash Programmer (NANDPRG). In addition to receiving the packets from QPST and programming the Flash, it is responsible for setting up the external RAM required for the read/write and zero-initialized (ZI) regions, as this RAM is not set up by the Primary Boot Loader (PBL).
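Because the PBL leaves this RAM untouched, the programmer's startup code must copy the initialized read/write data into RAM and zero-fill the ZI region itself before any C code that uses static data can run. The following is a minimal sketch of that step; the linker symbols are hypothetical stand-ins for whatever the actual scatter/linker file defines.

/* Minimal sketch of RW/ZI region setup. The linker symbols below are
 * hypothetical; real names come from the programmer's scatter file. */
extern char __rw_load__[];   /* load address of initialized data (in the image) */
extern char __rw_start__[];  /* execution address of RW data (in DDR) */
extern char __rw_end__[];
extern char __zi_start__[];  /* zero-initialized (ZI/.bss) region in DDR */
extern char __zi_end__[];

void setup_ram_regions(void)
{
    char *src = __rw_load__;
    char *dst = __rw_start__;

    /* Copy initialized read/write data from the load image into DDR. */
    while (dst < __rw_end__)
        *dst++ = *src++;

    /* Zero-fill the ZI region so uninitialized globals start at 0. */
    for (dst = __zi_start__; dst < __zi_end__; ++dst)
        *dst = 0;
}

In practice this step is often handled by the toolchain's own scatter-loading support rather than hand-written C, but the effect is the same.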


3. ENPRGxxxx.HEX executes in the IMEM and sets up the DRAM. In current versions, it initializes the external DDR SDRAM on the EBI-1. In dual-core systems, which have internal stacked memory on the SMI interface, ENPRGxxxx.HEX can also be configured to support the internal stacked memory as an alternative to the external DDR SDRAM. DRAM is required for the read/write and zero-initialized regions of the emergency download code.
4. ENPRGxxxx.HEX initializes the NAND controller in preparation for the Flash programming operations.
5. ENPRGxxxx.HEX communicates with QPST over the serial interface, accepts the boot loader images, and programs them onto the Flash (a sketch of this receive-and-program loop follows this list).
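
To make the flow in step 5 concrete, here is a hedged sketch of the receive-and-program loop. The packet layout and all helper functions (serial_read_packet, nand_write, serial_send_ack, serial_send_nak) are hypothetical placeholders; the real framing and handshaking are defined by the QPST download protocol described in the source document referenced below.

#include <stdint.h>

#define NAND_PAGE_SIZE 2048u

typedef struct {
    uint32_t flash_addr;              /* destination offset in NAND   */
    uint32_t length;                  /* payload bytes in this packet */
    uint8_t  payload[NAND_PAGE_SIZE]; /* boot loader image data       */
} dl_packet_t;

/* Hypothetical hardware/protocol hooks provided elsewhere. */
int  serial_read_packet(dl_packet_t *pkt);                   /* 0 on success */
int  nand_write(uint32_t addr, const uint8_t *d, uint32_t n); /* 0 on success */
void serial_send_ack(void);
void serial_send_nak(void);

void download_loop(void)
{
    dl_packet_t pkt;

    for (;;) {
        if (serial_read_packet(&pkt) != 0) {
            serial_send_nak();       /* framing/CRC error: ask QPST to resend */
            continue;
        }
        if (pkt.length == 0)         /* zero-length packet: end of image      */
            break;
        if (nand_write(pkt.flash_addr, pkt.payload, pkt.length) != 0) {
            serial_send_nak();       /* program failure: report to QPST       */
            continue;
        }
        serial_send_ack();           /* confirm so QPST sends the next packet */
    }
}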

Source: 80-VP758-1_B_Emergency_Download_Feature.pdf


